diff --git a/.gitattributes b/.gitattributes index 74fb36206512fce4565ce0c96c3e8625f1c5ca16..4d02b9c3bfcad8ef94dc3403676baad03d252e17 100644 --- a/.gitattributes +++ b/.gitattributes @@ -35,3 +35,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *tfevents* filter=lfs diff=lfs merge=lfs -text vendor-usecase-app/app/furniture/sofa/sofa-1.jpeg filter=lfs diff=lfs merge=lfs -text vendor-usecase-app/generated_image.png filter=lfs diff=lfs merge=lfs -text +app/furniture/sofa/sofa-1.jpeg filter=lfs diff=lfs merge=lfs -text +generated_image.png filter=lfs diff=lfs merge=lfs -text diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..f8f37e61c13fcb6757d0a0288e5545c24d09479e --- /dev/null +++ b/.gitignore @@ -0,0 +1,55 @@ +# These are some examples of commonly ignored file patterns. +# You should customize this list as applicable to your project. +# Learn more about .gitignore: +# https://www.atlassian.com/git/tutorials/saving-changes/gitignore + +# Node artifact files +node_modules/ +dist/ + +# Compiled Java class files +*.class + +# Compiled Python bytecode +*.py[cod] + +# Log files +*.log + +# Package files +*.jar + +# Maven +target/ +dist/ + +# JetBrains IDE +.idea/ + +# Unit test reports +TEST*.xml + +# Generated by MacOS +.DS_Store + +# Generated by Windows +Thumbs.db + +# Applications +*.app +*.exe +*.war + +# Large media files +*.mp4 +*.tiff +*.avi +*.flv +*.mov +*.wmv + +.env +generated_image* +sample_images/ +test.py +pqr.py \ No newline at end of file diff --git a/README.md b/README.md index bc26c6d5e6d13a33cc502b587bbbefd78b86ee49..39af52c077c0d4c3bc7730b362592e0bf7f635db 100644 --- a/README.md +++ b/README.md @@ -1,13 +1,29 @@ ---- -title: VTO -emoji: ⚡ -colorFrom: gray -colorTo: purple -sdk: gradio -sdk_version: 5.49.1 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference +# README # + +This README 
would normally document whatever steps are necessary to get your application up and running. + +### What is this repository for? ### + +* Quick summary +* Version +* [Learn Markdown](https://bitbucket.org/tutorials/markdowndemo) + +### How do I get set up? ### + +* Summary of set up +* Configuration +* Dependencies +* Database configuration +* How to run tests +* Deployment instructions + +### Contribution guidelines ### + +* Writing tests +* Code review +* Other guidelines + +### Who do I talk to? ### + +* Repo owner or admin +* Other community or team contact \ No newline at end of file diff --git a/app/__pycache__/main.cpython-310.pyc b/app/__pycache__/main.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..88cb64654350ca2899f3b63e3a293f05a315e774 Binary files /dev/null and b/app/__pycache__/main.cpython-310.pyc differ diff --git a/app/__pycache__/main.cpython-38.pyc b/app/__pycache__/main.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7bb8707decbdb24187cf43553c67db11301fe585 Binary files /dev/null and b/app/__pycache__/main.cpython-38.pyc differ diff --git a/app/api/__init__.py b/app/api/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/app/api/__pycache__/__init__.cpython-310.pyc b/app/api/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3347cff735b485226f17e7abe8e17529b57a6ca3 Binary files /dev/null and b/app/api/__pycache__/__init__.cpython-310.pyc differ diff --git a/app/api/__pycache__/__init__.cpython-38.pyc b/app/api/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..043627ddea2d61e5f179d3011f6bc90ba37196d5 Binary files /dev/null and b/app/api/__pycache__/__init__.cpython-38.pyc differ diff --git a/app/api/routers/v1/__init__.py b/app/api/routers/v1/__init__.py new file mode 100644 index 
0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/app/api/routers/v1/__pycache__/__init__.cpython-310.pyc b/app/api/routers/v1/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e341388a183c47077af5383e895c1a5ca56959ca Binary files /dev/null and b/app/api/routers/v1/__pycache__/__init__.cpython-310.pyc differ diff --git a/app/api/routers/v1/__pycache__/__init__.cpython-38.pyc b/app/api/routers/v1/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f1dcdd932cc3b852f7d04822916ec2603840b8bf Binary files /dev/null and b/app/api/routers/v1/__pycache__/__init__.cpython-38.pyc differ diff --git a/app/api/routers/v1/__pycache__/background_edit.cpython-310.pyc b/app/api/routers/v1/__pycache__/background_edit.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..41c5672cac34bd67af195c3b00bab408dc9bf7c8 Binary files /dev/null and b/app/api/routers/v1/__pycache__/background_edit.cpython-310.pyc differ diff --git a/app/api/routers/v1/__pycache__/detail_shots.cpython-310.pyc b/app/api/routers/v1/__pycache__/detail_shots.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d341b78c293615537706488fc7820955a8a8f7fd Binary files /dev/null and b/app/api/routers/v1/__pycache__/detail_shots.cpython-310.pyc differ diff --git a/app/api/routers/v1/__pycache__/editorial.cpython-310.pyc b/app/api/routers/v1/__pycache__/editorial.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e453bb8c45bae2e8f419e8eeeb694e6689431ef Binary files /dev/null and b/app/api/routers/v1/__pycache__/editorial.cpython-310.pyc differ diff --git a/app/api/routers/v1/__pycache__/health.cpython-310.pyc b/app/api/routers/v1/__pycache__/health.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..72427085788aa906786a81bdf60b0de54533a0c6 
Binary files /dev/null and b/app/api/routers/v1/__pycache__/health.cpython-310.pyc differ diff --git a/app/api/routers/v1/__pycache__/mannequin.cpython-310.pyc b/app/api/routers/v1/__pycache__/mannequin.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..87547a6a2ce641f05cfbdd4c1d644e8cfc101aa2 Binary files /dev/null and b/app/api/routers/v1/__pycache__/mannequin.cpython-310.pyc differ diff --git a/app/api/routers/v1/__pycache__/multi_view.cpython-310.pyc b/app/api/routers/v1/__pycache__/multi_view.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52212b3170a4bbb4bea2f13e0ad29aaf4d123130 Binary files /dev/null and b/app/api/routers/v1/__pycache__/multi_view.cpython-310.pyc differ diff --git a/app/api/routers/v1/__pycache__/occasion.cpython-310.pyc b/app/api/routers/v1/__pycache__/occasion.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e17bf36a33ee2ef58605b39568ad5be2f0134340 Binary files /dev/null and b/app/api/routers/v1/__pycache__/occasion.cpython-310.pyc differ diff --git a/app/api/routers/v1/__pycache__/outfitmatch.cpython-310.pyc b/app/api/routers/v1/__pycache__/outfitmatch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0647382737470d067d0a7681176ff8555fbde8fb Binary files /dev/null and b/app/api/routers/v1/__pycache__/outfitmatch.cpython-310.pyc differ diff --git a/app/api/routers/v1/__pycache__/outfitmatch.cpython-38.pyc b/app/api/routers/v1/__pycache__/outfitmatch.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..adafc35eea4361df9ba1d4517e7076561aebd4a9 Binary files /dev/null and b/app/api/routers/v1/__pycache__/outfitmatch.cpython-38.pyc differ diff --git a/app/api/routers/v1/__pycache__/size_comparision.cpython-310.pyc b/app/api/routers/v1/__pycache__/size_comparision.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..33720d357ed1323372f027f04b408cb8c84c7ee0 Binary files /dev/null and b/app/api/routers/v1/__pycache__/size_comparision.cpython-310.pyc differ diff --git a/app/api/routers/v1/__pycache__/tryon.cpython-310.pyc b/app/api/routers/v1/__pycache__/tryon.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e220232058a99999228631f9f8ae753a7961f23 Binary files /dev/null and b/app/api/routers/v1/__pycache__/tryon.cpython-310.pyc differ diff --git a/app/api/routers/v1/__pycache__/two_object_size.cpython-310.pyc b/app/api/routers/v1/__pycache__/two_object_size.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0709ea02d3b134c6fb4fa9a4fb5f3ec287dab5fa Binary files /dev/null and b/app/api/routers/v1/__pycache__/two_object_size.cpython-310.pyc differ diff --git a/app/api/routers/v1/__pycache__/whatfits.cpython-310.pyc b/app/api/routers/v1/__pycache__/whatfits.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c41728be2eee0b1a2dc63f0118894be34aafcd4d Binary files /dev/null and b/app/api/routers/v1/__pycache__/whatfits.cpython-310.pyc differ diff --git a/app/api/routers/v1/background_edit.py b/app/api/routers/v1/background_edit.py new file mode 100644 index 0000000000000000000000000000000000000000..2833179d55333abf6e642b367a613fa91e9c56bd --- /dev/null +++ b/app/api/routers/v1/background_edit.py @@ -0,0 +1,70 @@ +from fastapi import APIRouter, File, UploadFile, HTTPException, Form +from fastapi.responses import StreamingResponse +from app.services.image_generation_service import generate_image_from_files_and_prompt +from app.services.prompt_factory import BACKGROUND_EDIT_TEMPLATE +from app.schemas.common_schemas import ModelSchema, CameraSchema,GarmentType,SurfaceType +import io +from typing import List, Annotated + +router = APIRouter() + +@router.post("/background_edit", tags=["Background_edit"]) +async def background_edit( + garment_type: 
Annotated[str, Form()], + surface_type: Annotated[str, Form()], + camera_view_angle: Annotated[str, Form()], + camera_distance_meters: Annotated[float, Form()], + camera_focal_length_mm: Annotated[float, Form()], + camera_aperture_f_number: Annotated[float, Form()], + camera_lighting_condition: Annotated[str, Form()], + camera_background: Annotated[str, Form()], + garment_images: List[UploadFile] = File(..., description="Exactly one image of the garments (e.g., dress /bag/shoes).", min_items=1, max_items=1), +): + """ + Generate a try-on image based on garment images, model characteristics, and camera settings. + """ + try: + garment=GarmentType(garment_type=garment_type) + surface=SurfaceType(surface_type=surface_type) + camera_settings = CameraSchema( + view_angle=camera_view_angle, + distance_meters=camera_distance_meters, + focal_length_mm=camera_focal_length_mm, + aperture_f_number=camera_aperture_f_number, + lighting_condition=camera_lighting_condition, + background=camera_background + ) + except Exception as e: + raise HTTPException(status_code=400, detail=f"Invalid model or camera attributes: {e}") + + # Validation for exactly two files (FastAPI also handles min_items/max_items for File, but explicit check is fine) + if not garment_images or len(garment_images) != 1: + raise HTTPException(status_code=400, detail="Exactly one garment images are required for outfit match.") + + image_bytes_list = [] + for image_file in garment_images: # No need to check if garment_images is None here + if not image_file.content_type.startswith("image/"): + raise HTTPException(status_code=400, detail=f"Invalid file type for garment: {image_file.filename}. 
Must be an image.") + content = await image_file.read() + image_bytes_list.append((content, image_file.filename)) # Store as tuple (bytes, filename) + + prompt=BACKGROUND_EDIT_TEMPLATE.format( + garment_type=garment.garment_type, + surface_type=surface.surface_type, + camera_view_angle=camera_settings.view_angle, + camera_distance_meters=camera_settings.distance_meters, + camera_focal_length_mm=camera_settings.focal_length_mm, + camera_aperture_f_number=camera_settings.aperture_f_number, + camera_lighting_condition=camera_settings.lighting_condition, + camera_background=camera_settings.background + ) + + generated_image_data = await generate_image_from_files_and_prompt( + image_files=image_bytes_list, + prompt=prompt + ) + + if generated_image_data: + return StreamingResponse(io.BytesIO(generated_image_data), media_type="image/png") + else: + raise HTTPException(status_code=500, detail="Failed to generate image. Check service logs.") diff --git a/app/api/routers/v1/detail_shots.py b/app/api/routers/v1/detail_shots.py new file mode 100644 index 0000000000000000000000000000000000000000..3dffe1089880524e36b9276308fdcb5c273d04d2 --- /dev/null +++ b/app/api/routers/v1/detail_shots.py @@ -0,0 +1,37 @@ +from fastapi import APIRouter, File, UploadFile, HTTPException, Form +from fastapi.responses import StreamingResponse +from app.services.image_generation_service import generate_image_from_files_and_prompt +from app.services.prompt_factory import DETAIL_SHOTS_TEMPLATE +from app.schemas.common_schemas import ModelSchema, CameraSchema,GarmentType +import io +from typing import List, Annotated + +router = APIRouter() + +@router.post("/detail", tags=["details-shots"]) +async def detail( + garment_images: List[UploadFile] = File(..., description="Exactly one image of the garments (e.g., dress /bag/shoes).", min_items=1, max_items=1), +): + + # Validation for exactly two files (FastAPI also handles min_items/max_items for File, but explicit check is fine) + if not garment_images 
or len(garment_images) != 1: + raise HTTPException(status_code=400, detail="Exactly one garment images are required for outfit match.") + + image_bytes_list = [] + for image_file in garment_images: # No need to check if garment_images is None here + if not image_file.content_type.startswith("image/"): + raise HTTPException(status_code=400, detail=f"Invalid file type for garment: {image_file.filename}. Must be an image.") + content = await image_file.read() + image_bytes_list.append((content, image_file.filename)) # Store as tuple (bytes, filename) + + prompt=DETAIL_SHOTS_TEMPLATE + + generated_image_data = await generate_image_from_files_and_prompt( + image_files=image_bytes_list, + prompt=prompt + ) + + if generated_image_data: + return StreamingResponse(io.BytesIO(generated_image_data), media_type="image/png") + else: + raise HTTPException(status_code=500, detail="Failed to generate image. Check service logs.") diff --git a/app/api/routers/v1/editorial.py b/app/api/routers/v1/editorial.py new file mode 100644 index 0000000000000000000000000000000000000000..d85d0e3159ad582987a970f49cb89b3cad8d0b86 --- /dev/null +++ b/app/api/routers/v1/editorial.py @@ -0,0 +1,81 @@ +from fastapi import APIRouter, File, UploadFile, HTTPException, Form +from fastapi.responses import StreamingResponse +from app.services.image_generation_service import generate_image_from_files_and_prompt +from app.services.prompt_factory import EDITORIAL_PROMPT_TEMPLATE +from app.schemas.common_schemas import ModelSchema, CameraSchema,GarmentType +import io +from typing import List, Annotated + +router = APIRouter() + +@router.post("/editorial", tags=["Editorial"]) +async def tryon( + model_gender: Annotated[str, Form()], + model_age_range: Annotated[str, Form()], + model_body_shape: Annotated[str, Form()], + model_race_ethnicity: Annotated[str, Form()], + model_pose: Annotated[str, Form()], + camera_view_angle: Annotated[str, Form()], + camera_distance_meters: Annotated[float, Form()], + 
camera_focal_length_mm: Annotated[float, Form()], + camera_aperture_f_number: Annotated[float, Form()], + camera_lighting_condition: Annotated[str, Form()], + camera_background: Annotated[str, Form()], + garment_images: List[UploadFile] = File(..., description="Exactly one image of the garments (e.g., dress /bag/shoes).", min_items=1, max_items=1), +): + """ + Generate a editorial style image based on garment images, model characteristics, and camera settings. + """ + try: + model_attributes = ModelSchema( + gender=model_gender, + age_range=model_age_range, + body_shape=model_body_shape, + race_ethnicity=model_race_ethnicity, + pose=model_pose + ) + camera_settings = CameraSchema( + view_angle=camera_view_angle, + distance_meters=camera_distance_meters, + focal_length_mm=camera_focal_length_mm, + aperture_f_number=camera_aperture_f_number, + lighting_condition=camera_lighting_condition, + background=camera_background + ) + except Exception as e: + raise HTTPException(status_code=400, detail=f"Invalid model or camera attributes: {e}") + + # Validation for exactly two files (FastAPI also handles min_items/max_items for File, but explicit check is fine) + if not garment_images or len(garment_images) != 1: + raise HTTPException(status_code=400, detail="Exactly one garment images are required for outfit match.") + + image_bytes_list = [] + for image_file in garment_images: # No need to check if garment_images is None here + if not image_file.content_type.startswith("image/"): + raise HTTPException(status_code=400, detail=f"Invalid file type for garment: {image_file.filename}. 
Must be an image.") + content = await image_file.read() + image_bytes_list.append((content, image_file.filename)) # Store as tuple (bytes, filename) + + prompt=EDITORIAL_PROMPT_TEMPLATE.format( + model_gender=model_attributes.gender, + model_age_range=model_attributes.age_range, + model_race_ethnicity=model_attributes.race_ethnicity, + model_body_shape=model_attributes.body_shape, + model_pose=model_attributes.pose, + camera_view_angle=camera_settings.view_angle, + camera_distance_meters=camera_settings.distance_meters, + camera_focal_length_mm=camera_settings.focal_length_mm, + camera_aperture_f_number=camera_settings.aperture_f_number, + camera_lighting_condition=camera_settings.lighting_condition, + camera_background=camera_settings.background + ) + + generated_image_data = await generate_image_from_files_and_prompt( + image_files=image_bytes_list, + prompt=prompt + ) + + if generated_image_data: + return StreamingResponse(io.BytesIO(generated_image_data), media_type="image/png") + else: + raise HTTPException(status_code=500, detail="Failed to generate image. 
Check service logs.") diff --git a/app/api/routers/v1/health.py b/app/api/routers/v1/health.py new file mode 100644 index 0000000000000000000000000000000000000000..e87318acbe3306ca26e8f6cae03a20267f9fa2dd --- /dev/null +++ b/app/api/routers/v1/health.py @@ -0,0 +1,7 @@ +from fastapi import APIRouter + +router = APIRouter() + +@router.get("/health", tags=["Health"]) +async def health_check(): + return {"status": "healthy", "message": "API is up and running!"} diff --git a/app/api/routers/v1/mannequin.py b/app/api/routers/v1/mannequin.py new file mode 100644 index 0000000000000000000000000000000000000000..5fc695fcbc44ba906709abab26fae8ed8417b8af --- /dev/null +++ b/app/api/routers/v1/mannequin.py @@ -0,0 +1,37 @@ +from fastapi import APIRouter, File, UploadFile, HTTPException, Form +from fastapi.responses import StreamingResponse +from app.services.image_generation_service import generate_image_from_files_and_prompt +from app.services.prompt_factory import MANNEQUIN_TEMPLATE +from app.schemas.common_schemas import ModelSchema, CameraSchema,GarmentType +import io +from typing import List, Annotated + +router = APIRouter() + +@router.post("/mannequin", tags=["MANNEQUIN"]) +async def mannequin( + garment_images: List[UploadFile] = File(..., description="Exactly one image of the garments (e.g., dress /bag/shoes).", min_items=1, max_items=1), +): + + # Validation for exactly two files (FastAPI also handles min_items/max_items for File, but explicit check is fine) + if not garment_images or len(garment_images) != 1: + raise HTTPException(status_code=400, detail="Exactly one garment images are required for outfit match.") + + image_bytes_list = [] + for image_file in garment_images: # No need to check if garment_images is None here + if not image_file.content_type.startswith("image/"): + raise HTTPException(status_code=400, detail=f"Invalid file type for garment: {image_file.filename}. 
Must be an image.") + content = await image_file.read() + image_bytes_list.append((content, image_file.filename)) # Store as tuple (bytes, filename) + + prompt=MANNEQUIN_TEMPLATE + + generated_image_data = await generate_image_from_files_and_prompt( + image_files=image_bytes_list, + prompt=prompt + ) + + if generated_image_data: + return StreamingResponse(io.BytesIO(generated_image_data), media_type="image/png") + else: + raise HTTPException(status_code=500, detail="Failed to generate image. Check service logs.") diff --git a/app/api/routers/v1/multi_view.py b/app/api/routers/v1/multi_view.py new file mode 100644 index 0000000000000000000000000000000000000000..64969be5d5bcd5cbb927cf2eebac7b240dfb549c --- /dev/null +++ b/app/api/routers/v1/multi_view.py @@ -0,0 +1,37 @@ +from fastapi import APIRouter, File, UploadFile, HTTPException, Form +from fastapi.responses import StreamingResponse +from app.services.image_generation_service import generate_image_from_files_and_prompt +from app.services.prompt_factory import MULTI_VIEW_TEMPLATE +from app.schemas.common_schemas import ModelSchema, CameraSchema,GarmentType +import io +from typing import List, Annotated + +router = APIRouter() + +@router.post("/multi_view", tags=["Multi_view"]) +async def multi_view( + garment_images: List[UploadFile] = File(..., description="Exactly one image of the garments (e.g., dress /bag/shoes).", min_items=1, max_items=1), +): + + # Validation for exactly two files (FastAPI also handles min_items/max_items for File, but explicit check is fine) + if not garment_images or len(garment_images) != 1: + raise HTTPException(status_code=400, detail="Exactly one garment images are required for outfit match.") + + image_bytes_list = [] + for image_file in garment_images: # No need to check if garment_images is None here + if not image_file.content_type.startswith("image/"): + raise HTTPException(status_code=400, detail=f"Invalid file type for garment: {image_file.filename}. 
Must be an image.") + content = await image_file.read() + image_bytes_list.append((content, image_file.filename)) # Store as tuple (bytes, filename) + + prompt=MULTI_VIEW_TEMPLATE + + generated_image_data = await generate_image_from_files_and_prompt( + image_files=image_bytes_list, + prompt=prompt + ) + + if generated_image_data: + return StreamingResponse(io.BytesIO(generated_image_data), media_type="image/png") + else: + raise HTTPException(status_code=500, detail="Failed to generate image. Check service logs.") diff --git a/app/api/routers/v1/occasion.py b/app/api/routers/v1/occasion.py new file mode 100644 index 0000000000000000000000000000000000000000..4a326a7cc9dc36628df34482a2fe3eebc531359e --- /dev/null +++ b/app/api/routers/v1/occasion.py @@ -0,0 +1,84 @@ +from fastapi import APIRouter, File, UploadFile, HTTPException, Form +from fastapi.responses import StreamingResponse +from app.services.image_generation_service import generate_image_from_files_and_prompt +from app.services.prompt_factory import OCCASION_BASED_PROMPT_TEMPLATE +from app.schemas.common_schemas import ModelSchema, CameraSchema,GarmentType +import io +from typing import List, Annotated + +router = APIRouter() + +@router.post("/occasion", tags=["occasion-based-styling"]) +async def occasion_based( + garment_type: Annotated[str, Form()], + model_gender: Annotated[str, Form()], + model_age_range: Annotated[str, Form()], + model_body_shape: Annotated[str, Form()], + model_race_ethnicity: Annotated[str, Form()], + model_pose: Annotated[str, Form()], + camera_view_angle: Annotated[str, Form()], + camera_distance_meters: Annotated[float, Form()], + camera_focal_length_mm: Annotated[float, Form()], + camera_aperture_f_number: Annotated[float, Form()], + camera_lighting_condition: Annotated[str, Form()], + camera_background: Annotated[str, Form()], + garment_images: List[UploadFile] = File(..., description="Exactly one image of the garments (e.g., dress /bag/shoes).", min_items=1, max_items=1), +): 
+ # """ + # Generate a try-on image based on garment images, model characteristics, and camera settings. + # """ + try: + garment=GarmentType(garment_type=garment_type) + model_attributes = ModelSchema( + gender=model_gender, + age_range=model_age_range, + body_shape=model_body_shape, + race_ethnicity=model_race_ethnicity, + pose=model_pose + ) + camera_settings = CameraSchema( + view_angle=camera_view_angle, + distance_meters=camera_distance_meters, + focal_length_mm=camera_focal_length_mm, + aperture_f_number=camera_aperture_f_number, + lighting_condition=camera_lighting_condition, + background=camera_background + ) + except Exception as e: + raise HTTPException(status_code=400, detail=f"Invalid model or camera attributes: {e}") + + # Validation for exactly two files (FastAPI also handles min_items/max_items for File, but explicit check is fine) + if not garment_images or len(garment_images) != 1: + raise HTTPException(status_code=400, detail="Exactly one garment images are required for outfit match.") + + image_bytes_list = [] + for image_file in garment_images: # No need to check if garment_images is None here + if not image_file.content_type.startswith("image/"): + raise HTTPException(status_code=400, detail=f"Invalid file type for garment: {image_file.filename}. 
Must be an image.") + content = await image_file.read() + image_bytes_list.append((content, image_file.filename)) # Store as tuple (bytes, filename) + + prompt=OCCASION_BASED_PROMPT_TEMPLATE.format( + garment_type=garment.garment_type, + model_gender=model_attributes.gender, + model_age_range=model_attributes.age_range, + model_race_ethnicity=model_attributes.race_ethnicity, + model_body_shape=model_attributes.body_shape, + model_pose=model_attributes.pose, + camera_view_angle=camera_settings.view_angle, + camera_distance_meters=camera_settings.distance_meters, + camera_focal_length_mm=camera_settings.focal_length_mm, + camera_aperture_f_number=camera_settings.aperture_f_number, + camera_lighting_condition=camera_settings.lighting_condition, + camera_background=camera_settings.background + ) + + generated_image_data = await generate_image_from_files_and_prompt( + image_files=image_bytes_list, + prompt=prompt + ) + + if generated_image_data: + return StreamingResponse(io.BytesIO(generated_image_data), media_type="image/png") + else: + raise HTTPException(status_code=500, detail="Failed to generate image. 
Check service logs.") diff --git a/app/api/routers/v1/outfitmatch.py b/app/api/routers/v1/outfitmatch.py new file mode 100644 index 0000000000000000000000000000000000000000..e387b886b4d945bbd1296fb731dd32afd64a14fc --- /dev/null +++ b/app/api/routers/v1/outfitmatch.py @@ -0,0 +1,84 @@ +from fastapi import APIRouter, File, UploadFile, HTTPException, Form +from fastapi.responses import StreamingResponse +from app.services.image_generation_service import generate_image_from_files_and_prompt +from app.schemas.common_schemas import ModelSchema, CameraSchema, GarmentType +from app.services.prompt_factory import OUTFIT_MATCH_PROMPT_TEMPLATE +import io +from typing import List, Annotated + +router = APIRouter() + +@router.post("/outfit-match", tags=["Outfit Match"]) +async def outfit_match_tryon( + garment_type: Annotated[str, Form()], + model_gender: Annotated[str, Form()], + model_age_range: Annotated[str, Form()], + model_body_shape: Annotated[str, Form()], + model_race_ethnicity: Annotated[str, Form()], + model_pose: Annotated[str, Form()], + camera_view_angle: Annotated[str, Form()], + camera_distance_meters: Annotated[float, Form()], + camera_focal_length_mm: Annotated[float, Form()], + camera_aperture_f_number: Annotated[float, Form()], + camera_lighting_condition: Annotated[str, Form()], + camera_background: Annotated[str, Form()], + garment_images: List[UploadFile] = File(..., description="Exactly two images of the garments (e.g., dress and bag).", min_items=2, max_items=2), +): + """ + Generate a try-on image based on garment images, model characteristics, and camera settings. 
+ """ + try: + garment=GarmentType(garment_type=garment_type) + model_attributes = ModelSchema( + gender=model_gender, + age_range=model_age_range, + body_shape=model_body_shape, + race_ethnicity=model_race_ethnicity, + pose=model_pose + ) + camera_settings = CameraSchema( + view_angle=camera_view_angle, + distance_meters=camera_distance_meters, + focal_length_mm=camera_focal_length_mm, + aperture_f_number=camera_aperture_f_number, + lighting_condition=camera_lighting_condition, + background=camera_background + ) + except Exception as e: + raise HTTPException(status_code=400, detail=f"Invalid model or camera attributes: {e}") + + # Validation for exactly two files (FastAPI also handles min_items/max_items for File, but explicit check is fine) + if not garment_images or len(garment_images) != 2: + raise HTTPException(status_code=400, detail="Exactly two garment images are required for outfit match.") + + image_bytes_list = [] + for image_file in garment_images: # No need to check if garment_images is None here + if not image_file.content_type.startswith("image/"): + raise HTTPException(status_code=400, detail=f"Invalid file type for garment: {image_file.filename}. 
Must be an image.") + content = await image_file.read() + image_bytes_list.append((content, image_file.filename)) # Store as tuple (bytes, filename) + + prompt=OUTFIT_MATCH_PROMPT_TEMPLATE.format( + garment_type=garment.garment_type, + model_gender=model_attributes.gender, + model_age_range=model_attributes.age_range, + model_race_ethnicity=model_attributes.race_ethnicity, + model_body_shape=model_attributes.body_shape, + model_pose=model_attributes.pose, + camera_view_angle=camera_settings.view_angle, + camera_distance_meters=camera_settings.distance_meters, + camera_focal_length_mm=camera_settings.focal_length_mm, + camera_aperture_f_number=camera_settings.aperture_f_number, + camera_lighting_condition=camera_settings.lighting_condition, + camera_background=camera_settings.background + ) + + generated_image_data = await generate_image_from_files_and_prompt( + image_files=image_bytes_list, + prompt=prompt + ) + + if generated_image_data: + return StreamingResponse(io.BytesIO(generated_image_data), media_type="image/png") + else: + raise HTTPException(status_code=500, detail="Failed to generate image. 
from fastapi import APIRouter, File, UploadFile, HTTPException, Form
from fastapi.responses import StreamingResponse
from app.services.image_generation_service import generate_image_from_files_and_prompt
from app.services.prompt_factory import SIZE_COMPARISION_TEMPLATE, JEWELLERY_SIZE_COMPARISION_TEMPLATE
from app.schemas.common_schemas import DimensionSchema, GarmentType
import io
from typing import Annotated, List, Optional

router = APIRouter()

# Garment types supported by this endpoint, mapped to their prompt templates.
# A dispatch dict replaces the original if/elif chain with duplicated
# template.format(...) calls.
_SIZE_TEMPLATES = {
    "handbags": SIZE_COMPARISION_TEMPLATE,
    "jewellery and watches": JEWELLERY_SIZE_COMPARISION_TEMPLATE,
}


def _dim_or_na(value: Optional[float]):
    """Return the dimension value, or the string "N/A" when it was not supplied."""
    return value if value is not None else "N/A"


@router.post("/size", tags=["size-comparision"])
async def size(
    garment_type: str = Form(...),
    product_height: Optional[float] = Form(default=None),
    product_width: Optional[float] = Form(default=None),
    product_length: Optional[float] = Form(default=None),
    garment_images: List[UploadFile] = File(..., description="Front-shot image of the handbag/jewellery", min_items=1, max_items=1),
):
    """Generate a size-comparison image for a handbag or jewellery piece.

    The single uploaded front-shot image plus the (optional) product
    dimensions are combined into a prompt for the image-generation service;
    the generated comparison image is streamed back as PNG.

    Raises:
        HTTPException(400): invalid dimensions, unsupported ``garment_type``,
            or wrong file count/type.
        HTTPException(500): the image-generation service returned no data.
    """
    try:
        product_dimensions = DimensionSchema(
            height_cm=product_height,
            width_cm=product_width,
            length_cm=product_length,
        )
    except Exception as e:
        raise HTTPException(status_code=400, detail=f"Invalid dimensions: {e}")

    # Explicit count check in addition to File(min_items/max_items).
    if not garment_images or len(garment_images) != 1:
        raise HTTPException(status_code=400, detail="Exactly one front shot of the image")

    # Validate the garment type up front, before reading any upload bytes.
    # The original used f"Invalid garment type" — an f-string with no
    # placeholder — so the offending value was never reported.
    template = _SIZE_TEMPLATES.get(garment_type)
    if template is None:
        raise HTTPException(status_code=400, detail=f"Invalid garment type: {garment_type!r}")

    image_bytes_list = []
    for image_file in garment_images:
        if not image_file.content_type.startswith("image/"):
            raise HTTPException(status_code=400, detail=f"Invalid file type for garment: {image_file.filename}. Must be an image.")
        content = await image_file.read()
        image_bytes_list.append((content, image_file.filename))  # (bytes, filename)

    prompt = template.format(
        height_cm=_dim_or_na(product_dimensions.height_cm),
        width_cm=_dim_or_na(product_dimensions.width_cm),
        length_cm=_dim_or_na(product_dimensions.length_cm),
    )

    generated_image_data = await generate_image_from_files_and_prompt(
        image_files=image_bytes_list,
        prompt=prompt,
    )

    if generated_image_data:
        return StreamingResponse(io.BytesIO(generated_image_data), media_type="image/png")
    raise HTTPException(status_code=500, detail="Failed to generate image. Check service logs.")
from fastapi import APIRouter, File, UploadFile, HTTPException, Form
from fastapi.responses import StreamingResponse
from app.services.image_generation_service import generate_image_from_files_and_prompt
from app.services.prompt_factory import LIFESTYLE_TRYON_PROMPT_TEMPLATE
from app.schemas.common_schemas import ModelSchema, CameraSchema, GarmentType
import io
from typing import List, Annotated

router = APIRouter()


@router.post("/tryon", tags=["TRY-ON"])
async def tryon(
    garment_type: Annotated[str, Form()],
    model_gender: Annotated[str, Form()],
    model_age_range: Annotated[str, Form()],
    model_body_shape: Annotated[str, Form()],
    model_race_ethnicity: Annotated[str, Form()],
    model_pose: Annotated[str, Form()],
    camera_view_angle: Annotated[str, Form()],
    camera_distance_meters: Annotated[float, Form()],
    camera_focal_length_mm: Annotated[float, Form()],
    camera_aperture_f_number: Annotated[float, Form()],
    camera_lighting_condition: Annotated[str, Form()],
    camera_background: Annotated[str, Form()],
    garment_images: List[UploadFile] = File(..., description="Exactly one image of the garments (e.g., dress /bag/shoes).", min_items=1, max_items=1),
):
    """Generate a try-on image from one garment image plus model/camera settings.

    The form fields are validated through the pydantic schemas, the garment
    image and the filled prompt template are sent to the image-generation
    service, and the result is streamed back as PNG.

    Raises:
        HTTPException(400): invalid model/camera attributes, or wrong
            file count/type.
        HTTPException(500): the image-generation service returned no data.
    """
    try:
        garment = GarmentType(garment_type=garment_type)
        model_attributes = ModelSchema(
            gender=model_gender,
            age_range=model_age_range,
            body_shape=model_body_shape,
            race_ethnicity=model_race_ethnicity,
            pose=model_pose,
        )
        camera_settings = CameraSchema(
            view_angle=camera_view_angle,
            distance_meters=camera_distance_meters,
            focal_length_mm=camera_focal_length_mm,
            aperture_f_number=camera_aperture_f_number,
            lighting_condition=camera_lighting_condition,
            background=camera_background,
        )
    except Exception as e:
        raise HTTPException(status_code=400, detail=f"Invalid model or camera attributes: {e}")

    # Explicit count check in addition to File(min_items/max_items).
    # Original message had broken grammar and referred to "outfit match"
    # although this is the try-on endpoint.
    if not garment_images or len(garment_images) != 1:
        raise HTTPException(status_code=400, detail="Exactly one garment image is required for try-on.")

    image_bytes_list = []
    for image_file in garment_images:
        if not image_file.content_type.startswith("image/"):
            raise HTTPException(status_code=400, detail=f"Invalid file type for garment: {image_file.filename}. Must be an image.")
        content = await image_file.read()
        image_bytes_list.append((content, image_file.filename))  # (bytes, filename)

    prompt = LIFESTYLE_TRYON_PROMPT_TEMPLATE.format(
        garment_type=garment.garment_type,
        model_gender=model_attributes.gender,
        model_age_range=model_attributes.age_range,
        model_race_ethnicity=model_attributes.race_ethnicity,
        model_body_shape=model_attributes.body_shape,
        model_pose=model_attributes.pose,
        camera_view_angle=camera_settings.view_angle,
        camera_distance_meters=camera_settings.distance_meters,
        camera_focal_length_mm=camera_settings.focal_length_mm,
        camera_aperture_f_number=camera_settings.aperture_f_number,
        camera_lighting_condition=camera_settings.lighting_condition,
        camera_background=camera_settings.background,
    )

    generated_image_data = await generate_image_from_files_and_prompt(
        image_files=image_bytes_list,
        prompt=prompt,
    )

    if generated_image_data:
        return StreamingResponse(io.BytesIO(generated_image_data), media_type="image/png")
    raise HTTPException(status_code=500, detail="Failed to generate image. Check service logs.")
from fastapi import APIRouter, File, UploadFile, HTTPException, Form
from fastapi.responses import StreamingResponse
from app.services.image_generation_service import generate_image_from_files_and_prompt
from app.services.prompt_factory import WALLET_SIZE_COMPARISION_TEMPLATE
from app.schemas.common_schemas import DimensionSchema
import io
from typing import List, Annotated

router = APIRouter()

# Field names shared between DimensionSchema and the wallet comparison template.
_DIM_FIELDS = ("height_cm1", "width_cm1", "length_cm1", "height_cm2", "width_cm2", "length_cm2")


@router.post("/walletsize", tags=["wallet-size-comparision"])
async def size(
    product_height1: Annotated[float, Form()],
    product_width1: Annotated[float, Form()],
    product_length1: Annotated[float, Form()],
    product_height2: Annotated[float, Form()],
    product_width2: Annotated[float, Form()],
    product_length2: Annotated[float, Form()],
    garment_images: List[UploadFile] = File(..., description="Front-shot images of the two handbags", min_items=2, max_items=2),
):
    """Generate a side-by-side size-comparison image of two wallets.

    Both sets of dimensions are validated through DimensionSchema, the two
    front-shot uploads plus the filled prompt template are sent to the
    image-generation service, and the result is streamed back as PNG.
    """
    raw_dims = dict(zip(_DIM_FIELDS, (
        product_height1, product_width1, product_length1,
        product_height2, product_width2, product_length2,
    )))
    try:
        product_dimensions = DimensionSchema(**raw_dims)
    except Exception as e:
        raise HTTPException(status_code=400, detail=f"Invalid dimensions: {e}")

    # Redundant with File(min_items/max_items), but explicit.
    if not garment_images or len(garment_images) != 2:
        raise HTTPException(status_code=400, detail="Exactly two front shot images of wallet")

    image_bytes_list = []
    for upload in garment_images:
        if not upload.content_type.startswith("image/"):
            raise HTTPException(status_code=400, detail=f"Invalid file type for garment: {upload.filename}. Must be an image.")
        image_bytes_list.append((await upload.read(), upload.filename))  # (bytes, filename)

    prompt = WALLET_SIZE_COMPARISION_TEMPLATE.format(
        **{name: getattr(product_dimensions, name) for name in _DIM_FIELDS}
    )

    generated_image_data = await generate_image_from_files_and_prompt(
        image_files=image_bytes_list,
        prompt=prompt,
    )

    if generated_image_data:
        return StreamingResponse(io.BytesIO(generated_image_data), media_type="image/png")
    raise HTTPException(status_code=500, detail="Failed to generate image. Check service logs.")
from fastapi import APIRouter, File, UploadFile, HTTPException, Form
from fastapi.responses import StreamingResponse
from app.services.image_generation_service import generate_image_from_files_and_prompt
from app.services.prompt_factory import WHAT_FITS_INSIDE_TEMPLATE
from app.schemas.common_schemas import DimensionSchema
import io
from typing import List, Annotated

router = APIRouter()


@router.post("/whatfits", tags=["what-fits"])
async def whatfits(
    product_height: Annotated[float, Form()],
    product_width: Annotated[float, Form()],
    product_length: Annotated[float, Form()],
    garment_images: List[UploadFile] = File(..., description="Top-shot image of the handbag", min_items=1, max_items=1),
):
    """Generate an image showing what realistically fits inside the bag.

    The single top-shot upload and the validated dimensions are combined
    into a prompt; the generated image is streamed back as PNG.
    """
    try:
        product_dimensions = DimensionSchema(
            height_cm=product_height,
            width_cm=product_width,
            length_cm=product_length,
        )
    except Exception as e:
        raise HTTPException(status_code=400, detail=f"Invalid dimensions: {e}")

    # Redundant with File(min_items/max_items), but explicit.
    if not garment_images or len(garment_images) != 1:
        raise HTTPException(status_code=400, detail="Exactly one top shot of the image")

    image_bytes_list = []
    for upload in garment_images:
        if not upload.content_type.startswith("image/"):
            raise HTTPException(status_code=400, detail=f"Invalid file type for garment: {upload.filename}. Must be an image.")
        image_bytes_list.append((await upload.read(), upload.filename))  # (bytes, filename)

    prompt = WHAT_FITS_INSIDE_TEMPLATE.format(
        height_cm=product_dimensions.height_cm,
        width_cm=product_dimensions.width_cm,
        length_cm=product_dimensions.length_cm,
    )

    generated_image_data = await generate_image_from_files_and_prompt(
        image_files=image_bytes_list,
        prompt=prompt,
    )

    if generated_image_data:
        return StreamingResponse(io.BytesIO(generated_image_data), media_type="image/png")
    raise HTTPException(status_code=500, detail="Failed to generate image. Check service logs.")
from pydantic_settings import BaseSettings, SettingsConfigDict
import os


class Settings(BaseSettings):
    """Application configuration, sourced from the environment and a .env file."""

    # Required API key for the Google GenAI client.
    GEMINI_API_KEY: str
    # Image-generation model name; overridable via the environment.
    IMAGE_GEN_MODEL: str = "gemini-2.0-flash-preview-image-generation"

    # pydantic-settings automatically loads matching variables from .env;
    # unknown variables are ignored rather than rejected.
    model_config = SettingsConfigDict(env_file=".env", env_file_encoding="utf-8", extra="ignore")


# Single shared instance used throughout the application.
settings = Settings()
from typing import Literal

from pydantic import BaseModel, Field


class GarmentType(BaseModel):
    """The product category being processed."""

    # Literal[...] enforces the allowed values at validation time. The original
    # used Field(enum=[...]), which is not a pydantic validation argument —
    # the enum lists were never actually enforced, even though the routers
    # catch validation errors as if they were.
    garment_type: Literal["clothing", "jewellery and watches", "wallet", "shoes", "handbags"] = Field(
        description="The type of product"
    )


class ModelSchema(BaseModel):
    """Attributes of the fashion model to render."""

    gender: Literal["female"] = Field(description="The gender of the person in the image")
    age_range: Literal["teen", "18-25", "26-35", "36-45", "46-55", "56-65", "66+"] = Field(description="The age range of the person in the image")
    body_shape: Literal["rectangle", "pear", "hourglass", "inverted_triangle"] = Field(description="The body shape of the person in the image")
    race_ethnicity: Literal["white", "black", "asian", "latino", "mixed"] = Field(description="The race and ethnicity of the person in the image")
    pose: Literal["standing", "sitting", "lying down", "dancing", "running", "jumping", "walking", "bending", "twisting", "stretching", "flexing", "posing"] = Field(description="The pose of the person in the image")


class CameraSchema(BaseModel):
    """Virtual camera parameters for the generated photograph."""

    view_angle: Literal["front", "45deg", "left", "right", "back"] = Field(description="The view angle of the camera")
    distance_meters: float = Field(description="The distance of the camera from the person in the image")
    focal_length_mm: float = Field(description="The focal length of the camera in millimeters")
    aperture_f_number: float = Field(description="The aperture f-number of the camera")
    lighting_condition: Literal["studio_softbox", "outdoor_sunny", "indoor_warm", "flat"] = Field(description="The lighting condition of the image")
    background: Literal[
        "White",
        "Lifestyle",
        "Beach Settings",
        "Cafe Environment",
        "Spring Garden",
        "Winter Snow",
        "Professional Settings",
    ] = Field(description="The background of the image")


class DimensionSchema(BaseModel):
    """Optional product dimensions in centimeters.

    Unsuffixed fields describe a single product; the 1/2-suffixed fields
    describe the first/second product in two-product comparisons.
    """

    height_cm: float | None = Field(None, description="Height of the product in centimeters.")
    width_cm: float | None = Field(None, description="Width of the product in centimeters.")
    length_cm: float | None = Field(None, description="Length of the product in centimeters.")
    height_cm1: float | None = Field(None, description="Height of the product in centimeters.")
    width_cm1: float | None = Field(None, description="Width of the product in centimeters.")
    length_cm1: float | None = Field(None, description="Length of the product in centimeters.")
    height_cm2: float | None = Field(None, description="Height of the product in centimeters.")
    width_cm2: float | None = Field(None, description="Width of the product in centimeters.")
    length_cm2: float | None = Field(None, description="Length of the product in centimeters.")


class SurfaceType(BaseModel):
    """Surface on which a product-only shot is staged."""

    surface_type: Literal["beach mat", "café table", "bar table"] = Field(description="The background surface of the image")
index 0000000000000000000000000000000000000000..4ec55f4516d70451c2f0920ce1a85d1f16700a65 Binary files /dev/null and b/app/services/__pycache__/__init__.cpython-310.pyc differ diff --git a/app/services/__pycache__/__init__.cpython-38.pyc b/app/services/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a8f64b5dfd61e14aab1639f187892a6284c68777 Binary files /dev/null and b/app/services/__pycache__/__init__.cpython-38.pyc differ diff --git a/app/services/__pycache__/image_generation_service.cpython-310.pyc b/app/services/__pycache__/image_generation_service.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e0930a5e5b7964e54197fbb329ccb283e85c9c73 Binary files /dev/null and b/app/services/__pycache__/image_generation_service.cpython-310.pyc differ diff --git a/app/services/__pycache__/image_generation_service.cpython-38.pyc b/app/services/__pycache__/image_generation_service.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2a232af6ba37c7d675dc996099c943f6d5e0fa8d Binary files /dev/null and b/app/services/__pycache__/image_generation_service.cpython-38.pyc differ diff --git a/app/services/__pycache__/prompt_factory.cpython-310.pyc b/app/services/__pycache__/prompt_factory.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..045fd8b9bb81f603086120a69b542032f0101bba Binary files /dev/null and b/app/services/__pycache__/prompt_factory.cpython-310.pyc differ diff --git a/app/services/image_generation_service.py b/app/services/image_generation_service.py new file mode 100644 index 0000000000000000000000000000000000000000..0ac769a39fb6fd8435779db4bf4b248bb144184a --- /dev/null +++ b/app/services/image_generation_service.py @@ -0,0 +1,127 @@ +import os +from google import genai +from google.genai import types +from io import BytesIO +from app.schemas.common_schemas import ModelSchema, CameraSchema +from app.core.config 
import os
from google import genai
from google.genai import types
from io import BytesIO
from app.schemas.common_schemas import ModelSchema, CameraSchema
from app.core.config import settings
import mimetypes
import traceback
import tempfile

# Fail fast at import time: every call in this module needs a configured key.
if not settings.GEMINI_API_KEY:
    raise ValueError("GEMINI_API_KEY not found in environment or .env file")

client = genai.Client(api_key=settings.GEMINI_API_KEY)
IMAGE_GEN_MODEL = settings.IMAGE_GEN_MODEL


def save_binary_file(filename, data):
    """Saves binary data to a file."""
    with open(filename, "wb") as f:
        f.write(data)


async def generate_image_from_files_and_prompt(
    image_files: list[tuple[bytes, str]],  # (bytes, original_filename) pairs
    prompt: str,
) -> bytes | None:
    """Generate an image with GenAI from one or more input images plus a prompt.

    Each (bytes, filename) pair is written to a temporary file, uploaded to
    the GenAI Files API, and referenced by URI in the request. Returns the
    raw bytes of the first inline image in the response, or ``None`` on any
    failure (errors are printed, not raised).

    NOTE(review): the remotely uploaded files are never deleted; consider
    client.files.delete(...) after generation — confirm retention policy.
    """
    temp_file_paths = []
    uploaded_file_infos = []
    parts = []

    try:
        for img_bytes, original_filename in image_files:
            # Guess the MIME type from the original filename; fall back to a
            # generic binary type when it cannot be determined.
            mime_type, _ = mimetypes.guess_type(original_filename)
            if not mime_type:
                mime_type = "application/octet-stream"

            # Persist the bytes to a named temp file so the SDK can upload by
            # path; the suffix helps downstream type detection.
            suffix = os.path.splitext(original_filename)[1]
            with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp_file:
                tmp_file.write(img_bytes)
                temp_file_paths.append(tmp_file.name)

            temp_file_path = temp_file_paths[-1]
            print(f"[GenAI Info] Uploading temporary file: {temp_file_path} with MIME type: {mime_type}")
            # Upload by path. (The original also opened the file into an
            # unused handle here — removed.)
            uploaded_file = client.files.upload(file=temp_file_path)

            uploaded_file_infos.append(uploaded_file)
            parts.append(types.Part.from_uri(
                file_uri=uploaded_file.uri,
                mime_type=uploaded_file.mime_type,  # use mime_type from upload response
            ))
            print(f"[GenAI Info] File {original_filename} uploaded. URI: {uploaded_file.uri}")

        parts.append(types.Part.from_text(text=prompt))
        contents = [types.Content(role="user", parts=parts)]

        generate_content_config = types.GenerateContentConfig(
            response_modalities=["IMAGE", "TEXT"],  # request an image back
            safety_settings=[
                types.SafetySetting(category="HARM_CATEGORY_HARASSMENT", threshold="BLOCK_NONE"),
                types.SafetySetting(category="HARM_CATEGORY_HATE_SPEECH", threshold="BLOCK_NONE"),
                types.SafetySetting(category="HARM_CATEGORY_SEXUALLY_EXPLICIT", threshold="BLOCK_NONE"),
                types.SafetySetting(category="HARM_CATEGORY_DANGEROUS_CONTENT", threshold="BLOCK_NONE"),
            ],
        )

        print(f"[GenAI Info] Generating content with model: {IMAGE_GEN_MODEL}")
        response = client.models.generate_content(
            model=IMAGE_GEN_MODEL,
            contents=contents,
            config=generate_content_config,
        )

        # Return the first inline image part found; log any text parts, which
        # usually explain why no image was produced.
        if response.candidates:
            for candidate in response.candidates:
                if candidate.content is not None and hasattr(candidate.content, "parts"):
                    for part in candidate.content.parts:
                        if part.inline_data and part.inline_data.data:
                            print("[GenAI Info] Image data found in response.")
                            return part.inline_data.data
                        elif part.text:
                            print(f"[GenAI Info or Warning] Text response from model: {part.text}")
                else:
                    print("[GenAI Warning] Candidate content is None or missing 'parts'. Full candidate:", candidate)

        print("[GenAI Warning] No image data found directly in response parts.")
        return None
    except Exception as e:
        print(f"[GenAI Error] Image generation failed: {e}")
        traceback.print_exc()
        return None
    finally:
        # Always remove the local temp files, even on failure.
        for path in temp_file_paths:
            try:
                os.remove(path)
                print(f"[GenAI Info] Deleted temporary file: {path}")
            except OSError as e:
                print(f"[GenAI Error] Failed to delete temporary file {path}: {e}")
"""Prompt templates for the image-generation endpoints.

Each template is a ``str.format`` template; placeholder names match the
keyword arguments the routers pass in.
"""

OUTFIT_MATCH_PROMPT_TEMPLATE = """
Generate an image of a {model_gender} fashion model, with a mid-length wavy hairstyle.
Model characteristics: {model_age_range} age range, {model_race_ethnicity} ethnicity, {model_body_shape} body shape.
The model is {model_pose} confidently with arms relaxed, facing forward.
Crucially, the model must be wearing the exact same garments (e.g., dress, or top and bottom) and {garment_type} as shown in the two input reference images.
Photography details:
- Camera view: {camera_view_angle}.
- Distance from subject: {camera_distance_meters} meters.
- Lens: {camera_focal_length_mm}mm focal length.
- Aperture: f/{camera_aperture_f_number}.
- Lighting: {camera_lighting_condition}.
- Background: {camera_background}.
Aim for a realistic, high-quality fashion photograph.
"""

LIFESTYLE_TRYON_PROMPT_TEMPLATE = """
Generate an image of a {model_gender} fashion model, with a mid-length wavy hairstyle.
Model characteristics: {model_age_range} age range, {model_race_ethnicity} ethnicity, {model_body_shape} body shape.
The model is {model_pose} confidently with arms relaxed, facing forward.
Crucially, the model must be wearing the exact {garment_type} or carrying it (if it's a handbag/wallet) as shown in the input reference image.
Photography details:
- Camera view: {camera_view_angle}.
- Distance from subject: {camera_distance_meters} meters.
- Lens: {camera_focal_length_mm}mm focal length.
- Aperture: f/{camera_aperture_f_number}.
- Lighting: {camera_lighting_condition}.
- Background: {camera_background}.
Aim for a realistic, high-quality fashion photograph.
"""

OCCASION_BASED_PROMPT_TEMPLATE = """
Generate a single high-resolution collage image featuring a {model_gender} fashion model with a mid-length wavy hairstyle.
Model characteristics: {model_age_range} age range, {model_race_ethnicity} ethnicity, {model_body_shape} body shape.
The model is {model_pose} confidently with arms relaxed, facing forward.

The collage must depict the model holding the exact accessory {garment_type} shown in the input reference image, across three distinct occasions:

1. **Daytime Casual** — Bright natural daylight in an outdoor setting (e.g., street, park, or café).
2. **Evening Social** — Warm, low-light evening setting (e.g., party, restaurant, or event).
3. **Professional Environment** — Clean, minimal indoor office setup with diffused lighting.

There should be 3 panels, highlighting the {garment_type}. Each panel in the collage should maintain the same model features and outfit/accessory, while varying:
- Lighting conditions (natural daylight, ambient evening light, office light).
- Backgrounds and context appropriate to each occasion.
- Slight pose or expression adjustments that match the mood of each setting.

Photography details for each panel:
- Camera view: {camera_view_angle}.
- Distance from subject: {camera_distance_meters} meters.
- Lens: {camera_focal_length_mm}mm focal length.
- Aperture: f/{camera_aperture_f_number}.

Ensure the collage composition is visually balanced, realistic, and fashion-editorial quality. The accessory must remain clearly visible and consistent across all settings.
"""

WHAT_FITS_INSIDE_TEMPLATE = """
A top-view shot of an open bag with dimensions: height: {height_cm} cm, width: {width_cm} cm, length: {length_cm} cm, placed on a neutral flat surface.
Inside the bag are a few realistically fitting personal items selected based on the available internal space. These may include small items such as lipsticks, sunglasses, and a compact book or notebook.
If the bag is large enough, it may also include items like a laptop or a full-size book.
Ensure the items are neatly arranged and clearly visible from the top. Use natural lighting and realistic textures to highlight both the product’s material and its capacity, showcasing how it can be used in everyday life.
"""

SIZE_COMPARISION_TEMPLATE = """
A front-facing shot of a bag with dimensions: height: {height_cm} cm, width: {width_cm} cm, length: {length_cm} cm, placed on a neutral flat surface.
Arranged neatly beside the bag are realistically scaled personal items to visually demonstrate size and capacity.
These may include objects such as an iPad, laptop, iPhone, sunglasses, a compact book or notebook, and small accessories like lipsticks.
The items should be clearly visible and aligned next to the bag to provide an intuitive size comparison.
Use natural lighting and realistic textures to highlight the bag's material and context in everyday usage.
"""

JEWELLERY_SIZE_COMPARISION_TEMPLATE = """
A close-up, front-facing image of a jewellery piece with dimensions: height: {height_cm} cm, width: {width_cm} cm, length: {length_cm} cm, placed on a neutral, clean surface.
To provide a clear and intuitive sense of scale, the jewellery is positioned next to realistically sized everyday objects such as a standard coin (e.g., US quarter, INR ₹5 coin) or an AirPods case.
Ensure the objects are aligned neatly beside the jewellery piece, with accurate proportions and spacing, making the size comparison easy to understand at a glance.
Use natural lighting and high-resolution detail to highlight the material, craftsmanship, and design of the jewellery.
The background should remain minimal and unobtrusive to keep the focus on the item and its relative scale.
"""

WALLET_SIZE_COMPARISION_TEMPLATE = """
Generate a side-by-side comparison image of two wallets using the provided input images.
The first wallet has dimensions: height {height_cm1} cm, width {width_cm1} cm, length {length_cm1} cm.
The second wallet has dimensions: height {height_cm2} cm, width {width_cm2} cm, length {length_cm2} cm.
Place both wallets on a neutral flat background, maintaining realistic proportions based on their actual sizes.
Ensure consistent lighting and angle to allow for an accurate visual comparison.
The output should clearly show both wallets side by side to highlight differences in size and design.
"""

MANNEQUIN_TEMPLATE = """
Generate a realistic image of a mannequin wearing the input dress.
Use the provided dress image as the design reference.
The mannequin should be in a neutral standing pose, facing forward, with even lighting and a plain, light-colored background to highlight the outfit.
Ensure the fabric, pattern, color, and style of the dress are preserved accurately on the mannequin.
The final image should look like a studio fashion display for catalog or e-commerce use.
"""

DETAIL_SHOTS_TEMPLATE = """
Generate a collage of 3 to 4 close-up images showcasing detailed views of the provided fashion product from the input image.
Focus on important fabric and design elements such as stitching, embroidery, texture, patterns, and hardware details like buttons or zippers.
Each image in the collage should highlight a different section of the product, such as unique features, construction details, and material textures.
Maintain realistic lighting and accurately represent the fabric or material.
The final output should be a cohesive, high-quality collage suitable for e-commerce zoom views or fashion catalog detail pages.
"""

BACKGROUND_EDIT_TEMPLATE = """
Create a high-quality, realistic product-only visual featuring a {garment_type} placed on a lifelike surface such as a {surface_type}.
The product should be the sole focus, with no human models present. Ensure the background is editable, allowing for easy customization or replacement with other realistic lifestyle settings.
The surface should enhance the product’s appeal without distracting from it.
Photography details:
- Camera view: {camera_view_angle}.
- Distance from subject: {camera_distance_meters} meters.
- Lens: {camera_focal_length_mm}mm focal length.
- Aperture: f/{camera_aperture_f_number}.
- Lighting: {camera_lighting_condition}.
- Background: {camera_background}.
Aim for a realistic, high-quality fashion photograph.
"""

MULTI_VIEW_TEMPLATE = """
Generate side, front, back, top and more relevant distinct views (no duplicates) of the fashion product exactly as shown in the input image. The output should be clear and same as input and focus on details.
Each view should be unique and clearly separated and there should be no duplicate views. It should include all the relevant details.
Each section of the collage should accurately represent the product from its respective angle, maintaining realistic lighting, color, and material texture.
The final image should be visually balanced and suitable for e-commerce or catalog use, providing a concise multi-angle reference of the product.
"""

EDITORIAL_PROMPT_TEMPLATE = """
As a high-fashion editorial stylist and photographer, create a visually striking, hyper-realistic image of a
{model_race_ethnicity} {model_gender} model in their {model_age_range}, with a {model_body_shape} body type.
The model must be wearing the exact same top or dress as shown in the uploaded image—match the color, fit, fabric,
and all visible details precisely, with no changes or reinterpretation.
Pair this top with a bold, fashion-forward bottom thoughtfully styled to complement the top’s style and color.
Style the outfit and image in a colorful, visually rich palette, using complementary or contrasting colors to create a
bold, modern editorial look. Render the final image in full color, with attention to color harmony and visual impact.
The top must remain the clear focus of the outfit.

Capture the model in a {model_pose}, evoking the energy and composition of a high-end fashion editorial.
The camera should use a {camera_view_angle} angle from approximately {camera_distance_meters} meters away,
with a focal length of {camera_focal_length_mm}mm and an aperture of f/{camera_aperture_f_number}.
Lighting should be {camera_lighting_condition}, with dramatic, editorial effects like side lighting, deep shadows,
or high-contrast illumination to sculpt the clothing’s structure and add depth.
The background should be {camera_background}.

Frame the shot creatively, with attention to the lines and proportions of the garments.
The main focus must be on the clothing’s design, especially the top, with all details—fabric, tailoring, and silhouette—
clearly visible and highlighted. The final image must look like a magazine cover or a Zara campaign: bold, artistic,
and visually unforgettable, with the clothing as the star of the composition.
"""
+""" diff --git a/generated_image.png b/generated_image.png new file mode 100644 index 0000000000000000000000000000000000000000..4c1e0040b52e3254749fcfc0da7d0ce367221e65 --- /dev/null +++ b/generated_image.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2b882b2c55f78530f2fec97793e20066e6ae4c85100e1e27b94675d3ad857613 +size 939344 diff --git a/gradio_app.py b/gradio_app.py new file mode 100644 index 0000000000000000000000000000000000000000..542b3399f087437c97d80051cd09fe8b6ba534b2 --- /dev/null +++ b/gradio_app.py @@ -0,0 +1,1235 @@ +import gradio as gr +import requests +from io import BytesIO +from PIL import Image +import tempfile + +import os +import mimetypes +import google.generativeai as genai +import io +from dotenv import load_dotenv +# already have: from PIL import Image +load_dotenv() +API_KEY = os.environ.get("GEMINI_API_KEY") +if API_KEY: + genai.configure(api_key=API_KEY) +else: + print("Warning: GEMINI_API_KEY not found in .env") +API_BASE = "http://0.0.0.0:5350/api/v1" +BED_IMAGE_PATHS = [ + "/home/zclap/production/fashion-app/vendor-usecase-app/app/furniture/beds/bed -1.jpeg", + "/home/zclap/production/fashion-app/vendor-usecase-app/app/furniture/beds/bed - 2.jpg", +] +SOFA_IMAGE_PATHS = [ + "/home/zclap/production/fashion-app/vendor-usecase-app/app/furniture/sofa/sofa-1.jpg", + "/home/zclap/production/fashion-app/vendor-usecase-app/app/furniture/sofa/sofa-2.jpg", +] + +def pil_to_tempfile(pil_img, suffix=".jpg"): + """Write PIL image to a temporary file and return the filepath. 
Caller should delete file when done.""" + tmp = tempfile.NamedTemporaryFile(suffix=suffix, delete=False) + try: + pil_img.save(tmp, format="JPEG") + tmp.flush() + tmp.close() + return tmp.name + except Exception: + try: + tmp.close() + except Exception: + pass + raise + +def image_to_bytes_pil(img: Image.Image, fmt="JPEG"): + bio = BytesIO() + img.save(bio, format=fmt) + bio.seek(0) + return bio.getvalue() + +def call_api(endpoint: str, data: dict, main_image: Image.Image = None, extra_files: dict = None, timeout=60): + """ + Sends a multipart POST to API_BASE/endpoint. + - data: dict of form fields + - main_image: PIL image for field "garment_images" (optional) — will be added into files under key 'garment_images' + - extra_files: dict mapping fieldname -> (filename, bytes, mime) OR fieldname -> [ (filename, bytes, mime), ... ] + """ + files = [] + + # If main_image is provided, add it as an entry for 'garment_images' + if main_image is not None: + files.append(("garment_images", ("garment.jpg", image_to_bytes_pil(main_image), "image/jpeg"))) + + if extra_files: + for fieldname, fileval in extra_files.items(): + # fileval may be a single tuple or a list of tuples + if isinstance(fileval, list) or isinstance(fileval, tuple) and isinstance(fileval[0], tuple): + # treat as iterable of tuples + for single in fileval: + files.append((fieldname, single)) + else: + # single tuple + files.append((fieldname, fileval)) + + try: + resp = requests.post(f"{API_BASE}/{endpoint}", data=data, files=files, timeout=timeout) + except Exception as e: + return f"Exception: {e}", None + + if resp.status_code == 200: + try: + result_img = Image.open(BytesIO(resp.content)).convert("RGB") + return None, result_img + except Exception: + # If API returns JSON or text on success + return None, resp.text + else: + return f"Error {resp.status_code}: {resp.text}", None + + +# ---- Shared choices ---- +garment_types = ["clothing", "jewellery and watches", "wallet", "shoes", "handbags"] +genders 
= ["female"] +ages = ["teen", "18-25", "26-35", "36-45", "46-55", "56-65", "66+"] +body_shapes = ["rectangle", "pear", "hourglass", "inverted_triangle"] +ethnicities = ["white", "black", "asian", "latino", "mixed"] +poses = ["standing", "sitting", "lying down", "dancing", "running", "jumping", "walking", "bending", "twisting", "stretching", "flexing", "posing"] +view_angles = ["front", "45deg", "left", "right", "back"] +lighting_conditions = ["studio_softbox", "outdoor_sunny", "indoor_warm", "flat"] +backgrounds = [ + "White", "Lifestyle", "Beach Settings", "Cafe Environment", "Spring Garden", "Winter Snow", "Professional Settings" +] +surface_types = ["cotton", "silk", "denim", "leather", "synthetic"] + +# Generic shared submit for endpoints that use model settings + single garment image +def submit_shared(endpoint, garment_type, model_gender, model_age_range, model_body_shape, model_race_ethnicity, + model_pose, camera_view_angle, camera_distance_meters, camera_focal_length_mm, camera_aperture_f_number, + camera_lighting_condition, camera_background, garment_image): + data = { + "garment_type": garment_type, + "model_gender": model_gender, + "model_age_range": model_age_range, + "model_body_shape": model_body_shape, + "model_race_ethnicity": model_race_ethnicity, + "model_pose": model_pose, + "camera_view_angle": camera_view_angle, + "camera_distance_meters": camera_distance_meters, + "camera_focal_length_mm": camera_focal_length_mm, + "camera_aperture_f_number": camera_aperture_f_number, + "camera_lighting_condition": camera_lighting_condition, + "camera_background": camera_background, + } + return call_api(endpoint, data, main_image=garment_image) + +# ---------------- UI ---------------- +with gr.Blocks(title="FashionAI Studio") as demo: + # CSS to visually swap columns while allowing us to define settings first in code + gr.HTML( + """ + + """ + ) + + gr.Markdown("# FashionAI Studio") + + # Create Model Settings first (so variables exist), CSS makes it render 
to the right. + with gr.Row(): + with gr.Column(scale=1, elem_id="right_col"): + gr.HTML('
') + garment_type_sel = gr.Dropdown(garment_types, value=garment_types[0], label="Garment Type") + model_gender_sel = gr.Dropdown(genders, value=genders[0], label="Model Gender") + model_age_sel = gr.Dropdown(ages, value=ages[1], label="Model Age Range") + model_body_sel = gr.Dropdown(body_shapes, value=body_shapes[0], label="Model Body Shape") + model_ethnicity_sel = gr.Dropdown(ethnicities, value=ethnicities[0], label="Model Race & Ethnicity") + model_pose_sel = gr.Dropdown(poses, value=poses[0], label="Model Pose") + view_angle_sel = gr.Dropdown(view_angles, value=view_angles[0], label="View Angle") + distance_num = gr.Number(label="Distance (meters)", value=2) + focal_num = gr.Number(label="Focal Length (mm)", value=50) + aperture_num = gr.Number(label="Aperture (f-number)", value=2.8) + lighting_sel = gr.Dropdown(lighting_conditions, value=lighting_conditions[0], label="Lighting Condition") + background_sel = gr.Dropdown(backgrounds, value=backgrounds[0], label="Background") + + # Main workspace (left) + with gr.Column(scale=2, elem_id="left_col"): + + # Helper for simple subtab that uses shared settings + garment image + def make_shared_subtab(tab_label, endpoint): + with gr.Tab(tab_label): + with gr.Row(): + with gr.Column(scale=1): + img = gr.Image(label="Garment Image", type="pil") + btn = gr.Button(f"Generate — {tab_label}") + with gr.Column(scale=2): + out_img = gr.Image(label="Generated Result") + err = gr.Textbox(label="Error", interactive=False) + + btn.click( + fn=lambda *args, ep=endpoint: submit_shared(ep, *args), + inputs=[ + garment_type_sel, model_gender_sel, model_age_sel, model_body_sel, model_ethnicity_sel, + model_pose_sel, view_angle_sel, distance_num, focal_num, aperture_num, + lighting_sel, background_sel, img + ], + outputs=[err, out_img] + ) + + # Helper for background_edit (special inputs) + def make_background_edit_subtab(tab_label, endpoint): + with gr.Tab(tab_label): + with gr.Row(): + with gr.Column(scale=1): + gtype = 
gr.Dropdown(garment_types, value=garment_types[0], label="Garment Type") + surface = gr.Dropdown(surface_types, value=surface_types[0], label="Surface Type") + view = gr.Dropdown(view_angles, value=view_angles[0], label="Camera View Angle") + distance = gr.Number(label="Camera Distance (meters)", value=2.0) + focal = gr.Number(label="Camera Focal Length (mm)", value=50) + aperture = gr.Number(label="Camera Aperture (f-number)", value=2.8) + lighting = gr.Dropdown(["natural", "studio", "warm"], value="studio", label="Camera Lighting Condition") + background = gr.Dropdown(backgrounds, value=backgrounds[0], label="Camera Background") + img = gr.Image(label="Garment Image", type="pil") + btn = gr.Button("Generate Background Edit") + with gr.Column(scale=2): + out_img = gr.Image(label="Generated Result") + err = gr.Textbox(label="Error", interactive=False) + + def bg_submit(endpoint, gtype, surface, view, distance, focal, aperture, lighting, background, img): + data = { + "garment_type": gtype, + "surface_type": surface, + "camera_view_angle": view, + "camera_distance_meters": distance, + "camera_focal_length_mm": focal, + "camera_aperture_f_number": aperture, + "camera_lighting_condition": lighting, + "camera_background": background, + } + return call_api(endpoint, data, main_image=img) + + btn.click( + fn=lambda *args, ep=endpoint: bg_submit(ep, *args), + inputs=[gtype, surface, view, distance, focal, aperture, lighting, background, img], + outputs=[err, out_img] + ) + + # Helper for bags -> what fits inside (dimensions) + def make_whatfits_subtab(tab_label, endpoint): + with gr.Tab(tab_label): + with gr.Row(): + with gr.Column(scale=1): + length = gr.Number(label="Length (cm)", value=20) + width = gr.Number(label="Width (cm)", value=10) + height = gr.Number(label="Height (cm)", value=8) + img = gr.Image(label="Bag Image (optional)", type="pil") + btn = gr.Button("Generate — What Fits Inside") + with gr.Column(scale=2): + out_txt = gr.Textbox(label="Result / 
Suggestions", interactive=False) + out_img = gr.Image(label="Preview (if any)") + err = gr.Textbox(label="Error", interactive=False) + + def wf_submit(endpoint, length, width, height, img): + """ + - Map your UI fields to the expected API field names. + - Always return three values: (err_str_or_None, out_image_or_None, out_text_or_empty_str) + """ + # Map to expected backend keys (change these if your backend uses different names) + data = { + "product_length": length, + "product_width": width, + "product_height": height, + } + + extra = {} + # Some backends expect the main image field to be 'garment_images' in files. + if img is not None: + extra["garment_images"] = ("bag.jpg", image_to_bytes_pil(img), "image/jpeg") + + err_msg, resp = call_api(endpoint, data, main_image=None, extra_files=extra) + + # If call_api reported an error, return that as the error textbox + empty image/text outputs + if err_msg: + return err_msg, None, "" + + # resp may be a PIL.Image or text + try: + if isinstance(resp, Image.Image): + return None, resp, "" + except Exception: + pass + + # fallback: return textual response into out_txt + return None, None, str(resp) + + btn.click(fn=lambda *args, ep=endpoint: wf_submit(ep, *args), + inputs=[length, width, height, img], + outputs=[err, out_img, out_txt]) + + + # Helper for size comparison (dimensions) + # Replace your current make_sizecompare_subtab with this version: + + def make_sizecompare_subtab(tab_label, endpoint): + with gr.Tab(tab_label): + with gr.Row(): + with gr.Column(scale=1): + ref_img = gr.Image(label="Reference Product Image (optional)", type="pil") + ref_l = gr.Number(label="Reference Length (cm)", value=10) + ref_w = gr.Number(label="Reference Width (cm)", value=5) + ref_h = gr.Number(label="Reference Height (cm)", value=2) + + cmp_img = gr.Image(label="Compare Product Image (optional)", type="pil") + cmp_l = gr.Number(label="Compare Length (cm)", value=12) + cmp_w = gr.Number(label="Compare Width (cm)", value=6) + cmp_h 
= gr.Number(label="Compare Height (cm)", value=3) + + btn = gr.Button("Compare Sizes") + with gr.Column(scale=2): + out_img = gr.Image(label="Comparison Visualization / Preview") + out_txt = gr.Textbox(label="Comparison Result", interactive=False) + err = gr.Textbox(label="Error", interactive=False) + + def size_submit(ep, garment_type_val, ref_img, ref_l, ref_w, ref_h, cmp_img, cmp_l, cmp_w, cmp_h): + # Map UI label -> backend expected token (edit this mapping to match your backend) + mapping = { + "clothing": "clothing", + "jewellery and watches": "jewellery", + "wallet": "wallet", + "shoes": "shoes", + "handbags": "handbag", # you might need "bag" or "handbag" depending on backend + "bags": "handbag", + } + + # Defensive: ensure we have a garment type + if not garment_type_val: + return "Please select a garment type in the Model Settings (right panel).", None, "" + + # translate using mapping; fall back to original value + send_garment_type = mapping.get(str(garment_type_val).strip().lower(), garment_type_val) + + # The backend requires at least one image as 'garment_images' + if (ref_img is None) and (cmp_img is None): + return "Please upload at least one product image (reference or compare).", None, "" + + # Prepare payload + data = { + "garment_type": send_garment_type, + "ref_length_cm": ref_l, + "ref_width_cm": ref_w, + "ref_height_cm": ref_h, + "cmp_length_cm": cmp_l, + "cmp_width_cm": cmp_w, + "cmp_height_cm": cmp_h, + } + + # Build files ensuring 'garment_images' exists (required by your backend) + extra_files = {} + if ref_img is not None: + extra_files["garment_images"] = ("ref.jpg", image_to_bytes_pil(ref_img), "image/jpeg") + extra_files["ref_image"] = ("ref.jpg", image_to_bytes_pil(ref_img), "image/jpeg") + if cmp_img is not None: + if "garment_images" not in extra_files: + extra_files["garment_images"] = ("cmp.jpg", image_to_bytes_pil(cmp_img), "image/jpeg") + extra_files["cmp_image"] = ("cmp.jpg", image_to_bytes_pil(cmp_img), "image/jpeg") + + # 
debug log: print what we are sending + print("Calling size endpoint:", ep, "garment_type_sent:", send_garment_type, "data:", data.keys(), "files:", list(extra_files.keys())) + + err_msg, resp = call_api(ep, data, main_image=None, extra_files=extra_files) + + # If server returned an error, include the garment_type we sent to help debugging + if err_msg: + # Show server message + what we attempted to send + return f"{err_msg}\n(garment_type sent: {send_garment_type})", None, "" + + # resp may be an image or text + if isinstance(resp, Image.Image): + return None, resp, "" + return None, None, str(resp) + + + # Note: we capture the endpoint via default arg ep=endpoint in the lambda below. + # Inputs list must contain component objects only (no plain strings). + btn.click( + fn=lambda garment_type_val, ref_img, ref_l, ref_w, ref_h, cmp_img, cmp_l, cmp_w, cmp_h, ep=endpoint: + size_submit(ep, garment_type_val, ref_img, ref_l, ref_w, ref_h, cmp_img, cmp_l, cmp_w, cmp_h), + inputs=[garment_type_sel, ref_img, ref_l, ref_w, ref_h, cmp_img, cmp_l, cmp_w, cmp_h], + outputs=[err, out_img, out_txt] + ) + + + + + # Helper for Outfit & Product Visualization (bag image + additional product image) + def make_outfit_product_viz_subtab(tab_label, endpoint): + with gr.Tab(tab_label): + with gr.Row(): + with gr.Column(scale=1): + bag_img = gr.Image(label="Bag Image (primary)", type="pil") + add_img = gr.Image(label="Additional Product Image (secondary)", type="pil") + btn = gr.Button("Generate Outfit & Product Visualization") + with gr.Column(scale=2): + out_img = gr.Image(label="Visualization") + err = gr.Textbox(label="Error", interactive=False) + + def opv_submit( + ep, + garment_type_val, + model_gender_val, + model_age_val, + model_body_val, + model_ethnicity_val, + model_pose_val, + camera_view_val, + camera_distance_val, + camera_focal_val, + camera_aperture_val, + camera_lighting_val, + camera_background_val, + bag_img, + add_img, + ): + # Required fields check + missing = [] + 
if not garment_type_val: + missing.append("garment_type") + # require exactly two images + if bag_img is None or add_img is None: + missing.append("two garment images (bag_img and add_img)") + + required_fields = { + "model_gender": model_gender_val, + "model_age_range": model_age_val, + "model_body_shape": model_body_val, + "model_race_ethnicity": model_ethnicity_val, + "model_pose": model_pose_val, + "camera_view_angle": camera_view_val, + "camera_distance_meters": camera_distance_val, + "camera_focal_length_mm": camera_focal_val, + "camera_aperture_f_number": camera_aperture_val, + "camera_lighting_condition": camera_lighting_val, + "camera_background": camera_background_val, + } + for k, v in required_fields.items(): + if v is None or (isinstance(v, str) and not str(v).strip()): + missing.append(k) + + if missing: + return f"Missing required fields: {', '.join(missing)}", None + + data = { + "garment_type": garment_type_val, + "model_gender": model_gender_val, + "model_age_range": model_age_val, + "model_body_shape": model_body_val, + "model_race_ethnicity": model_ethnicity_val, + "model_pose": model_pose_val, + "camera_view_angle": camera_view_val, + "camera_distance_meters": camera_distance_val, + "camera_focal_length_mm": camera_focal_val, + "camera_aperture_f_number": camera_aperture_val, + "camera_lighting_condition": camera_lighting_val, + "camera_background": camera_background_val, + } + + # Build extra_files: put both images under 'garment_images' + extra_files = {} + # supply a list of tuples so call_api will send two files under the same field + extra_files["garment_images"] = [ + ("bag_primary.jpg", image_to_bytes_pil(bag_img), "image/jpeg"), + ("bag_secondary.jpg", image_to_bytes_pil(add_img), "image/jpeg") + ] + + err_msg, resp = call_api(ep, data, main_image=None, extra_files=extra_files) + + if err_msg: + return err_msg, None + + if isinstance(resp, Image.Image): + return None, resp + return None, None + + btn.click( + fn=lambda *args, 
ep=endpoint: opv_submit( + ep, + args[0], # garment_type_sel + args[1], # model_gender_sel + args[2], # model_age_sel + args[3], # model_body_sel + args[4], # model_ethnicity_sel + args[5], # model_pose_sel + args[6], # view_angle_sel + args[7], # distance_num + args[8], # focal_num + args[9], # aperture_num + args[10], # lighting_sel + args[11], # background_sel + args[12], # bag_img + args[13], # add_img + ), + inputs=[ + garment_type_sel, model_gender_sel, model_age_sel, model_body_sel, model_ethnicity_sel, + model_pose_sel, view_angle_sel, distance_num, focal_num, aperture_num, + lighting_sel, background_sel, bag_img, add_img + ], + outputs=[err, out_img] + ) + + + + # Wallets: interactive size guide (two wallet images + dims) + def make_wallet_sizeguide_subtab(tab_label, endpoint): + with gr.Tab(tab_label): + with gr.Row(): + with gr.Column(scale=1): + wallet1 = gr.Image(label="Upload wallet image 1", type="pil") + l1 = gr.Number(label="First Image Length (cm)", value=10) + w1 = gr.Number(label="First Image Width (cm)", value=7) + h1 = gr.Number(label="First Image Height (cm)", value=2) + wallet2 = gr.Image(label="Upload wallet image 2", type="pil") + l2 = gr.Number(label="Second Image Length (cm)", value=10) + w2 = gr.Number(label="Second Image Width (cm)", value=7) + h2 = gr.Number(label="Second Image Height (cm)", value=2) + btn = gr.Button("Generate Interactive Size Guide") + with gr.Column(scale=2): + out_img = gr.Image(label="Guide / Visualization") + out_txt = gr.Textbox(label="Notes", interactive=False) + err = gr.Textbox(label="Error", interactive=False) + + def walletsize_submit(endpoint, w1_img, l1, w1_, h1_, w2_img, l2, w2_, h2_): + data = { + "first_length_cm": l1, + "first_width_cm": w1_, + "first_height_cm": h1_, + "second_length_cm": l2, + "second_width_cm": w2_, + "second_height_cm": h2_, + } + extra = {} + if w1_img is not None: + extra["wallet_image_1"] = ("wallet1.jpg", image_to_bytes_pil(w1_img), "image/jpeg") + if w2_img is not None: + 
extra["wallet_image_2"] = ("wallet2.jpg", image_to_bytes_pil(w2_img), "image/jpeg") + return call_api(endpoint, data, main_image=None, extra_files=extra) + + btn.click(fn=lambda *args, ep=endpoint: walletsize_submit(ep, *args), + inputs=[wallet1, l1, w1, h1, wallet2, l2, w2, h2], + outputs=[err, out_img, out_txt]) + + # Wallets: cross-category pairing (wallet image + garment image) + def make_wallet_crosspair_subtab(tab_label, endpoint): + with gr.Tab(tab_label): + with gr.Row(): + with gr.Column(scale=1): + wallet_img = gr.Image(label="Wallet Image", type="pil") + garment_img = gr.Image(label="Garment Image", type="pil") + btn = gr.Button("Generate Cross-Category Pairing") + with gr.Column(scale=2): + out_img = gr.Image(label="Visualization") + err = gr.Textbox(label="Error", interactive=False) + + def wc_submit( + ep, + garment_type_val, + model_gender_val, + model_age_val, + model_body_val, + model_ethnicity_val, + model_pose_val, + camera_view_val, + camera_distance_val, + camera_focal_val, + camera_aperture_val, + camera_lighting_val, + camera_background_val, + wallet_img, + garment_img, + ): + # Validate required fields + missing = [] + if not garment_type_val: + missing.append("garment_type") + # at least one image is required (server also expects garment_images) + if wallet_img is None and garment_img is None: + missing.append("at least one image (wallet or garment)") + + required_fields = { + "model_gender": model_gender_val, + "model_age_range": model_age_val, + "model_body_shape": model_body_val, + "model_race_ethnicity": model_ethnicity_val, + "model_pose": model_pose_val, + "camera_view_angle": camera_view_val, + "camera_distance_meters": camera_distance_val, + "camera_focal_length_mm": camera_focal_val, + "camera_aperture_f_number": camera_aperture_val, + "camera_lighting_condition": camera_lighting_val, + "camera_background": camera_background_val, + } + for k, v in required_fields.items(): + if v is None or (isinstance(v, str) and not 
str(v).strip()): + missing.append(k) + + if missing: + return f"Missing required fields: {', '.join(missing)}", None + + # Prepare payload with all required model/camera fields + data = { + "garment_type": garment_type_val, + "model_gender": model_gender_val, + "model_age_range": model_age_val, + "model_body_shape": model_body_val, + "model_race_ethnicity": model_ethnicity_val, + "model_pose": model_pose_val, + "camera_view_angle": camera_view_val, + "camera_distance_meters": camera_distance_val, + "camera_focal_length_mm": camera_focal_val, + "camera_aperture_f_number": camera_aperture_val, + "camera_lighting_condition": camera_lighting_val, + "camera_background": camera_background_val, + } + + # Build extra_files: put available images under 'garment_images' (list) + extra_files = {} + images_list = [] + if wallet_img is not None: + images_list.append(("wallet.jpg", image_to_bytes_pil(wallet_img), "image/jpeg")) + if garment_img is not None: + images_list.append(("garment.jpg", image_to_bytes_pil(garment_img), "image/jpeg")) + + # Must send at least one under 'garment_images' — send as list so call_api sends multiple files if present + if images_list: + extra_files["garment_images"] = images_list + + # Optionally also attach them under named keys if backend accepts them + if wallet_img is not None: + extra_files["wallet_image"] = ("wallet.jpg", image_to_bytes_pil(wallet_img), "image/jpeg") + if garment_img is not None: + extra_files["garment_image"] = ("garment.jpg", image_to_bytes_pil(garment_img), "image/jpeg") + + # Debug log + print("Calling cross-pair endpoint:", ep, "garment_type_sent:", garment_type_val, "files:", list(extra_files.keys())) + + err_msg, resp = call_api(ep, data, main_image=None, extra_files=extra_files) + + if err_msg: + return err_msg, None + + if isinstance(resp, Image.Image): + return None, resp + return None, None + + btn.click( + fn=lambda *args, ep=endpoint: wc_submit( + ep, + args[0], # garment_type_sel + args[1], # model_gender_sel + 
args[2], # model_age_sel + args[3], # model_body_sel + args[4], # model_ethnicity_sel + args[5], # model_pose_sel + args[6], # view_angle_sel + args[7], # distance_num + args[8], # focal_num + args[9], # aperture_num + args[10], # lighting_sel + args[11], # background_sel + args[12], # wallet_img + args[13], # garment_img + ), + inputs=[ + garment_type_sel, model_gender_sel, model_age_sel, model_body_sel, model_ethnicity_sel, + model_pose_sel, view_angle_sel, distance_num, focal_num, aperture_num, + lighting_sel, background_sel, wallet_img, garment_img + ], + outputs=[err, out_img] + ) + + + # Jewellery: outfit-to-jewelry match (jewelry image + garment image) + def make_jewellery_match_subtab(tab_label, endpoint): + with gr.Tab(tab_label): + with gr.Row(): + with gr.Column(scale=1): + jewelry_img = gr.Image(label="Jewelry Image", type="pil") + garment_img = gr.Image(label="Garment Image", type="pil") + btn = gr.Button("Generate Match Visualization") + with gr.Column(scale=2): + out_img = gr.Image(label="Visualization") + err = gr.Textbox(label="Error", interactive=False) + + def jm_submit( + ep, + garment_type_val, + model_gender_val, + model_age_val, + model_body_val, + model_ethnicity_val, + model_pose_val, + camera_view_val, + camera_distance_val, + camera_focal_val, + camera_aperture_val, + camera_lighting_val, + camera_background_val, + jewelry_img, + garment_img, + ): + # Validate required fields + missing = [] + if not garment_type_val: + missing.append("garment_type") + # at least one image must be provided as garment_images + if jewelry_img is None and garment_img is None: + missing.append("at least one image (jewelry or garment)") + + required_fields = { + "model_gender": model_gender_val, + "model_age_range": model_age_val, + "model_body_shape": model_body_val, + "model_race_ethnicity": model_ethnicity_val, + "model_pose": model_pose_val, + "camera_view_angle": camera_view_val, + "camera_distance_meters": camera_distance_val, + "camera_focal_length_mm": 
camera_focal_val, + "camera_aperture_f_number": camera_aperture_val, + "camera_lighting_condition": camera_lighting_val, + "camera_background": camera_background_val, + } + for k, v in required_fields.items(): + if v is None or (isinstance(v, str) and not str(v).strip()): + missing.append(k) + + if missing: + return f"Missing required fields: {', '.join(missing)}", None + + # Build payload + data = { + "garment_type": garment_type_val, + "model_gender": model_gender_val, + "model_age_range": model_age_val, + "model_body_shape": model_body_val, + "model_race_ethnicity": model_ethnicity_val, + "model_pose": model_pose_val, + "camera_view_angle": camera_view_val, + "camera_distance_meters": camera_distance_val, + "camera_focal_length_mm": camera_focal_val, + "camera_aperture_f_number": camera_aperture_val, + "camera_lighting_condition": camera_lighting_val, + "camera_background": camera_background_val, + } + + # Build files: attach both images under 'garment_images' (list) + images_list = [] + if jewelry_img is not None: + images_list.append(("jewelry.jpg", image_to_bytes_pil(jewelry_img), "image/jpeg")) + if garment_img is not None: + images_list.append(("garment.jpg", image_to_bytes_pil(garment_img), "image/jpeg")) + + extra_files = {} + if images_list: + extra_files["garment_images"] = images_list + # Also attach named keys for compatibility + if jewelry_img is not None: + extra_files["jewelry_image"] = ("jewelry.jpg", image_to_bytes_pil(jewelry_img), "image/jpeg") + if garment_img is not None: + extra_files["garment_image"] = ("garment.jpg", image_to_bytes_pil(garment_img), "image/jpeg") + + # Debug + print("Calling outfit->jewelry endpoint:", ep, "garment_type:", garment_type_val, "files:", list(extra_files.keys())) + + err_msg, resp = call_api(ep, data, main_image=None, extra_files=extra_files) + if err_msg: + return err_msg, None + + if isinstance(resp, Image.Image): + return None, resp + # fallback: show textual response in err box + return None, None + + 
btn.click( + fn=lambda *args, ep=endpoint: jm_submit( + ep, + args[0], # garment_type_sel + args[1], # model_gender_sel + args[2], # model_age_sel + args[3], # model_body_sel + args[4], # model_ethnicity_sel + args[5], # model_pose_sel + args[6], # view_angle_sel + args[7], # distance_num + args[8], # focal_num + args[9], # aperture_num + args[10], # lighting_sel + args[11], # background_sel + args[12], # jewelry_img + args[13], # garment_img + ), + inputs=[ + garment_type_sel, model_gender_sel, model_age_sel, model_body_sel, model_ethnicity_sel, + model_pose_sel, view_angle_sel, distance_num, focal_num, aperture_num, + lighting_sel, background_sel, jewelry_img, garment_img + ], + outputs=[err, out_img] + ) + + + # Jewellery: size comparison with dimensions + def make_jewellery_size_subtab(tab_label, endpoint): + with gr.Tab(tab_label): + with gr.Row(): + with gr.Column(scale=1): + jewelry_img = gr.Image(label="Jewelry Image", type="pil") + l = gr.Number(label="Length (cm)") + w = gr.Number(label="Width (cm)") + h = gr.Number(label="Height (cm)") + btn = gr.Button("Generate Size Comparison") + with gr.Column(scale=2): + out_img = gr.Image(label="Size Comparison Result") + err = gr.Textbox(label="Error", interactive=False) + + def js_submit(ep, garment_type_val, jewelry_img, l, w, h): + # Validate required fields + missing = [] + if not garment_type_val: + missing.append("garment_type") + if jewelry_img is None: + missing.append("garment_images (jewelry image)") + + if missing: + return f"Missing required fields: {', '.join(missing)}", None + + # Build payload using expected keys + data = { + "garment_type": garment_type_val, + "length_cm": l, + "width_cm": w, + "height_cm": h, + } + + # Files: send the jewelry image as 'garment_images' (server expects this) + extra_files = {} + extra_files["garment_images"] = [("jewelry.jpg", image_to_bytes_pil(jewelry_img), "image/jpeg")] + # also attach under a named key for compatibility + extra_files["jewelry_image"] = 
("jewelry.jpg", image_to_bytes_pil(jewelry_img), "image/jpeg") + + # Debug log (optional) + print("Calling jewellery size endpoint:", ep, "garment_type_sent:", garment_type_val, "data_keys:", data.keys()) + + err_msg, resp = call_api(ep, data, main_image=None, extra_files=extra_files) + + if err_msg: + return err_msg, None + + # If the API returns an image, show it in out_img + if isinstance(resp, Image.Image): + return None, resp + + # Otherwise show textual response as an error/info message and no image + return None, None + + btn.click( + fn=lambda *args, ep=endpoint: js_submit(ep, *args), + inputs=[garment_type_sel, jewelry_img, l, w, h], + outputs=[err, out_img] + ) + + + + # Shoes: outfit match preview (shoe image + garment image) + def make_shoes_outfit_subtab(tab_label, endpoint): + with gr.Tab(tab_label): + with gr.Row(): + with gr.Column(scale=1): + shoe_img = gr.Image(label="Shoe Image", type="pil") + garment_img = gr.Image(label="Garment Image", type="pil") + btn = gr.Button("Generate Outfit Match Preview") + with gr.Column(scale=2): + out_img = gr.Image(label="Visualization") + err = gr.Textbox(label="Error", interactive=False) + + def so_submit( + ep, + garment_type_val, + model_gender_val, + model_age_val, + model_body_val, + model_ethnicity_val, + model_pose_val, + camera_view_val, + camera_distance_val, + camera_focal_val, + camera_aperture_val, + camera_lighting_val, + camera_background_val, + shoe_img, + garment_img, + ): + # Validate required fields + missing = [] + if not garment_type_val: + missing.append("garment_type") + # ensure at least one image + if shoe_img is None and garment_img is None: + missing.append("at least one image (shoe or garment)") + + required_fields = { + "model_gender": model_gender_val, + "model_age_range": model_age_val, + "model_body_shape": model_body_val, + "model_race_ethnicity": model_ethnicity_val, + "model_pose": model_pose_val, + "camera_view_angle": camera_view_val, + "camera_distance_meters": 
camera_distance_val, + "camera_focal_length_mm": camera_focal_val, + "camera_aperture_f_number": camera_aperture_val, + "camera_lighting_condition": camera_lighting_val, + "camera_background": camera_background_val, + } + for k, v in required_fields.items(): + if v is None or (isinstance(v, str) and not str(v).strip()): + missing.append(k) + + if missing: + return f"Missing required fields: {', '.join(missing)}", None + + # Build payload using required keys + data = { + "garment_type": garment_type_val, + "model_gender": model_gender_val, + "model_age_range": model_age_val, + "model_body_shape": model_body_val, + "model_race_ethnicity": model_ethnicity_val, + "model_pose": model_pose_val, + "camera_view_angle": camera_view_val, + "camera_distance_meters": camera_distance_val, + "camera_focal_length_mm": camera_focal_val, + "camera_aperture_f_number": camera_aperture_val, + "camera_lighting_condition": camera_lighting_val, + "camera_background": camera_background_val, + } + + # Prepare files: put available images under 'garment_images' as a list + images_list = [] + if shoe_img is not None: + images_list.append(("shoe.jpg", image_to_bytes_pil(shoe_img), "image/jpeg")) + if garment_img is not None: + images_list.append(("garment.jpg", image_to_bytes_pil(garment_img), "image/jpeg")) + + extra_files = {} + if images_list: + extra_files["garment_images"] = images_list + # Also send named keys for compatibility + if shoe_img is not None: + extra_files["shoe_image"] = ("shoe.jpg", image_to_bytes_pil(shoe_img), "image/jpeg") + if garment_img is not None: + extra_files["garment_image"] = ("garment.jpg", image_to_bytes_pil(garment_img), "image/jpeg") + + # Debug log + print("Calling shoes outfit-match endpoint:", ep, "garment_type:", garment_type_val, "files:", list(extra_files.keys())) + + err_msg, resp = call_api(ep, data, main_image=None, extra_files=extra_files) + if err_msg: + return err_msg, None + + if isinstance(resp, Image.Image): + return None, resp + return None, 
None + + btn.click( + fn=lambda *args, ep=endpoint: so_submit( + ep, + args[0], # garment_type_sel + args[1], # model_gender_sel + args[2], # model_age_sel + args[3], # model_body_sel + args[4], # model_ethnicity_sel + args[5], # model_pose_sel + args[6], # view_angle_sel + args[7], # distance_num + args[8], # focal_num + args[9], # aperture_num + args[10], # lighting_sel + args[11], # background_sel + args[12], # shoe_img + args[13], # garment_img + ), + inputs=[ + garment_type_sel, model_gender_sel, model_age_sel, model_body_sel, model_ethnicity_sel, + model_pose_sel, view_angle_sel, distance_num, focal_num, aperture_num, + lighting_sel, background_sel, shoe_img, garment_img + ], + outputs=[err, out_img] + ) + + + + # Now build the tabs & subtabs + with gr.Tabs(): + # Clothes + with gr.Tab("👕 Clothes"): + with gr.Tabs(): + make_shared_subtab("Lifestyle Images", "tryon") + make_shared_subtab("Studio & Minimal", "tryon") + make_shared_subtab("Flat Lay & Ghost", "mannequin") + make_shared_subtab("Editorial & Fashion", "editorial") + make_shared_subtab("Detail Shots", "detail") + make_background_edit_subtab("Background Edit", "background_edit") + + # Bags + with gr.Tab("👜 Bags"): + with gr.Tabs(): + make_shared_subtab("Model Try-On Simulation", "tryon") + make_whatfits_subtab("What Fits Inside", "whatfits") + make_sizecompare_subtab("Smart Size Comparison", "size") + make_outfit_product_viz_subtab("Outfit & Product Visualization", "outfit-match") + make_shared_subtab("Multiview", "multi_view") + make_background_edit_subtab("Background Edit", "background_edit") + make_shared_subtab("Detail Shots", "detail") + + # Wallets + with gr.Tab("👛 Wallets"): + with gr.Tabs(): + make_wallet_sizeguide_subtab("Interactive Size Guide", "walletsize") + make_wallet_crosspair_subtab("Cross-Category Pairing", "outfit-match") + make_shared_subtab("Occasion-Based Styling", "occasion") + make_shared_subtab("Detail Shots", "detail") + make_shared_subtab("Multiview", "multi_view") + 
make_background_edit_subtab("Background Edit", "background_edit") + + # Jewellery & Watches + with gr.Tab("💍 Jewelry & Watches"): + with gr.Tabs(): + make_shared_subtab("AI Model Shot Generator", "tryon") + make_jewellery_match_subtab("Outfit-to-Jewelry Match Visualizer", "outfit-match") + make_shared_subtab("Occasion-Based Styling Suggestions", "occasion") + make_jewellery_size_subtab = make_jewellery_size_subtab # alias to avoid redefinition issues + make_jewellery_size_subtab("Visual Size Comparison Tool", "size") + + # Shoes + with gr.Tab("👟 Shoes"): + with gr.Tabs(): + make_shared_subtab("Model Shot Generator", "tryon") + make_shoes_outfit_subtab("Outfit Match Preview", "outfit-match") + make_shared_subtab("Multiview", "multi_view") + make_background_edit_subtab("Background Edit", "background_edit") + make_shared_subtab("Detail Shots", "detail") + with gr.Tab("🛋 Furniture"): + # --- Reusable helper (make sure it's defined once in the file) --- + def analyze_room(image_path: str): + if not API_KEY: + return "GEMINI_API_KEY not configured.", None, gr.update(visible=False) + if not image_path: + return "Please upload an image first.", None, gr.update(visible=False) + + model = genai.GenerativeModel(model_name="gemini-2.5-pro") + prompt = """ + Based on the image provided: + In one word, what type of room is this? + In one word, what is the hero object in the room (the object that covers the most area)? 
+ """ + try: + if not os.path.exists(image_path): + return f"Error: Image file not found at '{image_path}'", None, gr.update(visible=False) + + image_file = genai.upload_file(path=image_path) + response = model.generate_content([prompt, image_file]) + response_text = response.text.strip().lower() + try: + genai.delete_file(image_file.name) + except Exception: + pass + + # load example images as PIL images (in-memory) for the gallery + if "bedroom" in response_text or "bed" in response_text: + gallery_images = [Image.open(p).convert("RGB") for p in BED_IMAGE_PATHS if os.path.exists(p)] + if not gallery_images: + return "No example bed images found.", "bed", gr.update(visible=False) + return "Analysis: Bedroom\nHero Object: Bed", "bed", gr.update(visible=True, value=gallery_images) + elif "livingroom" in response_text or "sofa" in response_text or "living room" in response_text: + gallery_images = [Image.open(p).convert("RGB") for p in SOFA_IMAGE_PATHS if os.path.exists(p)] + if not gallery_images: + return "No example sofa images found.", "sofa", gr.update(visible=False) + return "Analysis: Living Room\nHero Object: Sofa", "sofa", gr.update(visible=True, value=gallery_images) + else: + return "Could not determine room type.", None, gr.update(visible=False) + + except Exception as e: + try: + if 'image_file' in locals() and hasattr(image_file, 'name'): + genai.delete_file(image_file.name) + except Exception: + pass + return f"An error occurred: {e}", None, gr.update(visible=False) + + + + def replace_object(original_image_arg, example_image_arg, hero_object): + """ + original_image_arg: filepath (str) or PIL.Image + example_image_arg: PIL.Image or filepath (str) + """ + if not API_KEY: + return "GEMINI_API_KEY not configured.", None + if original_image_arg is None or example_image_arg is None: + return "Please upload an image and select an example style.", None + if not hero_object: + return "Analysis must be run first to identify the object.", None + + model = 
genai.GenerativeModel(model_name="gemini-2.5-flash-image") + prompt = f"Replace the {hero_object} in the first image with the style and type of the {hero_object} from the second image. Maintain the original room's background, lighting, and perspective." + + orig_tmp = None + ex_tmp = None + original_image_file = None + example_image_file = None + try: + # handle original (can be filepath or PIL) + if isinstance(original_image_arg, Image.Image): + orig_tmp = pil_to_tempfile(original_image_arg) + original_image_file = genai.upload_file(path=orig_tmp) + elif isinstance(original_image_arg, str) and os.path.exists(original_image_arg): + original_image_file = genai.upload_file(path=original_image_arg) + else: + return "Original image invalid.", None + + # handle example (can be PIL or filepath) + if isinstance(example_image_arg, Image.Image): + ex_tmp = pil_to_tempfile(example_image_arg) + example_image_file = genai.upload_file(path=ex_tmp) + elif isinstance(example_image_arg, str) and os.path.exists(example_image_arg): + example_image_file = genai.upload_file(path=example_image_arg) + else: + return "Selected example image invalid.", None + + response = model.generate_content([ + prompt, + original_image_file, + example_image_file + ]) + + # cleanup uploaded files on genai side + try: + if original_image_file and hasattr(original_image_file, 'name'): + genai.delete_file(original_image_file.name) + if example_image_file and hasattr(example_image_file, 'name'): + genai.delete_file(example_image_file.name) + except Exception: + pass + + if not getattr(response, "candidates", None): + return "Generation failed: no candidates in response.", None + + generated_image_data = response.candidates[0].content.parts[0].inline_data.data + generated_image = Image.open(io.BytesIO(generated_image_data)).convert("RGB") + return "Image generated successfully.", generated_image + + except Exception as e: + # debug print + print("replace_object error:", e) + if 'response' in locals(): + 
print("RAW RESPONSE:", response) + return f"An unexpected error occurred: {e}", None + + finally: + # cleanup temp files + for p in (orig_tmp, ex_tmp): + if p and os.path.exists(p): + try: + os.remove(p) + except Exception: + pass + + + # --- UI layout for the Furniture tab --- + gr.Markdown("### Furniture / Room Styler") + with gr.Row(): + with gr.Column(scale=1): + gr.Markdown("#### Step 1 — Upload & Analyze") + furniture_input_image = gr.Image(type="filepath", label="Upload Room Image") + furniture_analyze_button = gr.Button("Analyze Room", variant="secondary") + furniture_analysis_output = gr.Textbox(label="Analysis Result", interactive=False) + + # states + furniture_hero_state = gr.State() + furniture_selected_example_state = gr.State() + + with gr.Column(scale=2): + gr.Markdown("#### Step 2 — Choose Style Example") + furniture_example_gallery = gr.Gallery( + label="Example Images (select one)", + visible=False, + columns=2, + height="auto", + object_fit="contain" + ) + + gr.Markdown("---") + gr.Markdown("#### Step 3 — Generate Replacement") + furniture_replace_button = gr.Button("Generate New Room Image", variant="primary") + + with gr.Row(): + furniture_output_text = gr.Textbox(label="Generation Status", interactive=False, scale=1) + furniture_output_image = gr.Image(label="Your New Room", interactive=False, scale=2) + + # Wire up events + furniture_analyze_button.click( + fn=analyze_room, + inputs=furniture_input_image, + outputs=[furniture_analysis_output, furniture_hero_state, furniture_example_gallery] + ) + + # gallery selection returns object; we extract path -> store in state + def get_selected_image(evt: gr.SelectData): + """ + Accepts a gallery selection event. + Returns: either a PIL.Image (preferred) or a filepath string. 
+ """ + try: + val = evt.value + # If gradio returned a PIL image directly + if isinstance(val, Image.Image): + return val + # If it's a simple string path + if isinstance(val, str): + return val + # If it's a dict wrapper, try to extract + if isinstance(val, dict): + img = val.get("image") + if isinstance(img, Image.Image): + return img + if isinstance(img, dict) and "path" in img: + return img["path"] + # sometimes gallery returns {"name":..., "data":...} + # best-effort: return the PIL object if present + for v in val.values(): + if isinstance(v, Image.Image): + return v + # fallback: stringify + return str(val) + # fallback + return val + except Exception as e: + print("Error extracting selected example:", e) + return None + + + furniture_example_gallery.select( + fn=get_selected_image, + inputs=None, + outputs=furniture_selected_example_state + ) + + furniture_replace_button.click( + fn=replace_object, + inputs=[furniture_input_image, furniture_selected_example_state, furniture_hero_state], + outputs=[furniture_output_text, furniture_output_image] + ) + + +if __name__ == "__main__": + demo.launch(server_name="0.0.0.0", server_port=7860, allowed_paths=["/home/zclap/production/fashion-app/vendor-usecase-app/app/furniture"]) + diff --git a/main.py b/main.py new file mode 100644 index 0000000000000000000000000000000000000000..83d0fa3aeeee0c40efb330930e1f789b943c6cf5 --- /dev/null +++ b/main.py @@ -0,0 +1,37 @@ +from fastapi import FastAPI +from fastapi.middleware.cors import CORSMiddleware +from app.api.routers.v1 import outfitmatch, health, tryon, occasion, whatfits, size_comparision, mannequin, detail_shots,two_object_size,background_edit,multi_view,editorial + +app = FastAPI( + title="Vendor Usecase App", + description="API for various vendor usecases including outfit matching.", + version="0.1.0" +) + +# Add CORS middleware +app.add_middleware( + CORSMiddleware, + allow_origins=["*"], # Change "*" to specific origins for production + allow_credentials=True, + 
allow_methods=["*"], + allow_headers=["*"], +) + +# Include routers +app.include_router(outfitmatch.router, prefix="/api/v1", tags=["Outfit Match"]) +app.include_router(tryon.router, prefix="/api/v1", tags=["TRY-ON"]) +app.include_router(occasion.router, prefix="/api/v1", tags=["occasion-based-styling"]) +app.include_router(whatfits.router, prefix="/api/v1", tags=["what-fits"]) +app.include_router(size_comparision.router, prefix="/api/v1", tags=["size-comparision"]) +app.include_router(two_object_size.router, prefix="/api/v1", tags=["wallet-size-comparision"]) +app.include_router(mannequin.router, prefix="/api/v1", tags=["MANNEQUIN"]) +app.include_router(detail_shots.router, prefix="/api/v1", tags=["details-shots"]) +app.include_router(health.router, prefix="/api/v1", tags=["Health"]) +app.include_router(background_edit.router, prefix="/api/v1", tags = ["Background_edit"]) +app.include_router(multi_view.router, prefix="/api/v1", tags = ["Multi_view"]) +app.include_router(editorial.router, prefix="/api/v1", tags = ["Editorial"]) + + +if __name__ == "__main__": + import uvicorn + uvicorn.run("main:app", host="0.0.0.0", port=5350, reload=True) diff --git a/test_images/dress.jpg b/test_images/dress.jpg new file mode 100644 index 0000000000000000000000000000000000000000..be21770b721d9261014deb309b575869d2616f33 Binary files /dev/null and b/test_images/dress.jpg differ diff --git a/test_images/handbag.jpg b/test_images/handbag.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f41fa5d652ee3c8dbba97c540c0a9efa20844f01 Binary files /dev/null and b/test_images/handbag.jpg differ diff --git a/test_images/shoe.jpg b/test_images/shoe.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f79d7bc27cb89cb5847a8d5c53b3f5487212179a Binary files /dev/null and b/test_images/shoe.jpg differ diff --git a/test_images/test.txt b/test_images/test.txt new file mode 100644 index 
0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/test_images/top_shot_handbag.jpg b/test_images/top_shot_handbag.jpg new file mode 100644 index 0000000000000000000000000000000000000000..30d25024d2774c19432065c906275e98b0333c09 Binary files /dev/null and b/test_images/top_shot_handbag.jpg differ diff --git a/test_images/wallet.jpg b/test_images/wallet.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9f29fb0704dd93db2b06e78f316c21b2e4cab20a Binary files /dev/null and b/test_images/wallet.jpg differ diff --git a/test_images/wallet2.jpg b/test_images/wallet2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ac32071bd64c72b64c7f20bcf79c5e9d6eee5bd2 Binary files /dev/null and b/test_images/wallet2.jpg differ diff --git a/tests/api/v1/__pycache__/test_detail_shots.cpython-27-PYTEST.pyc b/tests/api/v1/__pycache__/test_detail_shots.cpython-27-PYTEST.pyc new file mode 100644 index 0000000000000000000000000000000000000000..99ba00938aee6999466810c194f254dcad8b7918 Binary files /dev/null and b/tests/api/v1/__pycache__/test_detail_shots.cpython-27-PYTEST.pyc differ diff --git a/tests/api/v1/__pycache__/test_detail_shots.cpython-310-pytest-8.3.5.pyc b/tests/api/v1/__pycache__/test_detail_shots.cpython-310-pytest-8.3.5.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a1d96c2ef113f805d68ab16d63cb9e9b7c9a25ea Binary files /dev/null and b/tests/api/v1/__pycache__/test_detail_shots.cpython-310-pytest-8.3.5.pyc differ diff --git a/tests/api/v1/__pycache__/test_health.cpython-27-PYTEST.pyc b/tests/api/v1/__pycache__/test_health.cpython-27-PYTEST.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa383d0d068cde2eed6dbd02c710fbd505402488 Binary files /dev/null and b/tests/api/v1/__pycache__/test_health.cpython-27-PYTEST.pyc differ diff --git a/tests/api/v1/__pycache__/test_health.cpython-310-pytest-8.3.5.pyc 
b/tests/api/v1/__pycache__/test_health.cpython-310-pytest-8.3.5.pyc new file mode 100644 index 0000000000000000000000000000000000000000..52043bc14484977fa72aba66cdd5b34ec7bf3dea Binary files /dev/null and b/tests/api/v1/__pycache__/test_health.cpython-310-pytest-8.3.5.pyc differ diff --git a/tests/api/v1/__pycache__/test_mannequin.cpython-27-PYTEST.pyc b/tests/api/v1/__pycache__/test_mannequin.cpython-27-PYTEST.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f1114a72fb75824c7244cb85961ed3ff5eae4ae7 Binary files /dev/null and b/tests/api/v1/__pycache__/test_mannequin.cpython-27-PYTEST.pyc differ diff --git a/tests/api/v1/__pycache__/test_mannequin.cpython-310-pytest-8.3.5.pyc b/tests/api/v1/__pycache__/test_mannequin.cpython-310-pytest-8.3.5.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3da31c1cc2dd72201437a47d38ca00ca5a2d5f64 Binary files /dev/null and b/tests/api/v1/__pycache__/test_mannequin.cpython-310-pytest-8.3.5.pyc differ diff --git a/tests/api/v1/__pycache__/test_occasion.cpython-27-PYTEST.pyc b/tests/api/v1/__pycache__/test_occasion.cpython-27-PYTEST.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4192102839e43f3dfd60ecd387475d9eabc219cb Binary files /dev/null and b/tests/api/v1/__pycache__/test_occasion.cpython-27-PYTEST.pyc differ diff --git a/tests/api/v1/__pycache__/test_occasion.cpython-310-pytest-8.3.5.pyc b/tests/api/v1/__pycache__/test_occasion.cpython-310-pytest-8.3.5.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b9680245f68e69b3a9bfad0f88c6556fae9e4b0 Binary files /dev/null and b/tests/api/v1/__pycache__/test_occasion.cpython-310-pytest-8.3.5.pyc differ diff --git a/tests/api/v1/__pycache__/test_outfit_match.cpython-27-PYTEST.pyc b/tests/api/v1/__pycache__/test_outfit_match.cpython-27-PYTEST.pyc new file mode 100644 index 0000000000000000000000000000000000000000..822090792f47e4b1589fa1b0d81ac6377d4e9487 Binary files /dev/null and 
b/tests/api/v1/__pycache__/test_outfit_match.cpython-27-PYTEST.pyc differ diff --git a/tests/api/v1/__pycache__/test_outfit_match.cpython-310-pytest-8.3.5.pyc b/tests/api/v1/__pycache__/test_outfit_match.cpython-310-pytest-8.3.5.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a00f709591510b7ca26177bda6d6e43c75068e16 Binary files /dev/null and b/tests/api/v1/__pycache__/test_outfit_match.cpython-310-pytest-8.3.5.pyc differ diff --git a/tests/api/v1/__pycache__/test_size_comparison.cpython-27-PYTEST.pyc b/tests/api/v1/__pycache__/test_size_comparison.cpython-27-PYTEST.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b8d5e88bfe45806979f925523167c60e3a194fb Binary files /dev/null and b/tests/api/v1/__pycache__/test_size_comparison.cpython-27-PYTEST.pyc differ diff --git a/tests/api/v1/__pycache__/test_size_comparison.cpython-310-pytest-8.3.5.pyc b/tests/api/v1/__pycache__/test_size_comparison.cpython-310-pytest-8.3.5.pyc new file mode 100644 index 0000000000000000000000000000000000000000..61c303daa2edf265eb4fb38dbd2b7d4f289924fe Binary files /dev/null and b/tests/api/v1/__pycache__/test_size_comparison.cpython-310-pytest-8.3.5.pyc differ diff --git a/tests/api/v1/__pycache__/test_tryon.cpython-27-PYTEST.pyc b/tests/api/v1/__pycache__/test_tryon.cpython-27-PYTEST.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fc4a1a6b49f2933b8b29e80064d5b92a560563f4 Binary files /dev/null and b/tests/api/v1/__pycache__/test_tryon.cpython-27-PYTEST.pyc differ diff --git a/tests/api/v1/__pycache__/test_tryon.cpython-310-pytest-8.3.5.pyc b/tests/api/v1/__pycache__/test_tryon.cpython-310-pytest-8.3.5.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4bff02d6821c6c71d91f4f004153c2be9fa7c1f9 Binary files /dev/null and b/tests/api/v1/__pycache__/test_tryon.cpython-310-pytest-8.3.5.pyc differ diff --git a/tests/api/v1/__pycache__/test_two_object_size.cpython-27-PYTEST.pyc 
b/tests/api/v1/__pycache__/test_two_object_size.cpython-27-PYTEST.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7a035e7445120af8115e01cbbce9aa875c6516bc Binary files /dev/null and b/tests/api/v1/__pycache__/test_two_object_size.cpython-27-PYTEST.pyc differ diff --git a/tests/api/v1/__pycache__/test_two_object_size.cpython-310-pytest-8.3.5.pyc b/tests/api/v1/__pycache__/test_two_object_size.cpython-310-pytest-8.3.5.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6ec9154102ad65309061be47a4b3ab7056f30ee0 Binary files /dev/null and b/tests/api/v1/__pycache__/test_two_object_size.cpython-310-pytest-8.3.5.pyc differ diff --git a/tests/api/v1/__pycache__/test_whatfits.cpython-27-PYTEST.pyc b/tests/api/v1/__pycache__/test_whatfits.cpython-27-PYTEST.pyc new file mode 100644 index 0000000000000000000000000000000000000000..10df473818a93b64dd679924624ef834328c4bfc Binary files /dev/null and b/tests/api/v1/__pycache__/test_whatfits.cpython-27-PYTEST.pyc differ diff --git a/tests/api/v1/__pycache__/test_whatfits.cpython-310-pytest-8.3.5.pyc b/tests/api/v1/__pycache__/test_whatfits.cpython-310-pytest-8.3.5.pyc new file mode 100644 index 0000000000000000000000000000000000000000..15e579a5165fe3ec8a8dc793850e9da7d7cb4c4e Binary files /dev/null and b/tests/api/v1/__pycache__/test_whatfits.cpython-310-pytest-8.3.5.pyc differ diff --git a/tests/api/v1/test_detail_shots.py b/tests/api/v1/test_detail_shots.py new file mode 100644 index 0000000000000000000000000000000000000000..6f0f3c9e6b156f45a039f2af56b28915c27adbae --- /dev/null +++ b/tests/api/v1/test_detail_shots.py @@ -0,0 +1,42 @@ +import pytest +from fastapi.testclient import TestClient +from app.main import app +import os + +client = TestClient(app) + +# Replace this with the actual image path on your system +TEST_IMAGE_PATH = "test_images/dress.jpg" +INVALID_FILE_PATH = "test_images/test.txt" + +def test_detail_shots_success(): + with open(TEST_IMAGE_PATH, "rb") as f: + files = 
{ + "garment_images": ("test_image.jpg", f, "image/jpeg") + } + response = client.post("/api/v1/detail", files=files) + assert response.status_code == 200 + assert response.headers["content-type"] == "image/png" + +def test_detail_shots_invalid_file_type(): + with open(INVALID_FILE_PATH, "rb") as f: + files = { + "garment_images": ("test.txt", f, "text/plain") + } + response = client.post("/api/v1/detail", files=files) + assert response.status_code == 400 + assert "Invalid file type" in response.json()["detail"] + +def test_detail_shots_missing_file(): + response = client.post("/api/v1/detail") + assert response.status_code == 422 # FastAPI validation error + +def test_detail_shots_multiple_files(): + with open(TEST_IMAGE_PATH, "rb") as f1, open(TEST_IMAGE_PATH, "rb") as f2: + files = [ + ("garment_images", ("test_image1.jpg", f1, "image/jpeg")), + ("garment_images", ("test_image2.jpg", f2, "image/jpeg")) + ] + response = client.post("/api/v1/detail", files=files) + assert response.status_code == 400 + assert "Exactly one garment images are required" in response.json()["detail"] diff --git a/tests/api/v1/test_health.py b/tests/api/v1/test_health.py new file mode 100644 index 0000000000000000000000000000000000000000..aa7a3fbbcb4b9054ce6d2db0d4264576664923a4 --- /dev/null +++ b/tests/api/v1/test_health.py @@ -0,0 +1,13 @@ +import pytest +from fastapi.testclient import TestClient +from app.main import app + +client = TestClient(app) + +def test_health_check(): + response = client.get("/api/v1/health") + assert response.status_code == 200 + assert response.json() == { + "status": "healthy", + "message": "API is up and running!" 
+ } \ No newline at end of file diff --git a/tests/api/v1/test_mannequin.py b/tests/api/v1/test_mannequin.py new file mode 100644 index 0000000000000000000000000000000000000000..82e87672a522939ac38445d89c8f84453be87529 --- /dev/null +++ b/tests/api/v1/test_mannequin.py @@ -0,0 +1,41 @@ +import pytest +from fastapi.testclient import TestClient +from app.main import app + +client = TestClient(app) + +# Replace with your actual image file path +TEST_IMAGE_PATH = "test_images/dress.jpg" +INVALID_FILE_PATH = "test_images/test.txt" + +def test_mannequin_success(): + with open(TEST_IMAGE_PATH, "rb") as f: + files = { + "garment_images": ("test_image.jpg", f, "image/jpeg") + } + response = client.post("/api/v1/mannequin", files=files) + assert response.status_code == 200 + assert response.headers["content-type"] == "image/png" + +def test_mannequin_invalid_file_type(): + with open(INVALID_FILE_PATH, "rb") as f: + files = { + "garment_images": ("test.txt", f, "text/plain") + } + response = client.post("/api/v1/mannequin", files=files) + assert response.status_code == 400 + assert "Invalid file type" in response.json()["detail"] + +def test_mannequin_missing_file(): + response = client.post("/api/v1/mannequin") + assert response.status_code == 422 # FastAPI validation error + +def test_mannequin_multiple_files(): + with open(TEST_IMAGE_PATH, "rb") as f1, open(TEST_IMAGE_PATH, "rb") as f2: + files = [ + ("garment_images", ("test_image1.jpg", f1, "image/jpeg")), + ("garment_images", ("test_image2.jpg", f2, "image/jpeg")) + ] + response = client.post("/api/v1/mannequin", files=files) + assert response.status_code == 400 + assert "Exactly one garment images are required" in response.json()["detail"] diff --git a/tests/api/v1/test_occasion.py b/tests/api/v1/test_occasion.py new file mode 100644 index 0000000000000000000000000000000000000000..7254cc40ae75475fdca69dbfe15f6c65a8613310 --- /dev/null +++ b/tests/api/v1/test_occasion.py @@ -0,0 +1,67 @@ +import pytest +from 
fastapi.testclient import TestClient +from app.main import app + +client = TestClient(app) + +# Update this path to point to your real image file +TEST_IMAGE_PATH = "test_images/handbag.jpg" +INVALID_FILE_PATH = "test_images/test.txt" + +common_data = { + "garment_type": "dress", + "model_gender": "female", + "model_age_range": "25-35", + "model_body_shape": "athletic", + "model_race_ethnicity": "asian", + "model_pose": "standing", + "camera_view_angle": "front", + "camera_distance_meters": 2.0, + "camera_focal_length_mm": 50.0, + "camera_aperture_f_number": 2.8, + "camera_lighting_condition": "natural", + "camera_background": "studio" +} + +def test_occasion_success(): + with open(TEST_IMAGE_PATH, "rb") as f: + files = { + "garment_images": ("test_image.jpg", f, "image/jpeg") + } + response = client.post("/api/v1/occasion", files=files, data=common_data) + assert response.status_code == 200 + assert response.headers["content-type"] == "image/png" + +def test_occasion_invalid_model_attributes(): + with open(TEST_IMAGE_PATH, "rb") as f: + files = { + "garment_images": ("test_image.jpg", f, "image/jpeg") + } + invalid_data = common_data.copy() + invalid_data["garment_type"] = "invalid_type" # deliberately invalid + response = client.post("/api/v1/occasion", files=files, data=invalid_data) + assert response.status_code == 400 + assert "Invalid model or camera attributes" in response.json()["detail"] + +def test_occasion_invalid_file_type(): + with open(INVALID_FILE_PATH, "rb") as f: + files = { + "garment_images": ("test.txt", f, "text/plain") + } + response = client.post("/api/v1/occasion", files=files, data=common_data) + assert response.status_code == 400 + assert "Invalid file type" in response.json()["detail"] + +def test_occasion_missing_file(): + response = client.post("/api/v1/occasion", data=common_data) + assert response.status_code == 422 + +def test_occasion_multiple_files(): + with open(TEST_IMAGE_PATH, "rb") as f1, open(TEST_IMAGE_PATH, "rb") as f2: + 
files = [ + ("garment_images", ("test_image1.jpg", f1, "image/jpeg")), + ("garment_images", ("test_image2.jpg", f2, "image/jpeg")) + ] + response = client.post("/api/v1/occasion", files=files, data=common_data) + assert response.status_code == 400 + assert "Exactly one garment images are required" in response.json()["detail"] diff --git a/tests/api/v1/test_outfit_match.py b/tests/api/v1/test_outfit_match.py new file mode 100644 index 0000000000000000000000000000000000000000..3e736d539060bead8b7badb4e1d6584ede2aca91 --- /dev/null +++ b/tests/api/v1/test_outfit_match.py @@ -0,0 +1,172 @@ +import pytest +from fastapi.testclient import TestClient +from app.main import app +from io import BytesIO + +client = TestClient(app) + +def test_outfit_match_success(): + # Create test image files + test_image1 = BytesIO(b"fake image data 1") + test_image1.name = "test_image1.jpg" + test_image2 = BytesIO(b"fake image data 2") + test_image2.name = "test_image2.jpg" + + # Test data + files = [ + ("garment_images", ("test_image1.jpg", test_image1, "image/jpeg")), + ("garment_images", ("test_image2.jpg", test_image2, "image/jpeg")) + ] + data = { + "garment_type": "dress", + "model_gender": "female", + "model_age_range": "25-35", + "model_body_shape": "athletic", + "model_race_ethnicity": "asian", + "model_pose": "standing", + "camera_view_angle": "front", + "camera_distance_meters": 2.0, + "camera_focal_length_mm": 50.0, + "camera_aperture_f_number": 2.8, + "camera_lighting_condition": "natural", + "camera_background": "studio" + } + + response = client.post("/api/v1/outfit-match", files=files, data=data) + assert response.status_code == 200 + assert response.headers["content-type"] == "image/png" + +def test_outfit_match_invalid_model_attributes(): + test_image1 = BytesIO(b"fake image data 1") + test_image1.name = "test_image1.jpg" + test_image2 = BytesIO(b"fake image data 2") + test_image2.name = "test_image2.jpg" + + files = [ + ("garment_images", ("test_image1.jpg", test_image1, 
"image/jpeg")), + ("garment_images", ("test_image2.jpg", test_image2, "image/jpeg")) + ] + data = { + "garment_type": "invalid_type", # Invalid garment type + "model_gender": "female", + "model_age_range": "25-35", + "model_body_shape": "athletic", + "model_race_ethnicity": "asian", + "model_pose": "standing", + "camera_view_angle": "front", + "camera_distance_meters": 2.0, + "camera_focal_length_mm": 50.0, + "camera_aperture_f_number": 2.8, + "camera_lighting_condition": "natural", + "camera_background": "studio" + } + + response = client.post("/api/v1/outfit-match", files=files, data=data) + assert response.status_code == 400 + assert "Invalid model or camera attributes" in response.json()["detail"] + +def test_outfit_match_invalid_file_type(): + test_file1 = BytesIO(b"not an image 1") + test_file1.name = "test1.txt" + test_file2 = BytesIO(b"not an image 2") + test_file2.name = "test2.txt" + + files = [ + ("garment_images", ("test1.txt", test_file1, "text/plain")), + ("garment_images", ("test2.txt", test_file2, "text/plain")) + ] + data = { + "garment_type": "dress", + "model_gender": "female", + "model_age_range": "25-35", + "model_body_shape": "athletic", + "model_race_ethnicity": "asian", + "model_pose": "standing", + "camera_view_angle": "front", + "camera_distance_meters": 2.0, + "camera_focal_length_mm": 50.0, + "camera_aperture_f_number": 2.8, + "camera_lighting_condition": "natural", + "camera_background": "studio" + } + + response = client.post("/api/v1/outfit-match", files=files, data=data) + assert response.status_code == 400 + assert "Invalid file type" in response.json()["detail"] + +def test_outfit_match_missing_file(): + data = { + "garment_type": "dress", + "model_gender": "female", + "model_age_range": "25-35", + "model_body_shape": "athletic", + "model_race_ethnicity": "asian", + "model_pose": "standing", + "camera_view_angle": "front", + "camera_distance_meters": 2.0, + "camera_focal_length_mm": 50.0, + "camera_aperture_f_number": 2.8, + 
"camera_lighting_condition": "natural", + "camera_background": "studio" + } + + response = client.post("/api/v1/outfit-match", data=data) + assert response.status_code == 422 # FastAPI validation error + +def test_outfit_match_single_file(): + test_image = BytesIO(b"fake image data") + test_image.name = "test_image.jpg" + + files = { + "garment_images": ("test_image.jpg", test_image, "image/jpeg") + } + data = { + "garment_type": "dress", + "model_gender": "female", + "model_age_range": "25-35", + "model_body_shape": "athletic", + "model_race_ethnicity": "asian", + "model_pose": "standing", + "camera_view_angle": "front", + "camera_distance_meters": 2.0, + "camera_focal_length_mm": 50.0, + "camera_aperture_f_number": 2.8, + "camera_lighting_condition": "natural", + "camera_background": "studio" + } + + response = client.post("/api/v1/outfit-match", files=files, data=data) + assert response.status_code == 400 + assert "Exactly two garment images are required" in response.json()["detail"] + +def test_outfit_match_three_files(): + test_image1 = BytesIO(b"fake image data 1") + test_image1.name = "test_image1.jpg" + test_image2 = BytesIO(b"fake image data 2") + test_image2.name = "test_image2.jpg" + test_image3 = BytesIO(b"fake image data 3") + test_image3.name = "test_image3.jpg" + + files = [ + ("garment_images", ("test_image1.jpg", test_image1, "image/jpeg")), + ("garment_images", ("test_image2.jpg", test_image2, "image/jpeg")), + ("garment_images", ("test_image3.jpg", test_image3, "image/jpeg")) + ] + data = { + "garment_type": "dress", + "model_gender": "female", + "model_age_range": "25-35", + "model_body_shape": "athletic", + "model_race_ethnicity": "asian", + "model_pose": "standing", + "camera_view_angle": "front", + "camera_distance_meters": 2.0, + "camera_focal_length_mm": 50.0, + "camera_aperture_f_number": 2.8, + "camera_lighting_condition": "natural", + "camera_background": "studio" + } + + response = client.post("/api/v1/outfit-match", files=files, 
data=data) + assert response.status_code == 400 + assert "Exactly two garment images are required" in response.json()["detail"] \ No newline at end of file diff --git a/tests/api/v1/test_size_comparison.py b/tests/api/v1/test_size_comparison.py new file mode 100644 index 0000000000000000000000000000000000000000..2e30847d46df7eef2e46fa348e6dfa93c45208ad --- /dev/null +++ b/tests/api/v1/test_size_comparison.py @@ -0,0 +1,89 @@ +import pytest +from fastapi.testclient import TestClient +from app.main import app +from io import BytesIO + +client = TestClient(app) + +def test_size_comparison_success(): + # Create a test image file + test_image = BytesIO(b"fake image data") + test_image.name = "test_image.jpg" + + # Test data + files = { + "garment_images": ("test_image.jpg", test_image, "image/jpeg") + } + data = { + "product_height": 30.0, + "product_width": 20.0, + "product_length": 15.0 + } + + response = client.post("/api/v1/size", files=files, data=data) + assert response.status_code == 200 + assert response.headers["content-type"] == "image/png" + +def test_size_comparison_invalid_dimensions(): + test_image = BytesIO(b"fake image data") + test_image.name = "test_image.jpg" + + files = { + "garment_images": ("test_image.jpg", test_image, "image/jpeg") + } + data = { + "product_height": "invalid", # Invalid height + "product_width": 20.0, + "product_length": 15.0 + } + + response = client.post("/api/v1/size", files=files, data=data) + assert response.status_code == 400 + assert "Invalid dimensions" in response.json()["detail"] + +def test_size_comparison_invalid_file_type(): + test_file = BytesIO(b"not an image") + test_file.name = "test.txt" + + files = { + "garment_images": ("test.txt", test_file, "text/plain") + } + data = { + "product_height": 30.0, + "product_width": 20.0, + "product_length": 15.0 + } + + response = client.post("/api/v1/size", files=files, data=data) + assert response.status_code == 400 + assert "Invalid file type" in response.json()["detail"] + 
+def test_size_comparison_missing_file(): + data = { + "product_height": 30.0, + "product_width": 20.0, + "product_length": 15.0 + } + + response = client.post("/api/v1/size", data=data) + assert response.status_code == 422 # FastAPI validation error + +def test_size_comparison_multiple_files(): + test_image1 = BytesIO(b"fake image data 1") + test_image1.name = "test_image1.jpg" + test_image2 = BytesIO(b"fake image data 2") + test_image2.name = "test_image2.jpg" + + files = [ + ("garment_images", ("test_image1.jpg", test_image1, "image/jpeg")), + ("garment_images", ("test_image2.jpg", test_image2, "image/jpeg")) + ] + data = { + "product_height": 30.0, + "product_width": 20.0, + "product_length": 15.0 + } + + response = client.post("/api/v1/size", files=files, data=data) + assert response.status_code == 400 + assert "Exactly one front shot of the image" in response.json()["detail"] \ No newline at end of file diff --git a/tests/api/v1/test_tryon.py b/tests/api/v1/test_tryon.py new file mode 100644 index 0000000000000000000000000000000000000000..d32a7bfbee3dd0d9d36e15859c6c052533699d2d --- /dev/null +++ b/tests/api/v1/test_tryon.py @@ -0,0 +1,134 @@ +import pytest +from fastapi.testclient import TestClient +from app.main import app +from io import BytesIO + +client = TestClient(app) + +def test_tryon_success(): + # Create a test image file + test_image = BytesIO(b"fake image data") + test_image.name = "test_image.jpg" + + # Test data + files = { + "garment_images": ("test_image.jpg", test_image, "image/jpeg") + } + data = { + "garment_type": "dress", + "model_gender": "female", + "model_age_range": "25-35", + "model_body_shape": "athletic", + "model_race_ethnicity": "asian", + "model_pose": "standing", + "camera_view_angle": "front", + "camera_distance_meters": 2.0, + "camera_focal_length_mm": 50.0, + "camera_aperture_f_number": 2.8, + "camera_lighting_condition": "natural", + "camera_background": "studio" + } + + response = client.post("/api/v1/tryon", files=files, 
data=data) + assert response.status_code == 200 + assert response.headers["content-type"] == "image/png" + +def test_tryon_invalid_model_attributes(): + test_image = BytesIO(b"fake image data") + test_image.name = "test_image.jpg" + + files = { + "garment_images": ("test_image.jpg", test_image, "image/jpeg") + } + data = { + "garment_type": "invalid_type", # Invalid garment type + "model_gender": "female", + "model_age_range": "25-35", + "model_body_shape": "athletic", + "model_race_ethnicity": "asian", + "model_pose": "standing", + "camera_view_angle": "front", + "camera_distance_meters": 2.0, + "camera_focal_length_mm": 50.0, + "camera_aperture_f_number": 2.8, + "camera_lighting_condition": "natural", + "camera_background": "studio" + } + + response = client.post("/api/v1/tryon", files=files, data=data) + assert response.status_code == 400 + assert "Invalid model or camera attributes" in response.json()["detail"] + +def test_tryon_invalid_file_type(): + test_file = BytesIO(b"not an image") + test_file.name = "test.txt" + + files = { + "garment_images": ("test.txt", test_file, "text/plain") + } + data = { + "garment_type": "dress", + "model_gender": "female", + "model_age_range": "25-35", + "model_body_shape": "athletic", + "model_race_ethnicity": "asian", + "model_pose": "standing", + "camera_view_angle": "front", + "camera_distance_meters": 2.0, + "camera_focal_length_mm": 50.0, + "camera_aperture_f_number": 2.8, + "camera_lighting_condition": "natural", + "camera_background": "studio" + } + + response = client.post("/api/v1/tryon", files=files, data=data) + assert response.status_code == 400 + assert "Invalid file type" in response.json()["detail"] + +def test_tryon_missing_file(): + data = { + "garment_type": "dress", + "model_gender": "female", + "model_age_range": "25-35", + "model_body_shape": "athletic", + "model_race_ethnicity": "asian", + "model_pose": "standing", + "camera_view_angle": "front", + "camera_distance_meters": 2.0, + 
"camera_focal_length_mm": 50.0, + "camera_aperture_f_number": 2.8, + "camera_lighting_condition": "natural", + "camera_background": "studio" + } + + response = client.post("/api/v1/tryon", data=data) + assert response.status_code == 422 # FastAPI validation error + +def test_tryon_multiple_files(): + test_image1 = BytesIO(b"fake image data 1") + test_image1.name = "test_image1.jpg" + test_image2 = BytesIO(b"fake image data 2") + test_image2.name = "test_image2.jpg" + + files = [ + ("garment_images", ("test_image1.jpg", test_image1, "image/jpeg")), + ("garment_images", ("test_image2.jpg", test_image2, "image/jpeg")) + ] + data = { + "garment_type": "dress", + "model_gender": "female", + "model_age_range": "25-35", + "model_body_shape": "athletic", + "model_race_ethnicity": "asian", + "model_pose": "standing", + "camera_view_angle": "front", + "camera_distance_meters": 2.0, + "camera_focal_length_mm": 50.0, + "camera_aperture_f_number": 2.8, + "camera_lighting_condition": "natural", + "camera_background": "studio" + } + + response = client.post("/api/v1/tryon", files=files, data=data) + assert response.status_code == 400 + assert "Exactly one garment images are required" in response.json()["detail"] \ No newline at end of file diff --git a/tests/api/v1/test_two_object_size.py b/tests/api/v1/test_two_object_size.py new file mode 100644 index 0000000000000000000000000000000000000000..3f51d412244896fd5f49ee2277a5f4ac4636ceb5 --- /dev/null +++ b/tests/api/v1/test_two_object_size.py @@ -0,0 +1,136 @@ +import pytest +from fastapi.testclient import TestClient +from app.main import app +from io import BytesIO + +client = TestClient(app) + +def test_two_object_size_success(): + # Create test image files + test_image1 = BytesIO(b"fake image data 1") + test_image1.name = "test_image1.jpg" + test_image2 = BytesIO(b"fake image data 2") + test_image2.name = "test_image2.jpg" + + # Test data + files = [ + ("garment_images", ("test_image1.jpg", test_image1, "image/jpeg")), + 
("garment_images", ("test_image2.jpg", test_image2, "image/jpeg")) + ] + data = { + "product_height1": 10.0, + "product_width1": 15.0, + "product_length1": 2.0, + "product_height2": 12.0, + "product_width2": 18.0, + "product_length2": 2.5 + } + + response = client.post("/api/v1/walletsize", files=files, data=data) + assert response.status_code == 200 + assert response.headers["content-type"] == "image/png" + +def test_two_object_size_invalid_dimensions(): + test_image1 = BytesIO(b"fake image data 1") + test_image1.name = "test_image1.jpg" + test_image2 = BytesIO(b"fake image data 2") + test_image2.name = "test_image2.jpg" + + files = [ + ("garment_images", ("test_image1.jpg", test_image1, "image/jpeg")), + ("garment_images", ("test_image2.jpg", test_image2, "image/jpeg")) + ] + data = { + "product_height1": "invalid", # Invalid height + "product_width1": 15.0, + "product_length1": 2.0, + "product_height2": 12.0, + "product_width2": 18.0, + "product_length2": 2.5 + } + + response = client.post("/api/v1/walletsize", files=files, data=data) + assert response.status_code == 400 + assert "Invalid dimensions" in response.json()["detail"] + +def test_two_object_size_invalid_file_type(): + test_file1 = BytesIO(b"not an image 1") + test_file1.name = "test1.txt" + test_file2 = BytesIO(b"not an image 2") + test_file2.name = "test2.txt" + + files = [ + ("garment_images", ("test1.txt", test_file1, "text/plain")), + ("garment_images", ("test2.txt", test_file2, "text/plain")) + ] + data = { + "product_height1": 10.0, + "product_width1": 15.0, + "product_length1": 2.0, + "product_height2": 12.0, + "product_width2": 18.0, + "product_length2": 2.5 + } + + response = client.post("/api/v1/walletsize", files=files, data=data) + assert response.status_code == 400 + assert "Invalid file type" in response.json()["detail"] + +def test_two_object_size_missing_file(): + data = { + "product_height1": 10.0, + "product_width1": 15.0, + "product_length1": 2.0, + "product_height2": 12.0, + 
"product_width2": 18.0, + "product_length2": 2.5 + } + + response = client.post("/api/v1/walletsize", data=data) + assert response.status_code == 422 # FastAPI validation error + +def test_two_object_size_single_file(): + test_image = BytesIO(b"fake image data") + test_image.name = "test_image.jpg" + + files = { + "garment_images": ("test_image.jpg", test_image, "image/jpeg") + } + data = { + "product_height1": 10.0, + "product_width1": 15.0, + "product_length1": 2.0, + "product_height2": 12.0, + "product_width2": 18.0, + "product_length2": 2.5 + } + + response = client.post("/api/v1/walletsize", files=files, data=data) + assert response.status_code == 400 + assert "Exactly two front shot images of wallet" in response.json()["detail"] + +def test_two_object_size_three_files(): + test_image1 = BytesIO(b"fake image data 1") + test_image1.name = "test_image1.jpg" + test_image2 = BytesIO(b"fake image data 2") + test_image2.name = "test_image2.jpg" + test_image3 = BytesIO(b"fake image data 3") + test_image3.name = "test_image3.jpg" + + files = [ + ("garment_images", ("test_image1.jpg", test_image1, "image/jpeg")), + ("garment_images", ("test_image2.jpg", test_image2, "image/jpeg")), + ("garment_images", ("test_image3.jpg", test_image3, "image/jpeg")) + ] + data = { + "product_height1": 10.0, + "product_width1": 15.0, + "product_length1": 2.0, + "product_height2": 12.0, + "product_width2": 18.0, + "product_length2": 2.5 + } + + response = client.post("/api/v1/walletsize", files=files, data=data) + assert response.status_code == 400 + assert "Exactly two front shot images of wallet" in response.json()["detail"] \ No newline at end of file diff --git a/tests/api/v1/test_whatfits.py b/tests/api/v1/test_whatfits.py new file mode 100644 index 0000000000000000000000000000000000000000..889b21a651ea0063a0209b3bcfb74f9de8aeb635 --- /dev/null +++ b/tests/api/v1/test_whatfits.py @@ -0,0 +1,92 @@ +import pytest +from fastapi.testclient import TestClient +from app.main import app 
+from io import BytesIO +import json + +client = TestClient(app) + +def test_whatfits_success(): + # Create a test image file + test_image = BytesIO(b"fake image data") + test_image.name = "test_image.jpg" + + # Test data + files = { + "garment_images": ("test_image.jpg", test_image, "image/jpeg") + } + data = { + "product_height": 30.0, + "product_width": 20.0, + "product_length": 15.0 + } + + response = client.post("/api/v1/whatfits", files=files, data=data) + assert response.status_code == 200 + assert response.headers["content-type"] == "image/png" + +def test_whatfits_invalid_dimensions(): + test_image = BytesIO(b"fake image data") + test_image.name = "test_image.jpg" + + files = { + "garment_images": ("test_image.jpg", test_image, "image/jpeg") + } + data = { + "product_height": "invalid", # Invalid height + "product_width": 20.0, + "product_length": 15.0 + } + + response = client.post("/api/v1/whatfits", files=files, data=data) + assert response.status_code == 400 + assert "Invalid dimensions" in response.json()["detail"] + +def test_whatfits_invalid_file_type(): + # Create a non-image file + test_file = BytesIO(b"not an image") + test_file.name = "test.txt" + + files = { + "garment_images": ("test.txt", test_file, "text/plain") + } + data = { + "product_height": 30.0, + "product_width": 20.0, + "product_length": 15.0 + } + + response = client.post("/api/v1/whatfits", files=files, data=data) + assert response.status_code == 400 + assert "Invalid file type" in response.json()["detail"] + +def test_whatfits_missing_file(): + data = { + "product_height": 30.0, + "product_width": 20.0, + "product_length": 15.0 + } + + response = client.post("/api/v1/whatfits", data=data) + assert response.status_code == 422 # FastAPI validation error + +def test_whatfits_multiple_files(): + # Create two test images + test_image1 = BytesIO(b"fake image data 1") + test_image1.name = "test_image1.jpg" + test_image2 = BytesIO(b"fake image data 2") + test_image2.name = 
"test_image2.jpg" + + files = [ + ("garment_images", ("test_image1.jpg", test_image1, "image/jpeg")), + ("garment_images", ("test_image2.jpg", test_image2, "image/jpeg")) + ] + data = { + "product_height": 30.0, + "product_width": 20.0, + "product_length": 15.0 + } + + response = client.post("/api/v1/whatfits", files=files, data=data) + assert response.status_code == 400 + assert "Exactly one top shot of the image" in response.json()["detail"] \ No newline at end of file