Spaces:
Paused
Paused
| from fastapi import APIRouter, Depends, HTTPException, BackgroundTasks, UploadFile, File, Form | |
| from fastapi.responses import JSONResponse, Response | |
| from auth import get_current_active_user, User, supabase | |
| import logging | |
| import httpx | |
| import os | |
| from typing import Optional, Dict, Any | |
| from pydantic import BaseModel | |
| import base64 | |
| from uuid import uuid4 | |
| from services.hunyuan_service import _hunyuan_image_to_3d | |
| import io | |
| import numpy as np | |
| from PIL import Image | |
| import trimesh | |
| import pyrender | |
| from trimesh.transformations import translation_matrix, rotation_matrix | |
| # Set PyOpenGL platform for headless rendering | |
| os.environ.setdefault("PYOPENGL_PLATFORM", "egl") | |
def generate_thumbnail_from_bytes(mesh_data: bytes, size: int = 512) -> bytes:
    """
    Render a PNG preview image for a GLB mesh supplied as raw bytes.

    Args:
        mesh_data: The 3D mesh file as bytes (GLB format).
        size: Output image size in pixels (default 512x512).

    Returns:
        PNG image data as bytes.

    Raises:
        Exception: re-raised after logging if loading or rendering fails.
    """
    try:
        # Parse the GLB payload into a single trimesh.Trimesh instance.
        loaded = trimesh.load(io.BytesIO(mesh_data), file_type="glb", force="mesh")

        # Normalise the model so meshes of different sizes render comparably.
        target_size = 2.5
        largest_extent = np.max(loaded.extents)
        scale_factor = target_size / largest_extent if largest_extent > 0 else 1.0

        # Camera/light distance is derived from the *pre-scale* bounds so
        # framing stays consistent; fall back to the axis-aligned extents when
        # any oriented-bounding-box extent is zero.
        bounds = loaded.bounding_box_oriented.extents
        if not np.all(bounds):
            bounds = loaded.extents
        fixed_radius = np.linalg.norm(bounds) * 0.6

        # Centre, scale, then tilt the mesh for a nicer viewing angle.
        loaded.apply_translation(-loaded.bounding_box.centroid)
        loaded.apply_scale(scale_factor)
        loaded.apply_transform(rotation_matrix(np.radians(30), [0.3, -0.5, 0]))

        # Assemble the pyrender scene on a gray background.
        scene = pyrender.Scene(bg_color=[0.15, 0.15, 0.15, 1])
        scene.add(pyrender.Mesh.from_trimesh(loaded, smooth=False))

        # Three-point lighting (key / fill / back) positioned at fixed_radius.
        lighting = [
            (40.0, [fixed_radius, fixed_radius, fixed_radius]),
            (20.0, [-fixed_radius, fixed_radius, fixed_radius]),
            (10.0, [0, -fixed_radius, -fixed_radius]),
        ]
        for intensity, position in lighting:
            light = pyrender.PointLight(color=np.ones(3), intensity=intensity)
            scene.add(light, pose=translation_matrix(position))

        # Perspective camera pulled back along +Z to frame the model.
        camera = pyrender.PerspectiveCamera(yfov=np.radians(45.0))
        scene.add(camera, pose=translation_matrix([0, 0, fixed_radius * 2.5]))

        # Offscreen render; always release the GL context, even on failure.
        renderer = pyrender.OffscreenRenderer(viewport_width=size, viewport_height=size)
        try:
            color, _ = renderer.render(scene)
        finally:
            renderer.delete()

        # Encode the rendered framebuffer as PNG bytes.
        buffer = io.BytesIO()
        Image.fromarray(color).save(buffer, format='PNG')
        return buffer.getvalue()
    except Exception as e:
        logging.error(f"Failed to generate thumbnail: {str(e)}")
        raise
# Router for user-model endpoints, mounted under /user/models.
router = APIRouter(
    prefix="/user/models",
    tags=["User Models"]  # Removed global auth dependency; individual endpoints add it where needed
)
async def refresh_generated_model(generated_model_id: str):
    """
    Manual refresh endpoint.

    The front-end calls this route to fetch the latest status of a generation
    task and – if completed – persist the final Meshy response into Supabase.
    For text-to-3d with texture, this handles the two-step process (preview + refine).

    NOTE(review): no @router decorator is visible in this chunk — confirm how
    this coroutine is actually registered as a route.

    Args:
        generated_model_id: Numeric model id as a string. Placeholder ids
            ("placeholder_*") sent by the front-end are rejected with 400.

    Returns:
        dict: Task-status payload (task_id, status, progress, model_urls,
        thumbnail_url, texture_urls, timestamps, database_updated, ...).

    Raises:
        HTTPException: 400 for invalid ids or incomplete records, 404 when
        the model row does not exist, 500 for configuration or unexpected
        errors, or the upstream status code when the Meshy API call fails.
    """
    try:
        # Handle placeholder IDs from frontend
        if generated_model_id.startswith("placeholder_"):
            raise HTTPException(status_code=400, detail="Invalid model ID. Model may not be ready yet or generation is still initializing.")
        # Validate that generated_model_id is a valid integer
        try:
            model_id_int = int(generated_model_id)
        except ValueError:
            raise HTTPException(status_code=400, detail="Invalid model ID format. Expected numeric ID.")
        # 1) Validate existence & retrieve the record (removed ownership check for public access).
        db_resp = supabase.from_("Generated_Models").select("*").eq("generated_model_id", model_id_int).limit(1).execute()  # .eq("user_id", current_user.id) - commented out for public access
        if not db_resp.data:
            raise HTTPException(status_code=404, detail="Model not found")
        generated_model = db_resp.data[0]
        prompts_config = generated_model.get("prompts_and_models_config", {})
        generation_type = prompts_config.get("generation_type")
        should_texture = prompts_config.get("should_texture", False)
        # Special handling for Hunyuan generation (doesn't use Meshy API)
        if generation_type == "hunyuan_image_to_3d":
            # For completed Hunyuan models, provide the view URL for 3D display
            model_urls = None
            thumbnail_url = None
            if generated_model.get("status") == "COMPLETED":
                model_urls = {
                    "glb": f"/user/models/{model_id_int}/view.glb"  # Relative URL to our view endpoint with extension
                }
                # Check if thumbnail exists
                thumbnail_check = supabase.from_("Model_Files").select("model_file_id").eq("generated_model_id", model_id_int).eq("is_preview_file", True).eq("file_format", "png").limit(1).execute()
                if thumbnail_check.data:
                    thumbnail_url = f"/user/models/{model_id_int}/thumbnail"
            return {
                "task_id": model_id_int,
                "status": generated_model.get("status", "IN_PROGRESS"),
                # Hunyuan exposes no native percentage, so progress is synthesized.
                "progress": 100 if generated_model.get("status") == "COMPLETED" else 50,
                "model_urls": model_urls,
                "thumbnail_url": thumbnail_url,
                "texture_urls": None,
                "created_at": generated_model.get("created_at"),
                "started_at": generated_model.get("created_at"),
                "finished_at": generated_model.get("updated_at") if generated_model.get("status") == "COMPLETED" else None,
                "task_error": None,
                "database_updated": True,
                "generation_type": "hunyuan_image_to_3d",
                "message": "Hunyuan generation completed. 3D model ready for viewing." if generated_model.get("status") == "COMPLETED" else "Hunyuan generation in progress..."
            }
        meshy_task_id = generated_model.get("meshy_api_job_id")
        if not meshy_task_id:
            # Check if this model has files stored locally (similar to Hunyuan models) - removed ownership check for public access
            file_check = supabase.from_("Model_Files").select("model_file_id, file_name, file_format").eq("generated_model_id", model_id_int).limit(1).execute()  # .eq("user_id", current_user.id) - commented out for public access
            if file_check.data:
                # Model has local files - treat it like a Hunyuan model
                model_urls = None
                if generated_model.get("status") == "COMPLETED":
                    # Determine file format for URL
                    file_format = file_check.data[0].get("file_format", "glb").lower()
                    model_urls = {
                        file_format: f"/user/models/{model_id_int}/view.{file_format}"
                    }
                return {
                    "task_id": model_id_int,
                    "status": generated_model.get("status", "IN_PROGRESS"),
                    "progress": 100 if generated_model.get("status") == "COMPLETED" else 50,
                    "model_urls": model_urls,
                    "thumbnail_url": None,
                    "texture_urls": None,
                    "created_at": generated_model.get("created_at"),
                    "started_at": generated_model.get("created_at"),
                    "finished_at": generated_model.get("updated_at") if generated_model.get("status") == "COMPLETED" else None,
                    "task_error": None,
                    "database_updated": True,
                    "generation_type": generation_type,
                    "message": "Model completed. 3D model ready for viewing." if generated_model.get("status") == "COMPLETED" else "Model generation in progress..."
                }
            else:
                # No local files and no Meshy task ID - this model might be incomplete or from old system
                raise HTTPException(status_code=400, detail="Model has no associated files or Meshy task ID. This model may be incomplete or from an older system.")
        # 2) Query Meshy API for the latest status.
        meshy_api_key = os.getenv("MESHY_API_KEY")
        if not meshy_api_key:
            raise HTTPException(status_code=500, detail="MESHY_API_KEY not configured")
        async with httpx.AsyncClient(timeout=30.0) as client:
            headers = {"Authorization": f"Bearer {meshy_api_key}"}
            # Special handling for text-to-3d with texture (two-step process)
            if generation_type == "text_to_3d" and should_texture:
                return await _handle_text_to_3d_with_texture(
                    model_id_int, generated_model, prompts_config,
                    meshy_task_id, meshy_api_key, client, headers
                )
            # Standard handling for other generation types
            # Determine which Meshy progress endpoint to query based on generation type
            if generation_type == "image_to_3d":
                meshy_progress_url = f"https://api.meshy.ai/openapi/v1/image-to-3d/{meshy_task_id}"
            elif generation_type == "multi_image_to_3d":
                meshy_progress_url = f"https://api.meshy.ai/openapi/v1/multi-image-to-3d/{meshy_task_id}"
            else:
                # Default to text-to-3d (without texture)
                meshy_progress_url = f"https://api.meshy.ai/openapi/v2/text-to-3d/{meshy_task_id}"
            response = await client.get(
                meshy_progress_url,
                headers=headers,
            )
            if response.status_code != 200:
                raise HTTPException(
                    status_code=response.status_code,
                    detail=f"Failed to get task progress from Meshy AI: {response.text}"
                )
            meshy_response = response.json()
            # If the task has succeeded, update the database
            if meshy_response.get("status") == "SUCCEEDED":
                # Update the Generated_Models record with the completed task information
                update_data = {
                    "status": "COMPLETED",
                    # NOTE(review): "now()" is sent as a string value; confirm
                    # Postgres parses it as a timestamp and doesn't store it literally.
                    "updated_at": "now()",
                    # Store the complete response in the prompts_and_models_config field
                    "prompts_and_models_config": meshy_response
                }
                supabase.from_("Generated_Models").update(update_data).eq("generated_model_id", generated_model["generated_model_id"]).execute()
                logging.info(f"Updated generated model {generated_model['generated_model_id']} with completion data")
            # Return the progress information along with update status
            return {
                "task_id": model_id_int,
                "status": meshy_response.get("status"),
                "progress": meshy_response.get("progress"),
                "model_urls": meshy_response.get("model_urls"),
                "thumbnail_url": meshy_response.get("thumbnail_url"),
                "texture_urls": meshy_response.get("texture_urls"),
                "created_at": meshy_response.get("created_at"),
                "started_at": meshy_response.get("started_at"),
                "finished_at": meshy_response.get("finished_at"),
                "task_error": meshy_response.get("task_error"),
                "database_updated": meshy_response.get("status") == "SUCCEEDED"
            }
    except HTTPException:
        # Re-raise HTTP exceptions as-is
        raise
    except Exception as e:
        logging.error(f"Error in refresh_generated_model: {str(e)}")
        raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}")
async def _handle_text_to_3d_with_texture(
    generated_model_id: int,
    generated_model: Dict[str, Any],
    prompts_config: Dict[str, Any],
    preview_task_id: str,
    meshy_api_key: str,
    client: httpx.AsyncClient,
    headers: Dict[str, str]
):
    """Handle the two-step text-to-3d with texture process (preview + refine).

    Progress is mapped onto one 0-100 scale: 0-50 while the preview task runs,
    50-100 while the refine (texturing) task runs.

    Args:
        generated_model_id: Generated_Models row id (echoed as task_id).
        generated_model: The full DB row (currently not read in this body).
        prompts_config: prompts_and_models_config JSON from the row; holds
            refine_task_id/stage once the refine step has been launched.
        preview_task_id: Meshy task id of the preview (geometry) step.
        meshy_api_key: Meshy API key (headers are already built; not re-read here).
        client: Shared httpx client owned by the caller.
        headers: Authorization headers for Meshy requests.

    Returns:
        dict: Status payload in the same shape as refresh_generated_model.

    Raises:
        HTTPException: propagated Meshy status codes on failed API calls, or
        500 when the refine task returns no task id.
    """
    refine_task_id = prompts_config.get("refine_task_id")
    # NOTE(review): `stage` is read but never used below — confirm it's dead.
    stage = prompts_config.get("stage", "generating_preview")

    # Helper to build consistent response
    def _build_response(status: str, progress_val: float, extra: Dict[str, Any] = None):
        base = {
            "task_id": generated_model_id,
            "status": status,
            "progress": progress_val,
            "database_updated": False,
        }
        if extra:
            base.update(extra)
        return base

    # Case 1: Preview task still in progress or just completed
    if not refine_task_id:
        # Check preview task status
        preview_url = f"https://api.meshy.ai/openapi/v2/text-to-3d/{preview_task_id}"
        preview_resp = await client.get(preview_url, headers=headers)
        if preview_resp.status_code != 200:
            raise HTTPException(
                status_code=preview_resp.status_code,
                detail=f"Failed to get preview task progress: {preview_resp.text}"
            )
        preview_data = preview_resp.json()
        # Preview still in progress
        if preview_data.get("status") != "SUCCEEDED":
            # Progress is 0-50% for preview phase
            preview_progress = preview_data.get("progress", 0)
            adjusted_progress = preview_progress / 2
            return _build_response(
                preview_data.get("status", "IN_PROGRESS"),
                adjusted_progress,
                {
                    "model_urls": preview_data.get("model_urls"),
                    "thumbnail_url": preview_data.get("thumbnail_url"),
                    "created_at": preview_data.get("created_at"),
                    "started_at": preview_data.get("started_at"),
                }
            )
        # Preview completed - launch refine task
        refine_payload = {
            "mode": "refine",
            "preview_task_id": preview_task_id,
            # Prefer the OpenAI-reframed prompt, fall back to the original.
            "texture_prompt": prompts_config.get("reframed_prompt", prompts_config.get("original_prompt", "")),
            # NOTE(review): preview uses "meshy-5" elsewhere in this module but
            # refine pins "meshy-4" — confirm this mismatch is intentional.
            "ai_model": "meshy-4"
        }
        refine_resp = await client.post(
            "https://api.meshy.ai/openapi/v2/text-to-3d",
            headers=headers,
            json=refine_payload
        )
        if refine_resp.status_code not in (200, 201, 202):
            raise HTTPException(
                status_code=refine_resp.status_code,
                detail=f"Failed to create refine task: {refine_resp.text}"
            )
        refine_data = refine_resp.json()
        # The task id may appear under different keys; try all known variants.
        new_refine_id = refine_data.get("result") or refine_data.get("id") or refine_data.get("task_id")
        if not new_refine_id:
            raise HTTPException(status_code=500, detail="No refine task ID received from Meshy API")
        # Update database with refine task info
        updated_config = {
            **prompts_config,
            "refine_task_id": new_refine_id,
            "stage": "refining"
        }
        supabase.from_("Generated_Models").update({
            "prompts_and_models_config": updated_config
        }).eq("generated_model_id", generated_model_id).execute()
        return _build_response(
            "REFINING",
            50,  # 50% - preview done, refine started
            {
                "database_updated": True,
                "model_urls": preview_data.get("model_urls"),
                "thumbnail_url": preview_data.get("thumbnail_url"),
            }
        )
    # Case 2: Refine task in progress or completed
    refine_url = f"https://api.meshy.ai/openapi/v2/text-to-3d/{refine_task_id}"
    refine_resp = await client.get(refine_url, headers=headers)
    if refine_resp.status_code != 200:
        raise HTTPException(
            status_code=refine_resp.status_code,
            detail=f"Failed to get refine task progress: {refine_resp.text}"
        )
    refine_data = refine_resp.json()
    # Refine completed
    if refine_data.get("status") == "SUCCEEDED":
        # Update database as completed - overwrite config with refine response
        supabase.from_("Generated_Models").update({
            "status": "COMPLETED",
            "updated_at": "now()",
            "prompts_and_models_config": refine_data
        }).eq("generated_model_id", generated_model_id).execute()
        # Return the complete refine data with additional metadata
        return {
            "task_id": generated_model_id,
            "database_updated": True,
            **refine_data
        }
    # Refine still in progress
    refine_progress = refine_data.get("progress", 0)
    # Progress is 50-100% for refine phase
    adjusted_progress = 50 + (refine_progress / 2)
    return _build_response(
        refine_data.get("status", "REFINING"),
        adjusted_progress,
        {
            "model_urls": None,
            "thumbnail_url": None,
            "texture_urls": None,
            "created_at": refine_data.get("created_at"),
            "started_at": refine_data.get("started_at"),
        }
    )
class TextPrompt(BaseModel):
    """Request body for the text-to-3D generation endpoint."""
    text: str
    # When true, a secondary *refine* task will be run to add texture to the model.
    should_texture: Optional[bool] = False
# NEW: Request model for Image to 3D generation
class ImageTo3DRequest(BaseModel):
    """Request body for Meshy image-to-3D generation (URL-based variant)."""
    image_url: str
    # Optional Meshy parameters (all directly forwarded if provided)
    ai_model: Optional[str] = None
    topology: Optional[str] = None
    target_polycount: Optional[int] = None
    symmetry_mode: Optional[str] = None
    should_remesh: Optional[bool] = True
    should_texture: Optional[bool] = True
    enable_pbr: Optional[bool] = False
    texture_prompt: Optional[str] = None
    texture_image_url: Optional[str] = None
    moderation: Optional[bool] = None
# Helper to check & decrement credits using Supabase
async def _check_and_decrement_credits(user_id: str, cost: int = 1):
    """Validate that the user has at least *cost* credits available and deduct them.

    Args:
        user_id: The Supabase user identifier.
        cost: How many credits the generation should consume (default = 1).

    Raises:
        HTTPException: 403 when the user has no credit-account row,
        402 when the balance is missing or below *cost*.

    NOTE(review): this is a non-atomic read-then-update; two concurrent
    requests could both pass the balance check and double-spend. Consider an
    atomic decrement (e.g. a Postgres RPC with a WHERE guard) — confirm.
    """
    if cost < 1:
        # Defensive – we never expect non-positive costs
        cost = 1
    credit = (
        supabase.from_("User_Credit_Account")
        .select("num_of_available_gens")
        .eq("user_id", user_id)
        .single()
        .execute()
    )
    if not credit.data:
        raise HTTPException(status_code=403, detail="No credit account found. Please complete your profile.")
    available = credit.data["num_of_available_gens"]
    if available is None or available < cost:
        raise HTTPException(status_code=402, detail="No credits left. Please purchase more to generate models.")
    new_credits = available - cost
    supabase.from_("User_Credit_Account").update({"num_of_available_gens": new_credits}).eq("user_id", user_id).execute()
# Background task for text-to-3d processing
async def _process_text_to_3d_background(
    generated_model_id: int,
    user_id: str,
    original_prompt: str,
    should_texture: bool = False,
):
    """Background task to handle OpenAI reframing and Meshy API call for text-to-3d.

    Steps: (1) optionally rewrite the prompt via OpenAI, (2) persist the
    reframed prompt, (3) create the Meshy preview task, (4) store the Meshy
    task id back on the row. All failures are logged, never raised — this runs
    as a fire-and-forget FastAPI background task.

    Args:
        generated_model_id: Generated_Models row to update.
        user_id: Owner id — NOTE(review): unused in this body; kept for the
            background-task call signature. Confirm before removing.
        original_prompt: User-supplied prompt text.
        should_texture: Whether a refine/texture pass will follow.
    """
    try:
        # 1. Reframe prompt via OpenAI
        reframed_prompt = original_prompt  # Default fallback
        openai_key = os.getenv("OPENAI_API_KEY")
        if openai_key:
            try:
                async with httpx.AsyncClient(timeout=30.0) as client:
                    oai_resp = await client.post(
                        "https://api.openai.com/v1/chat/completions",
                        headers={"Authorization": f"Bearer {openai_key}"},
                        json={
                            "model": "gpt-4o-mini",
                            "messages": [
                                {"role": "system", "content": "REPLY ONLY WITH NEW PROMPT, no other text. IF USER PROMPT CONTAINS HARMFUL CONTENT, CHANGE IT TO SOMETHING SAFE and somewhat related. Rephrase the user description to simple and short."},
                                {"role": "user", "content": original_prompt},
                            ],
                        },
                    )
                    if oai_resp.status_code == 200:
                        reframed_prompt = oai_resp.json()["choices"][0]["message"]["content"]
            except Exception as ex:
                # Reframing is best-effort; fall back to the raw prompt.
                logging.warning(f"OpenAI reframing failed, using original prompt: {ex}")
        # 2. Update DB with reframed prompt
        try:
            supabase.from_("Generated_Models").update({
                "prompts_and_models_config": {
                    "generation_type": "text_to_3d",
                    "original_prompt": original_prompt,
                    "reframed_prompt": reframed_prompt,
                    "should_texture": should_texture,
                    "status": "processing",
                    "stage": "creating_3d_model",
                },
            }).eq("generated_model_id", generated_model_id).execute()
        except Exception as ex:
            logging.warning(f"Failed to update DB with reframed prompt: {ex}")
        # 3. Send request to Meshy
        meshy_key = os.getenv("MESHY_API_KEY")
        if not meshy_key:
            logging.error("MESHY_API_KEY not configured")
            return
        meshy_payload = {
            "mode": "preview",
            "prompt": reframed_prompt,
            "ai_model": "meshy-5",
        }
        async with httpx.AsyncClient(timeout=30.0) as client:
            meshy_resp = await client.post(
                "https://api.meshy.ai/openapi/v2/text-to-3d",
                headers={"Authorization": f"Bearer {meshy_key}"},
                json=meshy_payload,
            )
        if meshy_resp.status_code not in (200, 201, 202):
            logging.error(f"Meshy API failed: {meshy_resp.status_code} - {meshy_resp.text}")
            return
        meshy_data = meshy_resp.json()
        # The task id may appear under different keys; try all known variants.
        meshy_task_id = meshy_data.get("result") or meshy_data.get("id") or meshy_data.get("task_id")
        if not meshy_task_id:
            logging.error("No task ID received from Meshy API")
            return
        # 4. Finalize DB record
        try:
            supabase.from_("Generated_Models").update({
                "meshy_api_job_id": meshy_task_id,
                "prompts_and_models_config": {
                    "generation_type": "text_to_3d",
                    "original_prompt": original_prompt,
                    "reframed_prompt": reframed_prompt,
                    "should_texture": should_texture,
                    # Two-step texture flow starts in the preview stage.
                    "stage": "generating_preview" if should_texture else "generating",
                    "status": "processing",
                    "meshy_response": meshy_data,
                },
            }).eq("generated_model_id", generated_model_id).execute()
            logging.info(f"Successfully started text-to-3d generation for model {generated_model_id}")
        except Exception as ex:
            logging.error(f"Failed to update DB with Meshy taskId: {ex}")
    except Exception as ex:
        logging.error(f"Background processing failed for text-to-3d model {generated_model_id}: {ex}")
# CREDITS ARE NOT DECREMENTED HERE / DECREMENT BEFORE CALLING THIS FUNCTION
async def _process_hunyuan_image_to_3d_background(generated_model_id: int, image_url: str, user_id: str):
    """Background task to handle Hunyuan API call for image-to-3d.

    Pipeline: call Hunyuan, download the resulting mesh, render a PNG
    thumbnail (best-effort), mark the Generated_Models row COMPLETED, then
    store mesh and thumbnail as bytea rows in Model_Files.

    Args:
        generated_model_id: Generated_Models row id to update.
        image_url: Publicly reachable source-image URL handed to Hunyuan.
        user_id: Owner recorded on the inserted Model_Files rows.

    All failures are logged, never raised — fire-and-forget background task.
    """
    try:
        # NOTE(review): _hunyuan_image_to_3d is called without await, so it is
        # presumably synchronous and will block the event loop while the job
        # runs — confirm whether it should be dispatched to a thread.
        hunyuan_response = _hunyuan_image_to_3d(image_url)
        if not hunyuan_response:
            logging.error(f"Hunyuan API failed for model {generated_model_id}")
            return
        # Extract mesh URL from response (handles both response shapes seen)
        mesh_url = hunyuan_response.get("output", {}).get("mesh") if "output" in hunyuan_response else hunyuan_response.get("mesh")
        if not mesh_url:
            logging.error(f"No mesh URL found in Hunyuan response for model {generated_model_id}")
            return
        # Download the mesh file
        async with httpx.AsyncClient(timeout=60.0) as client:
            mesh_response = await client.get(mesh_url)
        if mesh_response.status_code != 200:
            logging.error(f"Failed to download mesh file from {mesh_url}: {mesh_response.status_code}")
            return
        mesh_data = mesh_response.content
        # Determine file format from URL or default to .glb
        file_name = mesh_url.split("/")[-1] if "/" in mesh_url else f"hunyuan_model_{generated_model_id}.glb"
        if "." not in file_name:
            file_name += ".glb"
        file_format = file_name.split(".")[-1].lower()
        file_size = len(mesh_data)
        # Generate thumbnail from the mesh data (best-effort; failure is non-fatal)
        thumbnail_data = None
        try:
            thumbnail_data = generate_thumbnail_from_bytes(mesh_data, size=512)
            logging.info(f"Successfully generated thumbnail for model {generated_model_id}")
        except Exception as ex:
            logging.error(f"Failed to generate thumbnail for model {generated_model_id}: {ex}")
        # Update DB record with Hunyuan response
        supabase.from_("Generated_Models").update({
            "status": "COMPLETED",
            "updated_at": "now()",
            "prompts_and_models_config": hunyuan_response,
        }).eq("generated_model_id", generated_model_id).execute()
        # Convert binary data to Postgres bytea hex format ("\\x" prefix) for safe insertion
        encoded_hex_data = "\\x" + mesh_data.hex()
        # Insert the mesh file into Model_Files table
        supabase.from_("Model_Files").insert({
            "user_id": user_id,
            "generated_model_id": generated_model_id,
            "model_data": encoded_hex_data,  # stored as hex string compatible with bytea
            "file_name": file_name,
            "file_format": file_format,
            "file_size": file_size,
            "metadata": f"Hunyuan3D generated mesh file. Original URL: {mesh_url}",
            "is_preview_file": False,
        }).execute()
        # Insert the thumbnail if generation was successful
        if thumbnail_data:
            try:
                # Convert thumbnail bytes to hex format for Postgres bytea
                thumbnail_hex_data = "\\x" + thumbnail_data.hex()
                thumbnail_file_name = f"thumbnail_{generated_model_id}.png"
                supabase.from_("Model_Files").insert({
                    "user_id": user_id,
                    "generated_model_id": generated_model_id,
                    "model_data": thumbnail_hex_data,
                    "file_name": thumbnail_file_name,
                    "file_format": "png",
                    "file_size": len(thumbnail_data),
                    "metadata": "Generated thumbnail image for 3D model preview",
                    "is_preview_file": True,  # Flag to indicate this is a thumbnail/preview
                }).execute()
                logging.info(f"Successfully stored thumbnail for model {generated_model_id}")
            except Exception as ex:
                logging.error(f"Failed to store thumbnail for model {generated_model_id}: {ex}")
        logging.info(f"Successfully completed Hunyuan image-to-3d generation for model {generated_model_id}")
    except Exception as ex:
        logging.error(f"Background processing failed for Hunyuan image-to-3d model {generated_model_id}: {ex}")
# Background task for image-to-3d processing
async def _process_image_to_3d_background(generated_model_id: int, payload: Dict[str, Any], generation_type: str):
    """Background task to handle Meshy API call for image-to-3d"""
    try:
        # Without an API key there is nothing to do; log and bail out.
        api_key = os.getenv("MESHY_API_KEY")
        if not api_key:
            logging.error("MESHY_API_KEY not configured")
            return
        # Multi-image jobs use a dedicated Meshy endpoint.
        endpoint = (
            "https://api.meshy.ai/openapi/v1/multi-image-to-3d"
            if generation_type == "multi_image_to_3d"
            else "https://api.meshy.ai/openapi/v1/image-to-3d"
        )
        async with httpx.AsyncClient(timeout=30.0) as client:
            resp = await client.post(
                endpoint,
                headers={"Authorization": f"Bearer {api_key}"},
                json=payload,
            )
            if resp.status_code not in (200, 201, 202):
                logging.error(f"Meshy API failed: {resp.status_code} - {resp.text}")
                return
            body = resp.json()
            # The task id may appear under different keys; try each in turn.
            task_id = body.get("result") or body.get("id") or body.get("task_id")
            if not task_id:
                logging.error("No task ID received from Meshy API")
                return
            # Persist the Meshy task id, merging into the existing config.
            try:
                current_record = supabase.from_("Generated_Models").select("prompts_and_models_config").eq("generated_model_id", generated_model_id).single().execute()
                existing_config = current_record.data.get("prompts_and_models_config", {}) if current_record.data else {}
                merged_config = dict(existing_config)
                merged_config.update({
                    "status": "processing",
                    "stage": "generating",
                    "meshy_response": body,
                })
                supabase.from_("Generated_Models").update({
                    "meshy_api_job_id": task_id,
                    "prompts_and_models_config": merged_config,
                }).eq("generated_model_id", generated_model_id).execute()
                logging.info(f"Successfully started {generation_type} generation for model {generated_model_id}")
            except Exception as ex:
                logging.error(f"Failed to update DB with Meshy taskId: {ex}")
    except Exception as ex:
        logging.error(f"Background processing failed for {generation_type} model {generated_model_id}: {ex}")
async def text_to_3d(prompt: TextPrompt, background_tasks: BackgroundTasks, current_user: User = Depends(get_current_active_user)):
    """
    Create a Meshy Text-to-3D generation job.

    Returns immediately after creating the database record. All processing
    (OpenAI reframing, Meshy API call) happens in the background.

    NOTE(review): no @router decorator is visible in this chunk — confirm how
    this endpoint is registered. Also note credits are decremented before the
    DB insert, so an insert failure leaves them spent.

    Args:
        prompt: TextPrompt body (text + optional should_texture flag).
        background_tasks: FastAPI scheduler used to run
            _process_text_to_3d_background after the response is sent.
        current_user: Authenticated user (injected dependency).

    Returns:
        JSONResponse containing generated_model_id and initializing status.

    Raises:
        HTTPException: 402/403 from the credit check, 500 when the DB record
        cannot be created.
    """
    # Determine credit cost (texture generation costs 3 credits)
    should_texture_flag = getattr(prompt, "should_texture", False)
    credit_cost = 3 if should_texture_flag else 1
    # Credit check and decrement
    await _check_and_decrement_credits(current_user.id, credit_cost)
    # Insert initial DB record and return immediately
    try:
        insert_res = supabase.from_("Generated_Models").insert({
            "status": "IN_PROGRESS",
            "user_id": current_user.id,
            "meshy_api_job_id": None,
            # Truncate long prompts to a 50-char display name with ellipsis.
            "model_name": f"{prompt.text[:50]}{'...' if len(prompt.text) > 50 else ''}",
            "prompts_and_models_config": {
                "generation_type": "text_to_3d",
                "original_prompt": prompt.text,
                "should_texture": should_texture_flag,
                "status": "initializing",
                "stage": "reframing_prompt",
            },
        }).execute()
        generated_model_id = insert_res.data[0]["generated_model_id"] if insert_res.data else None
        if not generated_model_id:
            raise HTTPException(status_code=500, detail="Failed to create model record")
        # Add background task for processing
        background_tasks.add_task(
            _process_text_to_3d_background,
            generated_model_id,
            current_user.id,
            prompt.text,
            should_texture_flag,
        )
        # Return immediately with explicit headers
        response_data = {
            "generated_model_id": generated_model_id,
            "status": "initializing",
            "original_prompt": prompt.text,
            "message": "Generation started. Use the progress_update endpoint to check status."
        }
        logging.info(f"Returning response for text-to-3d: {response_data}")
        response = JSONResponse(content=response_data, status_code=200)
        # Allowed header; avoids disallowed connection-specific headers under HTTP/2
        response.headers["Cache-Control"] = "no-cache"
        return response
    except HTTPException:
        # Fix: don't re-wrap the deliberate 500 above (or other HTTP errors)
        # into a second 500 — matches refresh_generated_model's pattern.
        raise
    except Exception as ex:
        logging.error(f"Failed to create initial model DB record: {ex}")
        raise HTTPException(status_code=500, detail=f"Failed to start generation: {ex}")
async def image_to_3d(
    background_tasks: BackgroundTasks,
    image: UploadFile = File(None),
    image_url: Optional[str] = Form(None),
    current_user: User = Depends(get_current_active_user),
):
    """
    Create a Hunyuan3D Image-to-3D generation job.

    The client can either:
    1. Upload an image file (multipart/form-data) via the "image" field
    2. Provide an already publicly accessible URL via the "image_url" form field

    If a file is uploaded we first store it in Supabase Storage and use the
    resulting public URL when triggering the Hunyuan job.

    Costs 2 credits. Returns immediately with an IN_PROGRESS record; the
    actual Hunyuan processing runs in a background task.

    Raises:
        HTTPException 400: no image source given, empty upload, or no usable URL.
        HTTPException 500: storage upload failure or DB insert failure.
    """
    # Validate input – at least one source must be provided
    if image is None and not image_url:
        raise HTTPException(status_code=400, detail="Either an image file or image_url must be provided")
    # If we received an image file, upload it to Supabase Storage to obtain a public URL
    if image is not None:
        content = await image.read()
        if not content:
            raise HTTPException(status_code=400, detail="Uploaded image is empty")
        # UploadFile.filename may be None (some multipart clients omit it);
        # guard before splitext so we don't raise TypeError.
        file_ext = os.path.splitext(image.filename or "")[1] or ".jpg"
        unique_name = f"{uuid4().hex}{file_ext}"
        # Determine bucket name (hard-coded to avoid missing env vars)
        bucket_name = "hunyuan-inputs"  # storage bucket for Hunyuan inputs
        try:
            # Upload bytes to Supabase Storage
            upload_resp = supabase.storage.from_(bucket_name).upload(
                unique_name,
                content,
                {"content-type": image.content_type or "application/octet-stream"},
            )
            # Handle both supabase-py <2.0 (dict response) and >=2.0 (UploadResponse object)
            upload_error = None
            if isinstance(upload_resp, dict):
                upload_error = upload_resp.get("error")
            elif hasattr(upload_resp, "error"):
                upload_error = upload_resp.error
            if upload_error:
                # Ensure we always raise a string for logging / HTTPException
                raise RuntimeError(str(upload_error))
            public_url_resp = supabase.storage.from_(bucket_name).get_public_url(unique_name)
            # Similar compatibility handling for get_public_url()
            if isinstance(public_url_resp, str):
                image_url = public_url_resp
            elif isinstance(public_url_resp, dict):
                image_url = public_url_resp.get("publicURL") or public_url_resp.get("publicUrl")
            elif hasattr(public_url_resp, "data") and isinstance(public_url_resp.data, dict):
                image_url = public_url_resp.data.get("publicURL") or public_url_resp.data.get("publicUrl")
            else:
                image_url = None
            if not image_url:
                raise RuntimeError("Failed to retrieve public URL for uploaded image")
        except Exception as ex:
            logging.error(f"Failed to upload image to Supabase storage: {ex}")
            raise HTTPException(status_code=500, detail="Failed to upload image to storage")
    # At this point, image_url should be a publicly accessible URL
    if not image_url:
        raise HTTPException(status_code=400, detail="Could not determine image URL")
    # Credit check and decrement - Hunyuan generation costs 2 credits
    await _check_and_decrement_credits(current_user.id, 2)
    # Human-readable source label for the model name; tolerate a missing filename.
    if image is not None:
        source_name = image.filename or "uploaded image"
    else:
        source_name = image_url.split('/')[-1] if '/' in image_url else 'image'
    # Insert initial DB record and return immediately
    try:
        insert_res = supabase.from_("Generated_Models").insert({
            "status": "IN_PROGRESS",
            "user_id": current_user.id,
            "meshy_api_job_id": None,
            "model_name": f"Hunyuan 3D from {source_name}",
            "prompts_and_models_config": {
                "generation_type": "hunyuan_image_to_3d",
                "input_image_url": image_url,
                "status": "initializing",
                "stage": "processing_image",
            },
        }).execute()
        generated_model_id = insert_res.data[0]["generated_model_id"] if insert_res.data else None
        if not generated_model_id:
            raise HTTPException(status_code=500, detail="Failed to create model record")
        # Add background task for Hunyuan processing
        background_tasks.add_task(
            _process_hunyuan_image_to_3d_background,
            generated_model_id,
            image_url,
            current_user.id,
        )
        response_data = {
            "generated_model_id": generated_model_id,
            "status": "initializing",
            "input_image_url": image_url,
            "message": "Hunyuan 3D generation started. Use the progress_update endpoint to check status.",
        }
        logging.info(f"Returning response for hunyuan image-to-3d: {response_data}")
        response = JSONResponse(content=response_data, status_code=200)
        # Allowed header; avoids disallowed connection-specific headers under HTTP/2
        response.headers["Cache-Control"] = "no-cache"
        return response
    except Exception as ex:
        logging.error(f"Failed to create initial model DB record: {ex}")
        raise HTTPException(status_code=500, detail=f"Failed to start generation: {ex}")
async def get_model_file(
    generated_model_id: str
):
    """
    Return metadata about the single model file attached to a generated model.

    Ownership filters are deliberately disabled: the endpoint is publicly
    readable by anyone holding the numeric model id.
    """
    try:
        # Guard clauses: placeholder ids and non-numeric ids are rejected early.
        if generated_model_id.startswith("placeholder_"):
            raise HTTPException(status_code=400, detail="Invalid model ID. Model may not be ready yet or generation is still initializing.")
        try:
            model_pk = int(generated_model_id)
        except ValueError:
            raise HTTPException(status_code=400, detail="Invalid model ID format. Expected numeric ID.")
        # Confirm the model row exists (no user filter -> public access).
        model_rows = (
            supabase.from_("Generated_Models")
            .select("generated_model_id, user_id, model_name")
            .eq("generated_model_id", model_pk)
            .limit(1)
            .execute()
        )
        if not model_rows.data:
            raise HTTPException(status_code=404, detail="Model not found.")
        # Fetch the single file record attached to this model.
        file_rows = (
            supabase.from_("Model_Files")
            .select("model_file_id, file_name, file_format, file_size, metadata, is_preview_file, created_at")
            .eq("generated_model_id", model_pk)
            .limit(1)
            .execute()
        )
        if not file_rows.data:
            raise HTTPException(status_code=404, detail="No model file found for this generated model.")
        return {
            "generated_model_id": generated_model_id,
            "model_name": model_rows.data[0].get("model_name"),
            "file": file_rows.data[0],
        }
    except HTTPException:
        # HTTP errors propagate untouched.
        raise
    except Exception as e:
        logging.error(f"Failed to get model file for {generated_model_id}: {str(e)}")
        raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}")
async def download_model_file(
    generated_model_id: str
):
    """
    Download the model file for a generated model as an attachment.

    Ownership checks are disabled (public access). The stored bytea payload
    may arrive either base64-encoded or as a Postgres hex escape string
    ("\\x..."); both forms are decoded before the bytes are returned.

    Raises:
        HTTPException 400: placeholder or non-numeric id.
        HTTPException 404: model or file row missing.
        HTTPException 500: undecodable payload or unexpected failure.
    """
    try:
        # Handle placeholder IDs and validate integer format
        if generated_model_id.startswith("placeholder_"):
            raise HTTPException(status_code=400, detail="Invalid model ID. Model may not be ready yet or generation is still initializing.")
        try:
            model_id_int = int(generated_model_id)
        except ValueError:
            raise HTTPException(status_code=400, detail="Invalid model ID format. Expected numeric ID.")
        # First check if the model exists (removed user ownership check for public access)
        model_check = supabase.from_("Generated_Models").select("generated_model_id, user_id, model_name").eq("generated_model_id", model_id_int).limit(1).execute()
        if not model_check.data:
            raise HTTPException(status_code=404, detail="Model not found.")
        # Get the model file (removed ownership verification for public access)
        file_result = supabase.from_("Model_Files").select("*").eq("generated_model_id", model_id_int).limit(1).execute()
        if not file_result.data:
            raise HTTPException(status_code=404, detail="No model file found for this generated model.")
        file_data = file_result.data[0]
        raw_data = file_data.get("model_data")
        if isinstance(raw_data, str):
            # Check the hex escape form FIRST: base64.b64decode() silently
            # discards non-alphabet characters by default, so trying base64
            # first can mis-decode a "\x..."-prefixed hex payload instead of
            # raising and reaching the hex fallback.
            if raw_data.startswith("\\x"):
                try:
                    raw_data = bytes.fromhex(raw_data[2:])
                except ValueError:
                    raw_data = None  # corrupt hex -> explicit 500 below
            else:
                try:
                    raw_data = base64.b64decode(raw_data)
                except Exception:
                    raw_data = None  # corrupt base64 -> explicit 500 below
        if raw_data is None:
            raise HTTPException(status_code=500, detail="Failed to decode model file data.")
        # Quote the filename so spaces/special characters survive the header.
        filename = file_data['file_name']
        return Response(
            content=raw_data,
            media_type="application/octet-stream",
            headers={
                "Content-Disposition": f'attachment; filename="{filename}"'
            }
        )
    except HTTPException:
        # Re-raise HTTP exceptions as-is
        raise
    except Exception as e:
        logging.error(f"Failed to download model file for {generated_model_id}: {str(e)}")
        raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}")
async def view_model_file(
    generated_model_id: str
):
    """
    Serve the 3D model file inline for frontend 3D viewers.

    This endpoint serves the file with appropriate headers for direct
    consumption by 3D libraries. Ownership checks are disabled (public
    access), CORS headers are set explicitly, and the MIME type is derived
    from the stored file format.

    Raises:
        HTTPException 400: placeholder or non-numeric id.
        HTTPException 404: model or file row missing.
        HTTPException 500: undecodable payload or unexpected failure.
    """
    try:
        # Handle placeholder IDs and validate integer format
        if generated_model_id.startswith("placeholder_"):
            logging.warning(f"Placeholder ID received: {generated_model_id}")
            raise HTTPException(status_code=400, detail="Invalid model ID. Model may not be ready yet or generation is still initializing.")
        try:
            model_id_int = int(generated_model_id)
        except ValueError:
            logging.error(f"Invalid model ID format: {generated_model_id}")
            raise HTTPException(status_code=400, detail="Invalid model ID format. Expected numeric ID.")
        logging.info(f"Looking up model {model_id_int} for view endpoint")
        # First check if the model exists (removed user ownership check for public access)
        model_check = supabase.from_("Generated_Models").select("generated_model_id, user_id, model_name, status").eq("generated_model_id", model_id_int).limit(1).execute()
        if not model_check.data:
            logging.error(f"Model {model_id_int} not found in Generated_Models table")
            raise HTTPException(status_code=404, detail="Model not found.")
        logging.info(f"Found model {model_id_int}: {model_check.data[0]}")
        # Get the model file (removed ownership verification for public access)
        file_result = supabase.from_("Model_Files").select("*").eq("generated_model_id", model_id_int).limit(1).execute()
        if not file_result.data:
            logging.error(f"No model file found for model {model_id_int} in Model_Files table")
            raise HTTPException(status_code=404, detail="No model file found for this generated model.")
        file_data = file_result.data[0]
        raw_data = file_data.get("model_data")
        if isinstance(raw_data, str):
            # Check the Postgres hex escape form ("\x...") FIRST:
            # base64.b64decode() discards non-alphabet characters by default
            # and can therefore mis-decode a hex payload instead of raising.
            if raw_data.startswith("\\x"):
                try:
                    raw_data = bytes.fromhex(raw_data[2:])
                except ValueError:
                    raw_data = None  # corrupt hex -> explicit 500 below
            else:
                try:
                    raw_data = base64.b64decode(raw_data)
                except Exception:
                    raw_data = None  # corrupt base64 -> explicit 500 below
        if raw_data is None:
            raise HTTPException(status_code=500, detail="Failed to decode model file data.")
        # Determine appropriate MIME type based on file format.
        # `or ""` guards against an explicit NULL column value (get() only
        # applies the default when the key is absent).
        file_format = (file_data.get("file_format") or "").lower()
        content_type = "application/octet-stream"  # Default fallback
        if file_format == "glb":
            content_type = "model/gltf-binary"
        elif file_format == "gltf":
            content_type = "model/gltf+json"
        elif file_format == "obj":
            content_type = "text/plain"  # OBJ files are text-based
        elif file_format == "stl":
            content_type = "model/stl"
        elif file_format == "fbx":
            content_type = "application/octet-stream"
        return Response(
            content=raw_data,
            media_type=content_type,
            headers={
                "Access-Control-Allow-Origin": "*",
                "Access-Control-Allow-Methods": "GET, HEAD, OPTIONS",
                "Access-Control-Allow-Headers": "Authorization, Content-Type",
                "Cache-Control": "public, max-age=3600"  # Cache for 1 hour
            }
        )
    except HTTPException:
        # Re-raise HTTP exceptions as-is
        raise
    except Exception as e:
        logging.error(f"Failed to serve model file for {generated_model_id}: {str(e)}")
        raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}")
# Allow URLs like /user/models/{id}/view.glb or .gltf etc.
async def view_model_file_with_ext(generated_model_id: str, file_ext: str):
    """Proxy to view_model_file to serve model regardless of extension in URL.

    The `file_ext` path segment exists only so viewer libraries that infer
    format from the URL suffix can load the file; it is intentionally unused
    here — the actual format comes from the DB record in view_model_file.
    """
    return await view_model_file(generated_model_id)
async def get_model_thumbnail(generated_model_id: str):
    """
    Serve the thumbnail image for a generated model.

    Returns a PNG image that can be displayed in the frontend for model
    previews. Ownership checks are disabled (public access); the thumbnail
    is located as the preview-flagged PNG row in Model_Files.

    Raises:
        HTTPException 400: placeholder or non-numeric id.
        HTTPException 404: model or thumbnail row missing.
        HTTPException 500: undecodable payload or unexpected failure.
    """
    try:
        # Handle placeholder IDs and validate integer format
        if generated_model_id.startswith("placeholder_"):
            raise HTTPException(status_code=400, detail="Invalid model ID. Model may not be ready yet or generation is still initializing.")
        try:
            model_id_int = int(generated_model_id)
        except ValueError:
            raise HTTPException(status_code=400, detail="Invalid model ID format. Expected numeric ID.")
        # First check if the model exists (removed user ownership check for public access)
        model_check = supabase.from_("Generated_Models").select("generated_model_id, user_id, model_name").eq("generated_model_id", model_id_int).limit(1).execute()
        if not model_check.data:
            raise HTTPException(status_code=404, detail="Model not found.")
        # Get the thumbnail file (removed ownership verification for public access)
        thumbnail_result = supabase.from_("Model_Files").select("*").eq("generated_model_id", model_id_int).eq("is_preview_file", True).eq("file_format", "png").limit(1).execute()
        if not thumbnail_result.data:
            raise HTTPException(status_code=404, detail="No thumbnail found for this model.")
        thumbnail_data = thumbnail_result.data[0]
        raw_data = thumbnail_data.get("model_data")
        if isinstance(raw_data, str):
            # Check the Postgres hex escape form ("\x...") FIRST:
            # base64.b64decode() discards non-alphabet characters by default
            # and can therefore mis-decode a hex payload instead of raising.
            if raw_data.startswith("\\x"):
                try:
                    raw_data = bytes.fromhex(raw_data[2:])
                except ValueError:
                    raw_data = None  # corrupt hex -> explicit 500 below
            else:
                try:
                    raw_data = base64.b64decode(raw_data)
                except Exception:
                    raw_data = None  # corrupt base64 -> explicit 500 below
        if raw_data is None:
            raise HTTPException(status_code=500, detail="Failed to decode thumbnail data.")
        return Response(
            content=raw_data,
            media_type="image/png",
            headers={
                "Access-Control-Allow-Origin": "*",
                "Access-Control-Allow-Methods": "GET, HEAD, OPTIONS",
                "Access-Control-Allow-Headers": "Authorization, Content-Type",
                "Cache-Control": "public, max-age=3600"  # Cache for 1 hour
            }
        )
    except HTTPException:
        # Re-raise HTTP exceptions as-is
        raise
    except Exception as e:
        logging.error(f"Failed to serve thumbnail for {generated_model_id}: {str(e)}")
        raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}")
async def debug_model(generated_model_id: str):
    """
    Debug endpoint to check model and file existence in production.

    Never raises: every failure mode is reported as an "error" key in the
    returned dict so the endpoint is always safe to hit.
    """
    try:
        # Early exits for ids that cannot resolve to a database row.
        if generated_model_id.startswith("placeholder_"):
            return {"error": "Placeholder ID", "generated_model_id": generated_model_id}
        try:
            model_pk = int(generated_model_id)
        except ValueError:
            return {"error": "Invalid ID format", "generated_model_id": generated_model_id}
        # Pull the model row and all of its file rows.
        model_rows = supabase.from_("Generated_Models").select("*").eq("generated_model_id", model_pk).execute()
        file_rows = supabase.from_("Model_Files").select("*").eq("generated_model_id", model_pk).execute()
        models = model_rows.data or []
        files = file_rows.data or []
        return {
            "generated_model_id": model_pk,
            "model_exists": bool(models),
            "model_data": models[0] if models else None,
            "files_exist": bool(files),
            "files_count": len(files),
            "files_data": files,
        }
    except Exception as e:
        return {
            "error": f"Debug error: {str(e)}",
            "generated_model_id": generated_model_id,
        }
async def delete_model(
    generated_model_id: str,
    current_user: User = Depends(get_current_active_user)
):
    """
    Delete a generated model and its associated files for the current user.

    Every query is scoped to the authenticated user's id, so one user can
    never delete another user's model. Dependent Model_Files rows are
    removed before the parent Generated_Models row.
    """
    try:
        # Guard clauses: reject placeholder ids and non-numeric ids up front.
        if generated_model_id.startswith("placeholder_"):
            raise HTTPException(status_code=400, detail="Invalid model ID. Model may not be ready yet or generation is still initializing.")
        try:
            model_pk = int(generated_model_id)
        except ValueError:
            raise HTTPException(status_code=400, detail="Invalid model ID format. Expected numeric ID.")
        # Confirm the model exists and is owned by the caller.
        owned = (
            supabase.from_("Generated_Models")
            .select("generated_model_id, user_id, model_name")
            .eq("generated_model_id", model_pk)
            .eq("user_id", current_user.id)
            .limit(1)
            .execute()
        )
        if not owned.data:
            raise HTTPException(status_code=404, detail="Model not found or you do not have permission to delete it.")
        # Remove dependent file rows first.
        removed_files = (
            supabase.from_("Model_Files")
            .delete()
            .eq("generated_model_id", model_pk)
            .eq("user_id", current_user.id)
            .execute()
        )
        file_count = len(removed_files.data) if removed_files.data else 0
        if file_count > 0:
            logging.info(f"Deleted {file_count} model file(s) for model {generated_model_id}")
        # Remove the model record itself; the delete returns the deleted rows.
        removed_model = (
            supabase.from_("Generated_Models")
            .delete()
            .eq("generated_model_id", model_pk)
            .eq("user_id", current_user.id)
            .execute()
        )
        if not removed_model.data:
            raise HTTPException(status_code=500, detail="Failed to delete model from database.")
        logging.info(f"Successfully deleted model {generated_model_id} for user {current_user.id}")
        return {
            "message": "Model and associated files deleted successfully.",
            "deleted_model_id": generated_model_id,
            "model_name": owned.data[0].get("model_name", "Unknown"),
            "files_deleted": file_count,
        }
    except HTTPException:
        # HTTP errors propagate untouched.
        raise
    except Exception as e:
        logging.error(f"Failed to delete model {generated_model_id}: {str(e)}")
        raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}")