Xernive's picture
fix: revert to API client with better error handling (Hunyuan3D not pip-installable)
26f8b9a
"""Hunyuan3D-2.1 LOCAL generation using your L4 GPU."""
# CRITICAL: Import spaces BEFORE torch/CUDA packages
import spaces
import torch
from pathlib import Path
from PIL import Image
from core.config import QualityPreset
from utils.memory import MemoryManager
class HunyuanLocalGenerator:
"""Generates 3D models using Hunyuan3D-2.1 LOCALLY on your L4 GPU."""
def __init__(self):
self.memory_manager = MemoryManager()
self.pipeline = None
self._model_loaded = False
def _load_model(self):
"""Load Hunyuan3D-2.1 model (lazy loading)."""
if self._model_loaded:
return
print("[Hunyuan3D Local] Loading model from HuggingFace Hub...")
try:
# Use diffusers pipeline (Hunyuan3D is based on diffusers)
from diffusers import DiffusionPipeline
# Load model from HuggingFace Hub
self.pipeline = DiffusionPipeline.from_pretrained(
'tencent/Hunyuan3D-2.1',
torch_dtype=torch.float16,
device_map="auto",
trust_remote_code=True # Required for custom models
)
print("[Hunyuan3D Local] Model loaded successfully!")
self._model_loaded = True
except Exception as e:
print(f"[Hunyuan3D Local] Failed to load model: {e}")
print("[Hunyuan3D Local] Falling back to API client...")
# Model loading failed, we'll need to use API client
self._model_loaded = False
raise RuntimeError(
f"Failed to load Hunyuan3D-2.1 model locally: {e}\n"
f"The model may not support direct loading via diffusers.\n"
f"Falling back to API client (external space)."
)
@spaces.GPU(duration=120)
def generate(
self,
image_path: Path,
preset: QualityPreset,
output_dir: Path
) -> Path:
"""
Generate 3D model from 2D image using LOCAL Hunyuan3D.
Args:
image_path: Path to input image
preset: Quality preset with generation parameters
output_dir: Directory to save output
Returns:
Path to generated GLB file
"""
try:
print(f"[Hunyuan3D Local] Generating 3D model: {preset.name} quality")
print(f"[Hunyuan3D Local] Input image: {image_path}")
print(f"[Hunyuan3D Local] Settings: steps={preset.hunyuan_steps}, guidance={preset.hunyuan_guidance}, octree={preset.octree_resolution}")
# Validate input image exists
if not image_path.exists():
raise FileNotFoundError(f"Input image not found: {image_path}")
# Load model (lazy loading)
self._load_model()
# Load image
print(f"[Hunyuan3D Local] Loading image...")
image = Image.open(image_path).convert('RGB')
# Generate 3D model
print(f"[Hunyuan3D Local] Generating mesh...")
result = self.pipeline(
image=image,
num_inference_steps=preset.hunyuan_steps,
guidance_scale=preset.hunyuan_guidance,
octree_resolution=preset.octree_resolution,
seed=1234
)
# Extract mesh (result is a list with mesh as first element)
if not result or len(result) == 0:
raise ValueError("Hunyuan3D returned empty result")
mesh = result[0]
print(f"[Hunyuan3D Local] Mesh generated successfully")
# Save as GLB
output_path = output_dir / f"hunyuan_{int(Path(image_path).stem.split('_')[-1])}.glb"
mesh.export(str(output_path))
print(f"[Hunyuan3D Local] Model saved: {output_path}")
# Cleanup
import gc
gc.collect()
torch.cuda.empty_cache()
return output_path
except Exception as e:
import traceback
error_details = traceback.format_exc()
print(f"[Hunyuan3D Local] ERROR: {e}")
print(f"[Hunyuan3D Local] Full traceback:\n{error_details}")
# Provide helpful error message
if "out of memory" in str(e).lower():
raise RuntimeError(
f"GPU out of memory. Try using a lower quality preset (Fast or Balanced)."
) from e
elif "model" in str(e).lower() and "not found" in str(e).lower():
raise RuntimeError(
f"Hunyuan3D model not found. Check requirements.txt includes:\n"
f" git+https://github.com/Tencent-Hunyuan/Hunyuan3D-2.1.git"
) from e
else:
raise RuntimeError(
f"Hunyuan3D generation failed: {e}. Check logs for details."
) from e