Bobby committed on
Commit
a00e29b
·
1 Parent(s): b2ce605

Revert app to original TRELLIS runtime with ZeroGPU preview-first flow

Browse files
This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50) hide show
  1. .gitignore +1 -2
  2. Dockerfile +6 -1
  3. app.py +527 -261
  4. requirements.txt +9 -0
  5. trellis/__init__.py +6 -0
  6. trellis/models/__init__.py +70 -0
  7. trellis/models/sparse_structure_flow.py +204 -0
  8. trellis/models/sparse_structure_vae.py +310 -0
  9. trellis/models/structured_latent_flow.py +262 -0
  10. trellis/models/structured_latent_vae/__init__.py +4 -0
  11. trellis/models/structured_latent_vae/base.py +117 -0
  12. trellis/models/structured_latent_vae/decoder_gs.py +122 -0
  13. trellis/models/structured_latent_vae/decoder_mesh.py +173 -0
  14. trellis/models/structured_latent_vae/decoder_rf.py +104 -0
  15. trellis/models/structured_latent_vae/encoder.py +72 -0
  16. trellis/modules/attention/__init__.py +38 -0
  17. trellis/modules/attention/full_attn.py +140 -0
  18. trellis/modules/attention/modules.py +146 -0
  19. trellis/modules/norm.py +27 -0
  20. trellis/modules/sparse/__init__.py +102 -0
  21. trellis/modules/sparse/attention/__init__.py +4 -0
  22. trellis/modules/sparse/attention/full_attn.py +215 -0
  23. trellis/modules/sparse/attention/modules.py +139 -0
  24. trellis/modules/sparse/attention/serialized_attn.py +193 -0
  25. trellis/modules/sparse/attention/windowed_attn.py +135 -0
  26. trellis/modules/sparse/basic.py +459 -0
  27. trellis/modules/sparse/conv/__init__.py +21 -0
  28. trellis/modules/sparse/conv/conv_spconv.py +80 -0
  29. trellis/modules/sparse/conv/conv_torchsparse.py +38 -0
  30. trellis/modules/sparse/linear.py +15 -0
  31. trellis/modules/sparse/nonlinearity.py +35 -0
  32. trellis/modules/sparse/norm.py +60 -0
  33. trellis/modules/sparse/spatial.py +110 -0
  34. trellis/modules/sparse/transformer/__init__.py +2 -0
  35. trellis/modules/sparse/transformer/blocks.py +151 -0
  36. trellis/modules/sparse/transformer/modulated.py +166 -0
  37. trellis/modules/spatial.py +48 -0
  38. trellis/modules/transformer/__init__.py +2 -0
  39. trellis/modules/transformer/blocks.py +182 -0
  40. trellis/modules/transformer/modulated.py +157 -0
  41. trellis/modules/utils.py +54 -0
  42. trellis/pipelines/__init__.py +24 -0
  43. trellis/pipelines/base.py +120 -0
  44. trellis/pipelines/samplers/__init__.py +2 -0
  45. trellis/pipelines/samplers/base.py +20 -0
  46. trellis/pipelines/samplers/classifier_free_guidance_mixin.py +12 -0
  47. trellis/pipelines/samplers/flow_euler.py +205 -0
  48. trellis/pipelines/samplers/guidance_interval_mixin.py +15 -0
  49. trellis/pipelines/trellis_image_to_3d.py +426 -0
  50. trellis/pipelines/trellis_image_to_3d_cpu.py +726 -0
.gitignore CHANGED
@@ -4,8 +4,7 @@ __pycache__/
4
  *.pyc
5
  cache/
6
  tmp/
7
- trellis/
8
  extensions/
9
  wheels/
10
  TRELLIS.2/
11
- old/
 
4
  *.pyc
5
  cache/
6
  tmp/
 
7
  extensions/
8
  wheels/
9
  TRELLIS.2/
10
+ old/
Dockerfile CHANGED
@@ -7,7 +7,12 @@ WORKDIR /app
7
  RUN apt-get update && apt-get install -y --no-install-recommends \
8
  python3 \
9
  python3-pip \
 
 
 
10
  libgl1-mesa-glx \
 
 
11
  libglib2.0-0 \
12
  libsm6 \
13
  libxext6 \
@@ -73,4 +78,4 @@ RUN wget -P assets/example_multi_image https://raw.githubusercontent.com/trellis
73
  EXPOSE 7860
74
 
75
  # Command to run the application
76
- CMD ["python", "app.py"]
 
7
  RUN apt-get update && apt-get install -y --no-install-recommends \
8
  python3 \
9
  python3-pip \
10
+ python3-dev \
11
+ build-essential \
12
+ cmake \
13
  libgl1-mesa-glx \
14
+ libgl1-mesa-dev \
15
+ libegl1 \
16
  libglib2.0-0 \
17
  libsm6 \
18
  libxext6 \
 
78
  EXPOSE 7860
79
 
80
  # Command to run the application
81
+ CMD ["python", "app.py"]
app.py CHANGED
@@ -1,38 +1,51 @@
1
  import argparse
 
2
  import os
3
  import sys
4
  import time
 
 
5
 
6
- os.environ["OPENCV_IO_ENABLE_OPENEXR"] = '1'
7
  os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
8
- os.environ["ATTN_BACKEND"] = "flash_attn_3"
9
- os.environ["FLEX_GEMM_AUTOTUNE_CACHE_PATH"] = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'autotune_cache.json')
10
- os.environ["FLEX_GEMM_AUTOTUNER_VERBOSE"] = '1'
 
 
 
11
  os.environ.setdefault("TRELLIS_REMBG_MODEL", "briaai/RMBG-2.0")
12
 
 
13
  import gradio as gr
 
 
14
  import spaces
15
- from gradio_litmodel3d import LitModel3D
16
- sys.path.append(os.getcwd())
17
- import cv2
18
- from typing import *
19
  import torch
20
- import numpy as np
21
- import imageio
22
- from PIL import Image
23
  import trimesh
24
- from datetime import datetime
25
- import logging
 
 
 
 
 
 
 
 
26
 
27
  from trellis2.pipelines import Trellis2ImageTo3DPipeline
28
  from trellis2.renderers import EnvMap
29
- from trellis2.utils import render_utils
 
30
  import o_voxel
31
 
32
- logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
 
 
33
  logger = logging.getLogger(__name__)
34
 
35
- parser = argparse.ArgumentParser(description="Pocket 3D AI 2")
36
  parser.add_argument("--prod", action="store_true", help="Run in production mode")
37
  parser.add_argument("--port", type=int, help="Port to run the server on (default: 8081 for prod, 8080 for dev)")
38
  cmd_args, _unknown_args = parser.parse_known_args()
@@ -42,8 +55,13 @@ port = cmd_args.port if cmd_args.port else (8081 if prod else 8080)
42
  show_options = not prod
43
  RUNNING_ON_SPACES = bool(os.getenv("SPACE_ID"))
44
 
 
 
 
 
 
45
  MAX_SEED = np.iinfo(np.int32).max
46
- TMP_DIR = os.path.join('cache')
47
  os.makedirs(TMP_DIR, exist_ok=True)
48
 
49
  if gr.NO_RELOAD:
@@ -55,36 +73,45 @@ if gr.NO_RELOAD:
55
  if pipeline is not None:
56
  return
57
 
58
- logger.info("Initializing TRELLIS.2 pipeline...")
59
  start_time = time.time()
60
 
61
  try:
62
- pipeline = Trellis2ImageTo3DPipeline.from_pretrained('microsoft/TRELLIS.2-4B')
63
- pipeline.low_vram = False
64
- pipeline._device = 'cpu'
65
- logger.info(f"Background remover model: {os.getenv('TRELLIS_REMBG_MODEL', 'default')}")
66
-
67
- envmap = {}
68
- for name in ['forest', 'sunset', 'courtyard']:
69
- exr_path = os.path.join('assets', 'hdri', f'{name}.exr')
70
- if os.path.exists(exr_path):
71
- exr = cv2.imread(exr_path, cv2.IMREAD_UNCHANGED)
72
- if exr is None:
73
- continue
74
- if RUNNING_ON_SPACES:
75
- exr = cv2.resize(exr, (512, 256), interpolation=cv2.INTER_AREA)
76
- envmap[name] = cv2.cvtColor(exr, cv2.COLOR_BGR2RGB)
77
-
78
- logger.info(f"Pipeline initialized in {time.time() - start_time:.2f} seconds.")
 
 
 
 
 
 
 
 
 
79
  except Exception as e:
80
- logger.error(f"Failed to initialize pipeline: {e}", exc_info=True)
81
  pipeline = None
82
  raise
83
 
84
  initialize_pipeline()
85
 
86
 
87
- def clear_cuda_cache():
88
  if torch.cuda.is_available():
89
  torch.cuda.empty_cache()
90
 
@@ -92,6 +119,7 @@ def clear_cuda_cache():
92
  def normalize_video_frames(frames: Any) -> List[np.ndarray]:
93
  if frames is None:
94
  return []
 
95
  def _normalize_frame(arr: np.ndarray) -> Optional[np.ndarray]:
96
  if arr is None or arr.ndim != 3:
97
  return None
@@ -106,6 +134,7 @@ def normalize_video_frames(frames: Any) -> List[np.ndarray]:
106
  arr = (arr + 1.0) * 127.5
107
  arr = np.clip(arr, 0.0, 255.0)
108
  return arr.astype(np.uint8)
 
109
  if isinstance(frames, np.ndarray):
110
  if frames.ndim == 4:
111
  return [nf for f in frames if (nf := _normalize_frame(f)) is not None]
@@ -113,6 +142,7 @@ def normalize_video_frames(frames: Any) -> List[np.ndarray]:
113
  nf = _normalize_frame(frames)
114
  return [nf] if nf is not None else []
115
  return []
 
116
  normalized = []
117
  for frame in frames:
118
  if frame is None:
@@ -142,7 +172,7 @@ def write_mp4(video_path: str, frames: List[np.ndarray], fps: int = 15) -> bool:
142
  if os.path.exists(video_path) and os.path.getsize(video_path) > 0:
143
  return True
144
  except Exception as ffmpeg_err:
145
- logger.warning(f"FFMPEG video writer failed: {ffmpeg_err}")
146
 
147
  try:
148
  h, w = frames[0].shape[:2]
@@ -157,7 +187,7 @@ def write_mp4(video_path: str, frames: List[np.ndarray], fps: int = 15) -> bool:
157
  if os.path.exists(video_path) and os.path.getsize(video_path) > 0:
158
  return True
159
  except Exception as opencv_err:
160
- logger.error(f"OpenCV video writer failed: {opencv_err}", exc_info=True)
161
 
162
  return False
163
 
@@ -177,7 +207,7 @@ def preprocess_image(image: Optional[Image.Image]) -> Optional[Image.Image]:
177
  try:
178
  return pipeline.preprocess_image(image)
179
  except Exception as e:
180
- logger.error(f"Error during image preprocessing: {e}", exc_info=True)
181
  return None
182
 
183
 
@@ -187,7 +217,7 @@ def get_seed(randomize_seed: bool, seed: int) -> int:
187
 
188
  def export_stl_from_glb(glb_path: str) -> Optional[str]:
189
  stl_path = None
190
- mesh_data = trimesh.load_mesh(glb_path, force='mesh')
191
  mesh_to_export = None
192
 
193
  if isinstance(mesh_data, trimesh.Scene):
@@ -210,206 +240,417 @@ def export_stl_from_glb(glb_path: str) -> Optional[str]:
210
  if current_size > 0:
211
  mesh_to_export.vertices *= target_size_mm / current_size
212
  current_time_stl = datetime.now().strftime("%Y-%m%d-%H%M%S-%f")
213
- stl_path = os.path.join(TMP_DIR, f'{current_time_stl}.stl')
214
  mesh_to_export.export(stl_path)
215
- logger.info(f"STL exported: {stl_path}")
216
 
217
  return stl_path
218
 
219
 
220
- @spaces.GPU(duration=180)
221
- def process_image_yielding(
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
222
  image: Optional[Image.Image],
223
  seed: int,
224
- resolution: str,
225
  ss_guidance_strength: float,
226
  ss_sampling_steps: int,
227
- shape_guidance_strength: float,
228
- shape_sampling_steps: int,
229
- tex_guidance_strength: float,
230
- tex_sampling_steps: int,
231
- mesh_simplify: int,
232
- texture_size: int,
233
- do_preprocess: bool,
234
  req: gr.Request,
235
- progress=gr.Progress(track_tqdm=True)
236
- ) -> Generator:
237
  if image is None or pipeline is None:
238
- yield (
239
- None,
240
- gr.update(value=None, visible=True, label="Preview unavailable"),
241
- gr.update(value=None, visible=True, interactive=False),
242
- )
243
- return
244
 
245
  if RUNNING_ON_SPACES:
246
- if resolution != "512":
247
- logger.info(f"ZeroGPU mode: forcing resolution {resolution} -> 512.")
248
- resolution = "512"
249
  ss_sampling_steps = min(ss_sampling_steps, 6)
250
- shape_sampling_steps = min(shape_sampling_steps, 6)
251
- tex_sampling_steps = min(tex_sampling_steps, 6)
252
- mesh_simplify = min(mesh_simplify, 160000)
253
- texture_size = min(texture_size, 512)
254
 
255
- try:
256
- job_start = time.time()
257
- if not torch.cuda.is_available():
258
- raise gr.Error("GPU is not ready. Please retry in a few seconds.")
259
-
260
- pipeline.cuda()
261
- loaded_envmap = {}
262
- for name, exr_data in envmap.items():
263
- loaded_envmap[name] = EnvMap(torch.tensor(exr_data, dtype=torch.float32, device='cuda'))
264
- preview_envmap = loaded_envmap.get("sunset") if loaded_envmap else None
265
- if preview_envmap is None and loaded_envmap:
266
- preview_envmap = next(iter(loaded_envmap.values()))
267
-
268
- yield (
269
- None,
270
- gr.update(value=None, visible=True, label="Preparing preview..."),
271
- gr.update(value=None, visible=True, interactive=False),
272
- )
273
 
274
- if do_preprocess:
275
- progress(0.03, desc=f"Removing background... {time.time() - job_start:.1f}s")
276
- image = preprocess_image(image)
277
- if image is None:
278
- raise gr.Error("Image preprocessing failed. Please try a different image.")
279
- elif image.mode != "RGB":
280
- image = image.convert("RGB")
281
-
282
- progress(0.10, desc=f"Generating 3D structure... {time.time() - job_start:.1f}s")
283
- pipeline_start = time.time()
284
- outputs = pipeline.run(
285
- image,
286
- seed=seed,
287
- preprocess_image=False,
288
- sparse_structure_sampler_params={
289
- "steps": ss_sampling_steps,
290
- "guidance_strength": ss_guidance_strength,
291
- },
292
- shape_slat_sampler_params={
293
- "steps": shape_sampling_steps,
294
- "guidance_strength": shape_guidance_strength,
295
- },
296
- tex_slat_sampler_params={
297
- "steps": tex_sampling_steps,
298
- "guidance_strength": tex_guidance_strength,
299
- },
300
- pipeline_type={
301
- "512": "512",
302
- "1024": "1024_cascade",
303
- "1536": "1536_cascade",
304
- }[resolution],
305
- return_latent=False,
306
- )
307
- pipeline_elapsed = time.time() - pipeline_start
308
- logger.info(f"Pipeline Time: {pipeline_elapsed:.2f} seconds")
309
- progress(0.50, desc=f"Generated 3D structure in {pipeline_elapsed:.1f}s")
310
-
311
- mesh = outputs[0]
312
- grid_size = {"512": 512, "1024": 1024, "1536": 1536}[resolution]
313
-
314
- progress(0.60, desc=f"Rendering preview video... {time.time() - job_start:.1f}s")
315
- headers = req.headers if req else {}
316
- user_agent = headers.get("User-Agent", "").lower()
317
- is_mobile = any(d in user_agent for d in ["android", "iphone", "ipad", "mobile"])
318
- preview_seconds = 4
319
- if RUNNING_ON_SPACES:
320
- vid_resolution = 128 if is_mobile else 176
321
- preview_fps = 12
322
- else:
323
- vid_resolution = 256 if is_mobile else 384
324
- preview_fps = 15
325
- num_frames = preview_seconds * preview_fps
326
- video_path = None
327
 
328
- try:
329
- if preview_envmap is not None:
330
- vid_result = render_utils.render_video(mesh, resolution=vid_resolution, num_frames=num_frames, r=2, fov=36, envmap=preview_envmap)
331
- else:
332
- logger.warning("No envmap available for shaded preview; renderer may return normal-only frames.")
333
- vid_result = render_utils.render_video(mesh, resolution=vid_resolution, num_frames=num_frames, r=2, fov=36, envmap=loaded_envmap)
334
-
335
- shaded_frames = vid_result.get("shaded")
336
- if shaded_frames is None:
337
- shaded_keys = [k for k in vid_result.keys() if k.startswith("shaded_")]
338
- if shaded_keys:
339
- shaded_frames = vid_result[shaded_keys[0]]
340
- color_frames = normalize_video_frames(shaded_frames if shaded_frames is not None else vid_result.get('color', []))
341
- normal_frames = normalize_video_frames(vid_result.get('normal', []))
342
- logger.info(f"Video frame counts: color={len(color_frames)}, normal={len(normal_frames)}")
343
-
344
- current_time = datetime.now().strftime("%Y-%m%d-%H%M%S")
345
- video_path = os.path.join(TMP_DIR, f'{current_time}.mp4')
346
-
347
- if len(color_frames) > 0:
348
- wrote_video = write_mp4(video_path, color_frames, fps=preview_fps)
349
- elif len(normal_frames) > 0:
350
- logger.info("Using normal-frame fallback for preview video.")
351
- wrote_video = write_mp4(video_path, normal_frames, fps=preview_fps)
352
- else:
353
- wrote_video = False
354
- video_path = None
355
 
356
- if wrote_video and video_path:
357
- logger.info(f"Video rendered: {video_path} ({os.path.getsize(video_path)} bytes)")
358
- progress(0.72, desc=f"Preview ready in {time.time() - job_start:.1f}s")
 
 
 
 
 
 
 
 
 
 
359
  else:
360
- video_path = None
361
- logger.warning("Video file was not created; continuing with 3D model output.")
 
 
 
 
 
 
 
 
362
  except Exception as e:
363
- logger.error(f"Video rendering error: {e}", exc_info=True)
364
- logger.warning("Preview video generation failed; continuing with GLB extraction.")
 
 
 
 
 
 
 
 
 
 
 
 
 
365
 
366
- yield (
367
- None,
368
- video_path if video_path else None,
369
- gr.update(value=None, visible=True, interactive=False),
370
- )
371
 
372
- progress(0.80, desc=f"Extracting GLB model... {time.time() - job_start:.1f}s")
373
- glb_start = time.time()
374
- glb = o_voxel.postprocess.to_glb(
375
- vertices=mesh.vertices,
376
- faces=mesh.faces,
377
- attr_volume=mesh.attrs,
378
- coords=mesh.coords,
379
- attr_layout=pipeline.pbr_attr_layout,
380
- grid_size=grid_size,
381
- aabb=[[-0.5, -0.5, -0.5], [0.5, 0.5, 0.5]],
382
- decimation_target=mesh_simplify,
383
- texture_size=texture_size,
384
- remesh=not RUNNING_ON_SPACES,
385
- remesh_band=1,
386
- remesh_project=0,
387
- use_tqdm=False,
388
- )
389
- logger.info(f"GLB postprocess time: {time.time() - glb_start:.2f} seconds")
390
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
391
  current_time_glb = datetime.now().strftime("%Y-%m%d-%H%M%S-%f")
392
- glb_path = os.path.join(TMP_DIR, f'{current_time_glb}.glb')
393
  glb.export(glb_path)
394
- logger.info(f"GLB exported: {glb_path} ({os.path.getsize(glb_path)} bytes)")
395
 
396
- progress(0.92, desc="Exporting STL...")
397
- stl_path = None
398
- try:
399
- stl_path = export_stl_from_glb(glb_path)
400
- except Exception as stl_e:
401
- logger.error(f"STL export error: {stl_e}", exc_info=True)
402
 
403
- progress(1.0, desc=f"Done in {time.time() - job_start:.1f}s")
404
- logger.info(f"Viewer GLB selected: {glb_path}")
405
  stl_update = gr.update(value=stl_path, visible=True, interactive=bool(stl_path))
406
- yield glb_path, gr.skip(), stl_update
407
-
408
- except gr.Error:
 
 
409
  raise
410
  except Exception as e:
411
- logger.error(f"Generation error: {e}", exc_info=True)
412
- raise gr.Error("Generation failed before completion. Try again with resolution 512.")
413
  finally:
414
  clear_cuda_cache()
415
 
@@ -429,18 +670,24 @@ footer { visibility: hidden; }
429
  .custom-header { display: flex; align-items: center; height: 100%; }
430
  """
431
 
432
- with gr.Blocks(theme='Taithrah/Minimal', css=css, title="Pocket 3D AI") as demo:
433
- default_steps = 6 if RUNNING_ON_SPACES else 12
434
- default_resolution = "512" if RUNNING_ON_SPACES else "1024"
435
- resolution_options = ["512"] if RUNNING_ON_SPACES else ["512", "1024", "1536"]
436
- default_mesh_simplify = 160000 if RUNNING_ON_SPACES else 300000
437
- default_texture_size = 512 if RUNNING_ON_SPACES else 2048
438
- texture_min = 512 if RUNNING_ON_SPACES else 1024
439
- texture_max = 1024 if RUNNING_ON_SPACES else 4096
440
- texture_step = 512 if RUNNING_ON_SPACES else 1024
441
 
442
  with gr.Row(equal_height=True):
443
- gr.Image("assets/sb_pocket_logo_dark.png", show_label=False, container=False, show_download_button=False, min_width=50, interactive=False, show_fullscreen_button=False)
 
 
 
 
 
 
 
 
444
 
445
  with gr.Column():
446
  with gr.Row():
@@ -450,7 +697,7 @@ with gr.Blocks(theme='Taithrah/Minimal', css=css, title="Pocket 3D AI") as demo:
450
  format="png",
451
  image_mode="RGBA",
452
  type="pil",
453
- sources=['upload', 'clipboard'],
454
  container=True,
455
  mirror_webcam=True,
456
  visible=True,
@@ -473,10 +720,7 @@ with gr.Blocks(theme='Taithrah/Minimal', css=css, title="Pocket 3D AI") as demo:
473
  with gr.Row(equal_height=False):
474
  with gr.Column(scale=2, min_width=100, variant="default"):
475
  examples = gr.Examples(
476
- examples=[
477
- f'./assets/example_image/{image}'
478
- for image in os.listdir("./assets/example_image")
479
- ],
480
  inputs=[image_prompt],
481
  examples_per_page=9,
482
  )
@@ -491,58 +735,72 @@ with gr.Blocks(theme='Taithrah/Minimal', css=css, title="Pocket 3D AI") as demo:
491
  elem_classes="model-container",
492
  visible=True,
493
  )
494
- stl_download_button = gr.DownloadButton(label="Download STL", visible=True, interactive=False, size="lg", variant="primary")
 
 
 
 
 
 
495
 
496
  with gr.Accordion(label="Generation Settings", open=False, visible=show_options and not RUNNING_ON_SPACES):
497
  seed_slider = gr.Slider(0, MAX_SEED, label="Seed", value=0, step=1)
498
  randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
499
- resolution_radio = gr.Radio(resolution_options, label="Resolution", value=default_resolution)
500
  gr.Markdown("Stage 1: Sparse Structure Generation")
501
  with gr.Row():
502
- ss_guidance_strength = gr.Slider(1.0, 10.0, label="Guidance Strength", value=7.5, step=0.1)
503
- ss_sampling_steps = gr.Slider(1, 50, label="Sampling Steps", value=default_steps, step=1)
504
- gr.Markdown("Stage 2: Shape Generation")
505
- with gr.Row():
506
- shape_guidance_strength = gr.Slider(1.0, 10.0, label="Guidance Strength", value=7.5, step=0.1)
507
- shape_sampling_steps = gr.Slider(1, 50, label="Sampling Steps", value=default_steps, step=1)
508
- gr.Markdown("Stage 3: Texture Generation")
509
  with gr.Row():
510
- tex_guidance_strength = gr.Slider(1.0, 10.0, label="Guidance Strength", value=1.0, step=0.1)
511
- tex_sampling_steps = gr.Slider(1, 50, label="Sampling Steps", value=default_steps, step=1)
 
 
 
 
 
 
 
 
512
 
513
  with gr.Accordion(label="GLB Extraction Settings", open=False, visible=show_options and not RUNNING_ON_SPACES):
514
- mesh_simplify = gr.Slider(100000, 500000, label="Decimation Target", value=default_mesh_simplify, step=10000)
515
  texture_size = gr.Slider(texture_min, texture_max, label="Texture Size", value=default_texture_size, step=texture_step)
516
 
517
- preprocess_on = gr.Checkbox(value=True, visible=False)
518
- preprocess_off = gr.Checkbox(value=False, visible=False)
 
 
 
519
 
520
  demo.load(start_session)
521
  demo.unload(end_session)
522
 
523
- shared_inputs = [
524
  image_prompt,
525
  seed_slider,
526
- resolution_radio,
527
- ss_guidance_strength, ss_sampling_steps,
528
- shape_guidance_strength, shape_sampling_steps,
529
- tex_guidance_strength, tex_sampling_steps,
530
- mesh_simplify, texture_size,
531
  ]
532
- upload_inputs = shared_inputs + [preprocess_on]
533
- example_inputs = shared_inputs + [preprocess_off]
534
- generation_outputs = [model_output, video_output, stl_download_button]
535
 
536
  image_prompt.upload(
537
  get_seed,
538
  inputs=[randomize_seed, seed_slider],
539
  outputs=[seed_slider],
540
  show_progress="minimal",
541
- trigger_mode="always_last"
 
 
 
 
 
 
542
  ).then(
543
- fn=process_image_yielding,
544
- inputs=upload_inputs,
545
- outputs=generation_outputs,
546
  show_progress="minimal",
547
  scroll_to_output=True,
548
  )
@@ -554,37 +812,45 @@ with gr.Blocks(theme='Taithrah/Minimal', css=css, title="Pocket 3D AI") as demo:
554
  show_progress="minimal",
555
  trigger_mode="always_last",
556
  ).then(
557
- fn=process_image_yielding,
558
- inputs=example_inputs,
559
- outputs=generation_outputs,
 
 
 
 
 
 
560
  show_progress="minimal",
561
  scroll_to_output=True,
562
  )
563
 
 
564
  if __name__ == "__main__":
565
  if pipeline is None:
566
  logger.critical("Pipeline failed to initialize. Exiting.")
567
  sys.exit(1)
568
 
 
569
  if RUNNING_ON_SPACES:
570
  logger.info("Launching on HuggingFace Spaces")
571
  demo.queue(max_size=8, default_concurrency_limit=1, api_open=False).launch(
572
  show_api=False,
573
  share=False,
574
- allowed_paths=["./cache", "./assets"]
575
  )
576
  elif prod:
577
- logger.info(f"Launching in PRODUCTION mode on port {port}")
578
  demo.queue(max_size=20, default_concurrency_limit=5).launch(
579
  server_name="0.0.0.0",
580
  server_port=port,
581
  show_api=False,
582
  favicon_path="assets/sb_3d_ai_logo.png",
583
  share=False,
584
- allowed_paths=["./cache", "./assets"]
585
  )
586
  else:
587
- logger.info(f"Launching in DEVELOPMENT mode on port {port}")
588
  demo.queue(api_open=False).launch(
589
  server_name="0.0.0.0",
590
  server_port=port,
@@ -592,5 +858,5 @@ if __name__ == "__main__":
592
  favicon_path="assets/sb_3d_ai_logo.png",
593
  debug=True,
594
  share=True,
595
- allowed_paths=["./cache", "./assets"]
596
  )
 
1
  import argparse
2
+ import concurrent.futures
3
  import os
4
  import sys
5
  import time
6
+ from datetime import datetime
7
+ from typing import Any, Dict, Generator, List, Optional, Tuple
8
 
9
+ os.environ["OPENCV_IO_ENABLE_OPENEXR"] = "1"
10
  os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
11
+ os.environ.setdefault("ATTN_BACKEND", "flash_attn_3")
12
+ os.environ.setdefault("SPCONV_ALGO", "native")
13
+ os.environ["FLEX_GEMM_AUTOTUNE_CACHE_PATH"] = os.path.join(
14
+ os.path.dirname(os.path.abspath(__file__)), "autotune_cache.json"
15
+ )
16
+ os.environ["FLEX_GEMM_AUTOTUNER_VERBOSE"] = "1"
17
  os.environ.setdefault("TRELLIS_REMBG_MODEL", "briaai/RMBG-2.0")
18
 
19
+ import cv2
20
  import gradio as gr
21
+ import imageio
22
+ import numpy as np
23
  import spaces
 
 
 
 
24
  import torch
 
 
 
25
  import trimesh
26
+ from PIL import Image
27
+ from easydict import EasyDict as edict
28
+ from gradio_litmodel3d import LitModel3D
29
+
30
+ sys.path.append(os.getcwd())
31
+
32
+ from trellis.pipelines import TrellisImageTo3DPipeline
33
+ from trellis.representations import Gaussian
34
+ from trellis.utils import postprocessing_utils as trellis_postprocessing_utils
35
+ from trellis.utils import render_utils as trellis_render_utils
36
 
37
  from trellis2.pipelines import Trellis2ImageTo3DPipeline
38
  from trellis2.renderers import EnvMap
39
+ from trellis2.utils import render_utils as trellis2_render_utils
40
+
41
  import o_voxel
42
 
43
+ import logging
44
+
45
+ logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
46
  logger = logging.getLogger(__name__)
47
 
48
+ parser = argparse.ArgumentParser(description="Pocket 3D AI")
49
  parser.add_argument("--prod", action="store_true", help="Run in production mode")
50
  parser.add_argument("--port", type=int, help="Port to run the server on (default: 8081 for prod, 8080 for dev)")
51
  cmd_args, _unknown_args = parser.parse_known_args()
 
55
  show_options = not prod
56
  RUNNING_ON_SPACES = bool(os.getenv("SPACE_ID"))
57
 
58
+ TRELLIS_RUNTIME = os.getenv("TRELLIS_RUNTIME", "original").strip().lower()
59
+ if TRELLIS_RUNTIME not in {"original", "trellis2"}:
60
+ logger.warning("Unknown TRELLIS_RUNTIME=%s, defaulting to 'original'.", TRELLIS_RUNTIME)
61
+ TRELLIS_RUNTIME = "original"
62
+
63
  MAX_SEED = np.iinfo(np.int32).max
64
+ TMP_DIR = os.path.join("cache")
65
  os.makedirs(TMP_DIR, exist_ok=True)
66
 
67
  if gr.NO_RELOAD:
 
73
  if pipeline is not None:
74
  return
75
 
76
+ logger.info("Initializing runtime '%s'...", TRELLIS_RUNTIME)
77
  start_time = time.time()
78
 
79
  try:
80
+ if TRELLIS_RUNTIME == "original":
81
+ pipeline = TrellisImageTo3DPipeline.from_pretrained(
82
+ "JeffreyXiang/TRELLIS-image-large",
83
+ formats=["mesh", "gaussian"],
84
+ )
85
+ if hasattr(pipeline, "_move_all_models_to_cpu"):
86
+ pipeline._move_all_models_to_cpu()
87
+ logger.info("Using original TRELLIS runtime.")
88
+ else:
89
+ pipeline = Trellis2ImageTo3DPipeline.from_pretrained("microsoft/TRELLIS.2-4B")
90
+ pipeline.low_vram = False
91
+ pipeline._device = "cpu"
92
+
93
+ envmap = {}
94
+ for name in ["forest", "sunset", "courtyard"]:
95
+ exr_path = os.path.join("assets", "hdri", f"{name}.exr")
96
+ if os.path.exists(exr_path):
97
+ exr = cv2.imread(exr_path, cv2.IMREAD_UNCHANGED)
98
+ if exr is None:
99
+ continue
100
+ if RUNNING_ON_SPACES:
101
+ exr = cv2.resize(exr, (512, 256), interpolation=cv2.INTER_AREA)
102
+ envmap[name] = cv2.cvtColor(exr, cv2.COLOR_BGR2RGB)
103
+ logger.info("Using TRELLIS.2 runtime.")
104
+
105
+ logger.info("Pipeline initialized in %.2fs.", time.time() - start_time)
106
  except Exception as e:
107
+ logger.error("Failed to initialize pipeline: %s", e, exc_info=True)
108
  pipeline = None
109
  raise
110
 
111
  initialize_pipeline()
112
 
113
 
114
+ def clear_cuda_cache() -> None:
115
  if torch.cuda.is_available():
116
  torch.cuda.empty_cache()
117
 
 
119
  def normalize_video_frames(frames: Any) -> List[np.ndarray]:
120
  if frames is None:
121
  return []
122
+
123
  def _normalize_frame(arr: np.ndarray) -> Optional[np.ndarray]:
124
  if arr is None or arr.ndim != 3:
125
  return None
 
134
  arr = (arr + 1.0) * 127.5
135
  arr = np.clip(arr, 0.0, 255.0)
136
  return arr.astype(np.uint8)
137
+
138
  if isinstance(frames, np.ndarray):
139
  if frames.ndim == 4:
140
  return [nf for f in frames if (nf := _normalize_frame(f)) is not None]
 
142
  nf = _normalize_frame(frames)
143
  return [nf] if nf is not None else []
144
  return []
145
+
146
  normalized = []
147
  for frame in frames:
148
  if frame is None:
 
172
  if os.path.exists(video_path) and os.path.getsize(video_path) > 0:
173
  return True
174
  except Exception as ffmpeg_err:
175
+ logger.warning("FFMPEG video writer failed: %s", ffmpeg_err)
176
 
177
  try:
178
  h, w = frames[0].shape[:2]
 
187
  if os.path.exists(video_path) and os.path.getsize(video_path) > 0:
188
  return True
189
  except Exception as opencv_err:
190
+ logger.error("OpenCV video writer failed: %s", opencv_err, exc_info=True)
191
 
192
  return False
193
 
 
207
  try:
208
  return pipeline.preprocess_image(image)
209
  except Exception as e:
210
+ logger.error("Error during image preprocessing: %s", e, exc_info=True)
211
  return None
212
 
213
 
 
217
 
218
  def export_stl_from_glb(glb_path: str) -> Optional[str]:
219
  stl_path = None
220
+ mesh_data = trimesh.load_mesh(glb_path, force="mesh")
221
  mesh_to_export = None
222
 
223
  if isinstance(mesh_data, trimesh.Scene):
 
240
  if current_size > 0:
241
  mesh_to_export.vertices *= target_size_mm / current_size
242
  current_time_stl = datetime.now().strftime("%Y-%m%d-%H%M%S-%f")
243
+ stl_path = os.path.join(TMP_DIR, f"{current_time_stl}.stl")
244
  mesh_to_export.export(stl_path)
245
+ logger.info("STL exported: %s", stl_path)
246
 
247
  return stl_path
248
 
249
 
250
+ def get_preview_settings(req: Optional[gr.Request]) -> Tuple[bool, int, int, int]:
251
+ headers = req.headers if req else {}
252
+ user_agent = headers.get("User-Agent", "").lower()
253
+ is_mobile = any(d in user_agent for d in ["android", "iphone", "ipad", "mobile"])
254
+ if RUNNING_ON_SPACES:
255
+ resolution = 128 if is_mobile else 176
256
+ fps = 12
257
+ seconds = 3
258
+ else:
259
+ resolution = 256 if is_mobile else 384
260
+ fps = 15
261
+ seconds = 4
262
+ return is_mobile, resolution, fps, seconds
263
+
264
+
265
+ def pack_original_state(outputs: Dict[str, Any]) -> Dict[str, Any]:
266
+ gaussian = outputs["gaussian"][0]
267
+ mesh = outputs["mesh"][0]
268
+ return {
269
+ "runtime": "original",
270
+ "gaussian": {
271
+ "init_params": gaussian.init_params,
272
+ "_xyz": gaussian._xyz.detach().cpu().numpy(),
273
+ "_features_dc": gaussian._features_dc.detach().cpu().numpy(),
274
+ "_scaling": gaussian._scaling.detach().cpu().numpy(),
275
+ "_rotation": gaussian._rotation.detach().cpu().numpy(),
276
+ "_opacity": gaussian._opacity.detach().cpu().numpy(),
277
+ },
278
+ "mesh": {
279
+ "vertices": mesh.vertices.detach().cpu().numpy(),
280
+ "faces": mesh.faces.detach().cpu().numpy(),
281
+ },
282
+ }
283
+
284
+
285
+ def unpack_original_state(state: Dict[str, Any]) -> Tuple[Gaussian, edict]:
286
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
287
+ ginfo = state["gaussian"]
288
+ gaussian = Gaussian(**ginfo["init_params"])
289
+ gaussian._xyz = torch.tensor(ginfo["_xyz"], device=device)
290
+ gaussian._features_dc = torch.tensor(ginfo["_features_dc"], device=device)
291
+ gaussian._scaling = torch.tensor(ginfo["_scaling"], device=device)
292
+ gaussian._rotation = torch.tensor(ginfo["_rotation"], device=device)
293
+ gaussian._opacity = torch.tensor(ginfo["_opacity"], device=device)
294
+
295
+ mesh = edict(
296
+ vertices=torch.tensor(state["mesh"]["vertices"], device=device),
297
+ faces=torch.tensor(state["mesh"]["faces"], device=device),
298
+ )
299
+ return gaussian, mesh
300
+
301
+
302
+ def pack_trellis2_state(mesh: Any, grid_size: int) -> Dict[str, Any]:
303
+ return {
304
+ "runtime": "trellis2",
305
+ "mesh": {
306
+ "vertices": mesh.vertices.detach().cpu().numpy(),
307
+ "faces": mesh.faces.detach().cpu().numpy(),
308
+ "attrs": mesh.attrs.detach().cpu().numpy(),
309
+ "coords": mesh.coords.detach().cpu().numpy(),
310
+ "voxel_shape": list(mesh.voxel_shape),
311
+ "layout": {k: [v.start, v.stop] for k, v in mesh.layout.items()},
312
+ },
313
+ "grid_size": grid_size,
314
+ }
315
+
316
+
317
+ def unpack_trellis2_state(state: Dict[str, Any]) -> Dict[str, Any]:
318
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
319
+ mesh_info = state["mesh"]
320
+ attr_layout = {k: slice(v[0], v[1]) for k, v in mesh_info["layout"].items()}
321
+ return {
322
+ "vertices": torch.tensor(mesh_info["vertices"], device=device, dtype=torch.float32),
323
+ "faces": torch.tensor(mesh_info["faces"], device=device, dtype=torch.int32),
324
+ "attrs": torch.tensor(mesh_info["attrs"], device=device, dtype=torch.float32),
325
+ "coords": torch.tensor(mesh_info["coords"], device=device, dtype=torch.int32),
326
+ "voxel_shape": torch.Size(mesh_info["voxel_shape"]),
327
+ "attr_layout": attr_layout,
328
+ "grid_size": int(state["grid_size"]),
329
+ }
330
+
331
+
332
+ def render_original_preview(outputs: Dict[str, Any], req: gr.Request) -> Optional[str]:
333
+ is_mobile, resolution, fps, seconds = get_preview_settings(req)
334
+ num_frames = seconds * fps
335
+
336
+ with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
337
+ future_color = executor.submit(
338
+ trellis_render_utils.render_video,
339
+ outputs["gaussian"][0],
340
+ resolution=resolution,
341
+ num_frames=num_frames,
342
+ mode="color",
343
+ verbose=False,
344
+ )
345
+ future_normal = executor.submit(
346
+ trellis_render_utils.render_video,
347
+ outputs["mesh"][0],
348
+ resolution=resolution,
349
+ num_frames=num_frames,
350
+ mode="normal",
351
+ verbose=False,
352
+ )
353
+ color_result = future_color.result()
354
+ normal_result = future_normal.result()
355
+
356
+ color_frames = color_result.get("color", []) if color_result else []
357
+ normal_frames = normal_result.get("normal", []) if normal_result else []
358
+
359
+ if not color_frames or not normal_frames:
360
+ logger.warning("Preview rendering returned no frames.")
361
+ return None
362
+
363
+ frame_count = min(len(color_frames), len(normal_frames))
364
+ combined = []
365
+ for i in range(frame_count):
366
+ if is_mobile:
367
+ frame = np.concatenate([color_frames[i], normal_frames[i]], axis=0)
368
+ else:
369
+ frame = np.concatenate([color_frames[i], normal_frames[i]], axis=1)
370
+ combined.append(frame)
371
+
372
+ current_time = datetime.now().strftime("%Y-%m%d-%H%M%S")
373
+ video_path = os.path.join(TMP_DIR, f"{current_time}.mp4")
374
+ if write_mp4(video_path, combined, fps=fps):
375
+ return video_path
376
+ return None
377
+
378
+
379
+ def render_trellis2_preview(mesh: Any, req: gr.Request) -> Optional[str]:
380
+ _is_mobile, resolution, fps, seconds = get_preview_settings(req)
381
+ num_frames = seconds * fps
382
+
383
+ loaded_envmap = {}
384
+ for name, exr_data in (envmap or {}).items():
385
+ loaded_envmap[name] = EnvMap(torch.tensor(exr_data, dtype=torch.float32, device="cuda"))
386
+
387
+ preview_envmap = loaded_envmap.get("sunset") if loaded_envmap else None
388
+ if preview_envmap is None and loaded_envmap:
389
+ preview_envmap = next(iter(loaded_envmap.values()))
390
+
391
+ if preview_envmap is not None:
392
+ vid_result = trellis2_render_utils.render_video(
393
+ mesh,
394
+ resolution=resolution,
395
+ num_frames=num_frames,
396
+ r=2,
397
+ fov=36,
398
+ envmap=preview_envmap,
399
+ )
400
+ else:
401
+ vid_result = trellis2_render_utils.render_video(
402
+ mesh,
403
+ resolution=resolution,
404
+ num_frames=num_frames,
405
+ r=2,
406
+ fov=36,
407
+ envmap=loaded_envmap,
408
+ )
409
+
410
+ shaded_frames = vid_result.get("shaded")
411
+ if shaded_frames is None:
412
+ shaded_keys = [k for k in vid_result.keys() if k.startswith("shaded_")]
413
+ if shaded_keys:
414
+ shaded_frames = vid_result[shaded_keys[0]]
415
+
416
+ color_frames = normalize_video_frames(shaded_frames if shaded_frames is not None else vid_result.get("color", []))
417
+ normal_frames = normalize_video_frames(vid_result.get("normal", []))
418
+
419
+ if len(color_frames) == 0 and len(normal_frames) == 0:
420
+ return None
421
+
422
+ current_time = datetime.now().strftime("%Y-%m%d-%H%M%S")
423
+ video_path = os.path.join(TMP_DIR, f"{current_time}.mp4")
424
+ if len(color_frames) > 0:
425
+ ok = write_mp4(video_path, color_frames, fps=fps)
426
+ else:
427
+ ok = write_mp4(video_path, normal_frames, fps=fps)
428
+ return video_path if ok else None
429
+
430
+
431
+ def _run_original_pipeline(
432
+ image: Image.Image,
433
+ seed: int,
434
+ ss_guidance_strength: float,
435
+ ss_sampling_steps: int,
436
+ slat_guidance_strength: float,
437
+ slat_sampling_steps: int,
438
+ ) -> Dict[str, Any]:
439
+ return pipeline.run(
440
+ image,
441
+ seed=seed,
442
+ formats=["gaussian", "mesh"],
443
+ preprocess_image=False,
444
+ sparse_structure_sampler_params={
445
+ "steps": ss_sampling_steps,
446
+ "cfg_strength": ss_guidance_strength,
447
+ },
448
+ slat_sampler_params={
449
+ "steps": slat_sampling_steps,
450
+ "cfg_strength": slat_guidance_strength,
451
+ },
452
+ )
453
+
454
+
455
+ def _run_trellis2_pipeline(
456
+ image: Image.Image,
457
+ seed: int,
458
+ ss_guidance_strength: float,
459
+ ss_sampling_steps: int,
460
+ slat_guidance_strength: float,
461
+ slat_sampling_steps: int,
462
+ ) -> Tuple[Any, int]:
463
+ pipeline.cuda()
464
+ if RUNNING_ON_SPACES:
465
+ pipeline_type = "512"
466
+ grid_size = 512
467
+ else:
468
+ pipeline_type = "1024_cascade"
469
+ grid_size = 1024
470
+
471
+ outputs = pipeline.run(
472
+ image,
473
+ seed=seed,
474
+ preprocess_image=False,
475
+ sparse_structure_sampler_params={
476
+ "steps": ss_sampling_steps,
477
+ "guidance_strength": ss_guidance_strength,
478
+ },
479
+ shape_slat_sampler_params={
480
+ "steps": slat_sampling_steps,
481
+ "guidance_strength": slat_guidance_strength,
482
+ },
483
+ tex_slat_sampler_params={
484
+ "steps": slat_sampling_steps,
485
+ "guidance_strength": slat_guidance_strength,
486
+ },
487
+ pipeline_type=pipeline_type,
488
+ return_latent=False,
489
+ )
490
+ return outputs[0], grid_size
491
+
492
+
493
+ @spaces.GPU(duration=160)
494
+ def generate_preview_and_state(
495
  image: Optional[Image.Image],
496
  seed: int,
 
497
  ss_guidance_strength: float,
498
  ss_sampling_steps: int,
499
+ slat_guidance_strength: float,
500
+ slat_sampling_steps: int,
 
 
 
 
 
501
  req: gr.Request,
502
+ progress=gr.Progress(track_tqdm=True),
503
+ ) -> Tuple[Optional[Dict[str, Any]], Optional[str], Dict[str, Any]]:
504
  if image is None or pipeline is None:
505
+ return None, None, gr.update(value=None, visible=True, interactive=False)
506
+
507
+ if not torch.cuda.is_available():
508
+ raise gr.Error("GPU is not ready. Please retry in a few seconds.")
 
 
509
 
510
  if RUNNING_ON_SPACES:
 
 
 
511
  ss_sampling_steps = min(ss_sampling_steps, 6)
512
+ slat_sampling_steps = min(slat_sampling_steps, 4)
 
 
 
513
 
514
+ job_start = time.time()
515
+ preprocess_start = time.time()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
516
 
517
+ image = preprocess_image(image)
518
+ if image is None:
519
+ raise gr.Error("Image preprocessing failed. Please try a different image.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
520
 
521
+ logger.info("Preprocess Time: %.2fs", time.time() - preprocess_start)
522
+ progress(0.1, desc=f"Preprocessed image in {time.time() - preprocess_start:.1f}s")
523
+
524
+ attempts = [
525
+ (ss_sampling_steps, slat_sampling_steps),
526
+ (max(2, min(ss_sampling_steps, 4)), max(2, min(slat_sampling_steps, 3))),
527
+ ]
528
+
529
+ outputs = None
530
+ mesh = None
531
+ grid_size = 512
532
+ last_error = None
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
533
 
534
+ for attempt_idx, (ss_steps, slat_steps) in enumerate(attempts):
535
+ inference_start = time.time()
536
+ try:
537
+ progress(0.2, desc=f"Generating 3D structure (attempt {attempt_idx + 1})...")
538
+ if TRELLIS_RUNTIME == "original":
539
+ outputs = _run_original_pipeline(
540
+ image,
541
+ seed,
542
+ ss_guidance_strength,
543
+ ss_steps,
544
+ slat_guidance_strength,
545
+ slat_steps,
546
+ )
547
  else:
548
+ mesh, grid_size = _run_trellis2_pipeline(
549
+ image,
550
+ seed,
551
+ ss_guidance_strength,
552
+ ss_steps,
553
+ slat_guidance_strength,
554
+ slat_steps,
555
+ )
556
+ logger.info("Inference Time (attempt %d): %.2fs", attempt_idx + 1, time.time() - inference_start)
557
+ break
558
  except Exception as e:
559
+ last_error = e
560
+ logger.warning("Inference attempt %d failed: %s", attempt_idx + 1, e, exc_info=True)
561
+ if attempt_idx == len(attempts) - 1:
562
+ clear_cuda_cache()
563
+ raise gr.Error("Generation failed after retry. Try another image or lower complexity.") from e
564
+
565
+ preview_start = time.time()
566
+ progress(0.6, desc=f"Rendering preview... {time.time() - job_start:.1f}s")
567
+
568
+ if TRELLIS_RUNTIME == "original":
569
+ video_path = render_original_preview(outputs, req)
570
+ state = pack_original_state(outputs)
571
+ else:
572
+ video_path = render_trellis2_preview(mesh, req)
573
+ state = pack_trellis2_state(mesh, grid_size)
574
 
575
+ logger.info("Preview Render Time: %.2fs", time.time() - preview_start)
576
+ logger.info("Phase A Total Time: %.2fs", time.time() - job_start)
 
 
 
577
 
578
+ clear_cuda_cache()
579
+ return state, video_path, gr.update(value=None, visible=True, interactive=False)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
580
 
581
+
582
+ @spaces.GPU(duration=180)
583
+ def extract_model_from_state(
584
+ state: Optional[Dict[str, Any]],
585
+ mesh_simplify: float,
586
+ texture_size: int,
587
+ req: gr.Request,
588
+ progress=gr.Progress(track_tqdm=True),
589
+ ) -> Tuple[Optional[str], Dict[str, Any]]:
590
+ if state is None:
591
+ return None, gr.update(value=None, visible=True, interactive=False)
592
+
593
+ if not torch.cuda.is_available():
594
+ raise gr.Error("GPU is not ready. Please retry in a few seconds.")
595
+
596
+ job_start = time.time()
597
+ progress(0.1, desc="Extracting model...")
598
+
599
+ try:
600
+ if state.get("runtime") == "original":
601
+ gaussian, mesh = unpack_original_state(state)
602
+ texture_mode = "fast" if RUNNING_ON_SPACES else "opt"
603
+ texture_opt_steps = 120 if RUNNING_ON_SPACES else 1000
604
+ glb = trellis_postprocessing_utils.to_glb(
605
+ gaussian,
606
+ mesh,
607
+ simplify=mesh_simplify,
608
+ fill_holes=False,
609
+ texture_size=texture_size,
610
+ texture_mode=texture_mode,
611
+ texture_opt_steps=texture_opt_steps,
612
+ verbose=False,
613
+ )
614
+ else:
615
+ mesh_state = unpack_trellis2_state(state)
616
+ decimation_target = max(100000, int((1.0 - mesh_simplify) * 500000))
617
+ glb = o_voxel.postprocess.to_glb(
618
+ vertices=mesh_state["vertices"],
619
+ faces=mesh_state["faces"],
620
+ attr_volume=mesh_state["attrs"],
621
+ coords=mesh_state["coords"],
622
+ attr_layout=mesh_state["attr_layout"],
623
+ grid_size=mesh_state["grid_size"],
624
+ aabb=[[-0.5, -0.5, -0.5], [0.5, 0.5, 0.5]],
625
+ decimation_target=decimation_target,
626
+ texture_size=texture_size,
627
+ remesh=not RUNNING_ON_SPACES,
628
+ remesh_band=1,
629
+ remesh_project=0,
630
+ use_tqdm=False,
631
+ )
632
+
633
+ progress(0.75, desc="Exporting GLB...")
634
  current_time_glb = datetime.now().strftime("%Y-%m%d-%H%M%S-%f")
635
+ glb_path = os.path.join(TMP_DIR, f"{current_time_glb}.glb")
636
  glb.export(glb_path)
637
+ logger.info("GLB exported: %s (%d bytes)", glb_path, os.path.getsize(glb_path))
638
 
639
+ stl_start = time.time()
640
+ stl_path = export_stl_from_glb(glb_path)
641
+ logger.info("STL Export Time: %.2fs", time.time() - stl_start)
642
+ logger.info("Phase B Total Time: %.2fs", time.time() - job_start)
 
 
643
 
 
 
644
  stl_update = gr.update(value=stl_path, visible=True, interactive=bool(stl_path))
645
+ return glb_path, stl_update
646
+ except RuntimeError as re:
647
+ if "out of memory" in str(re).lower():
648
+ clear_cuda_cache()
649
+ raise gr.Error("Model extraction ran out of GPU memory. Try lowering texture size.")
650
  raise
651
  except Exception as e:
652
+ logger.error("Model extraction error: %s", e, exc_info=True)
653
+ raise gr.Error("Model extraction failed. Preview is ready; try lower texture size and retry.")
654
  finally:
655
  clear_cuda_cache()
656
 
 
670
  .custom-header { display: flex; align-items: center; height: 100%; }
671
  """
672
 
673
+ with gr.Blocks(theme="Taithrah/Minimal", css=css, title="Pocket 3D AI") as demo:
674
+ default_ss_steps = 6 if RUNNING_ON_SPACES else 12
675
+ default_slat_steps = 4 if RUNNING_ON_SPACES else 6
676
+ default_texture_size = 512 if RUNNING_ON_SPACES else 1024
677
+ texture_min = 512
678
+ texture_max = 512 if RUNNING_ON_SPACES else 4096
679
+ texture_step = 512
 
 
680
 
681
  with gr.Row(equal_height=True):
682
+ gr.Image(
683
+ "assets/sb_pocket_logo_dark.png",
684
+ show_label=False,
685
+ container=False,
686
+ show_download_button=False,
687
+ min_width=50,
688
+ interactive=False,
689
+ show_fullscreen_button=False,
690
+ )
691
 
692
  with gr.Column():
693
  with gr.Row():
 
697
  format="png",
698
  image_mode="RGBA",
699
  type="pil",
700
+ sources=["upload", "clipboard"],
701
  container=True,
702
  mirror_webcam=True,
703
  visible=True,
 
720
  with gr.Row(equal_height=False):
721
  with gr.Column(scale=2, min_width=100, variant="default"):
722
  examples = gr.Examples(
723
+ examples=[f"./assets/example_image/{image}" for image in os.listdir("./assets/example_image")],
 
 
 
724
  inputs=[image_prompt],
725
  examples_per_page=9,
726
  )
 
735
  elem_classes="model-container",
736
  visible=True,
737
  )
738
+ stl_download_button = gr.DownloadButton(
739
+ label="Download STL",
740
+ visible=True,
741
+ interactive=False,
742
+ size="lg",
743
+ variant="primary",
744
+ )
745
 
746
  with gr.Accordion(label="Generation Settings", open=False, visible=show_options and not RUNNING_ON_SPACES):
747
  seed_slider = gr.Slider(0, MAX_SEED, label="Seed", value=0, step=1)
748
  randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
 
749
  gr.Markdown("Stage 1: Sparse Structure Generation")
750
  with gr.Row():
751
+ ss_guidance_strength = gr.Slider(0.0, 10.0, label="Guidance Strength", value=7.5, step=0.1)
752
+ ss_sampling_steps = gr.Slider(1, 50, label="Sampling Steps", value=default_ss_steps, step=1)
753
+ gr.Markdown("Stage 2: Structured Latent Generation")
 
 
 
 
754
  with gr.Row():
755
+ slat_guidance_strength = gr.Slider(0.0, 10.0, label="Guidance Strength", value=1.5, step=0.1)
756
+ slat_sampling_steps = gr.Slider(1, 50, label="Sampling Steps", value=default_slat_steps, step=1)
757
+
758
+ if RUNNING_ON_SPACES:
759
+ seed_slider = gr.Slider(0, MAX_SEED, label="Seed", value=0, step=1, visible=False)
760
+ randomize_seed = gr.Checkbox(label="Randomize Seed", value=True, visible=False)
761
+ ss_guidance_strength = gr.Slider(0.0, 10.0, value=7.5, step=0.1, visible=False)
762
+ ss_sampling_steps = gr.Slider(1, 50, value=default_ss_steps, step=1, visible=False)
763
+ slat_guidance_strength = gr.Slider(0.0, 10.0, value=1.5, step=0.1, visible=False)
764
+ slat_sampling_steps = gr.Slider(1, 50, value=default_slat_steps, step=1, visible=False)
765
 
766
  with gr.Accordion(label="GLB Extraction Settings", open=False, visible=show_options and not RUNNING_ON_SPACES):
767
+ mesh_simplify = gr.Slider(0.0, 0.98, label="Simplify", value=0.95, step=0.01)
768
  texture_size = gr.Slider(texture_min, texture_max, label="Texture Size", value=default_texture_size, step=texture_step)
769
 
770
+ if RUNNING_ON_SPACES:
771
+ mesh_simplify = gr.Slider(0.0, 0.98, value=0.95, step=0.01, visible=False)
772
+ texture_size = gr.Slider(texture_min, texture_max, value=default_texture_size, step=texture_step, visible=False)
773
+
774
+ generation_state = gr.State(value=None)
775
 
776
  demo.load(start_session)
777
  demo.unload(end_session)
778
 
779
+ common_inputs = [
780
  image_prompt,
781
  seed_slider,
782
+ ss_guidance_strength,
783
+ ss_sampling_steps,
784
+ slat_guidance_strength,
785
+ slat_sampling_steps,
 
786
  ]
 
 
 
787
 
788
  image_prompt.upload(
789
  get_seed,
790
  inputs=[randomize_seed, seed_slider],
791
  outputs=[seed_slider],
792
  show_progress="minimal",
793
+ trigger_mode="always_last",
794
+ ).then(
795
+ fn=generate_preview_and_state,
796
+ inputs=common_inputs,
797
+ outputs=[generation_state, video_output, stl_download_button],
798
+ show_progress="minimal",
799
+ scroll_to_output=True,
800
  ).then(
801
+ fn=extract_model_from_state,
802
+ inputs=[generation_state, mesh_simplify, texture_size],
803
+ outputs=[model_output, stl_download_button],
804
  show_progress="minimal",
805
  scroll_to_output=True,
806
  )
 
812
  show_progress="minimal",
813
  trigger_mode="always_last",
814
  ).then(
815
+ fn=generate_preview_and_state,
816
+ inputs=common_inputs,
817
+ outputs=[generation_state, video_output, stl_download_button],
818
+ show_progress="minimal",
819
+ scroll_to_output=True,
820
+ ).then(
821
+ fn=extract_model_from_state,
822
+ inputs=[generation_state, mesh_simplify, texture_size],
823
+ outputs=[model_output, stl_download_button],
824
  show_progress="minimal",
825
  scroll_to_output=True,
826
  )
827
 
828
+
829
  if __name__ == "__main__":
830
  if pipeline is None:
831
  logger.critical("Pipeline failed to initialize. Exiting.")
832
  sys.exit(1)
833
 
834
+ logger.info("Launching runtime: %s", TRELLIS_RUNTIME)
835
  if RUNNING_ON_SPACES:
836
  logger.info("Launching on HuggingFace Spaces")
837
  demo.queue(max_size=8, default_concurrency_limit=1, api_open=False).launch(
838
  show_api=False,
839
  share=False,
840
+ allowed_paths=["./cache", "./assets"],
841
  )
842
  elif prod:
843
+ logger.info("Launching in PRODUCTION mode on port %s", port)
844
  demo.queue(max_size=20, default_concurrency_limit=5).launch(
845
  server_name="0.0.0.0",
846
  server_port=port,
847
  show_api=False,
848
  favicon_path="assets/sb_3d_ai_logo.png",
849
  share=False,
850
+ allowed_paths=["./cache", "./assets"],
851
  )
852
  else:
853
+ logger.info("Launching in DEVELOPMENT mode on port %s", port)
854
  demo.queue(api_open=False).launch(
855
  server_name="0.0.0.0",
856
  server_port=port,
 
858
  favicon_path="assets/sb_3d_ai_logo.png",
859
  debug=True,
860
  share=True,
861
+ allowed_paths=["./cache", "./assets"],
862
  )
requirements.txt CHANGED
@@ -23,3 +23,12 @@ https://github.com/JeffreyXiang/Storages/releases/download/Space_Wheels_251210/f
23
  https://github.com/JeffreyXiang/Storages/releases/download/Space_Wheels_251210/o_voxel-0.0.1-cp310-cp310-linux_x86_64.whl
24
  https://github.com/JeffreyXiang/Storages/releases/download/Space_Wheels_251210/nvdiffrast-0.4.0-cp310-cp310-linux_x86_64.whl
25
  https://github.com/JeffreyXiang/Storages/releases/download/Space_Wheels_251210/nvdiffrec_render-0.0.0-cp310-cp310-linux_x86_64.whl
 
 
 
 
 
 
 
 
 
 
23
  https://github.com/JeffreyXiang/Storages/releases/download/Space_Wheels_251210/o_voxel-0.0.1-cp310-cp310-linux_x86_64.whl
24
  https://github.com/JeffreyXiang/Storages/releases/download/Space_Wheels_251210/nvdiffrast-0.4.0-cp310-cp310-linux_x86_64.whl
25
  https://github.com/JeffreyXiang/Storages/releases/download/Space_Wheels_251210/nvdiffrec_render-0.0.0-cp310-cp310-linux_x86_64.whl
26
+
27
+ # Original TRELLIS runtime + textured GLB export stack
28
+ spconv-cu124==2.3.8
29
+ xatlas==0.0.9
30
+ pyvista==0.44.2
31
+ pymeshfix==0.17.0
32
+ igraph==0.11.8
33
+ git+https://github.com/JeffreyXiang/diffoctreerast.git
34
+ git+https://github.com/graphdeco-inria/diff-gaussian-rasterization.git
trellis/__init__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ from . import models
2
+ from . import modules
3
+ from . import pipelines
4
+ from . import renderers
5
+ from . import representations
6
+ from . import utils
trellis/models/__init__.py ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import importlib
2
+
3
+ __attributes = {
4
+ 'SparseStructureEncoder': 'sparse_structure_vae',
5
+ 'SparseStructureDecoder': 'sparse_structure_vae',
6
+ 'SparseStructureFlowModel': 'sparse_structure_flow',
7
+ 'SLatEncoder': 'structured_latent_vae',
8
+ 'SLatGaussianDecoder': 'structured_latent_vae',
9
+ 'SLatRadianceFieldDecoder': 'structured_latent_vae',
10
+ 'SLatMeshDecoder': 'structured_latent_vae',
11
+ 'SLatFlowModel': 'structured_latent_flow',
12
+ }
13
+
14
+ __submodules = []
15
+
16
+ __all__ = list(__attributes.keys()) + __submodules
17
+
18
+ def __getattr__(name):
19
+ if name not in globals():
20
+ if name in __attributes:
21
+ module_name = __attributes[name]
22
+ module = importlib.import_module(f".{module_name}", __name__)
23
+ globals()[name] = getattr(module, name)
24
+ elif name in __submodules:
25
+ module = importlib.import_module(f".{name}", __name__)
26
+ globals()[name] = module
27
+ else:
28
+ raise AttributeError(f"module {__name__} has no attribute {name}")
29
+ return globals()[name]
30
+
31
+
32
+ def from_pretrained(path: str, **kwargs):
33
+ """
34
+ Load a model from a pretrained checkpoint.
35
+
36
+ Args:
37
+ path: The path to the checkpoint. Can be either local path or a Hugging Face model name.
38
+ NOTE: config file and model file should take the name f'{path}.json' and f'{path}.safetensors' respectively.
39
+ **kwargs: Additional arguments for the model constructor.
40
+ """
41
+ import os
42
+ import json
43
+ from safetensors.torch import load_file
44
+ is_local = os.path.exists(f"{path}.json") and os.path.exists(f"{path}.safetensors")
45
+
46
+ if is_local:
47
+ config_file = f"{path}.json"
48
+ model_file = f"{path}.safetensors"
49
+ else:
50
+ from huggingface_hub import hf_hub_download
51
+ path_parts = path.split('/')
52
+ repo_id = f'{path_parts[0]}/{path_parts[1]}'
53
+ model_name = '/'.join(path_parts[2:])
54
+ config_file = hf_hub_download(repo_id, f"{model_name}.json")
55
+ model_file = hf_hub_download(repo_id, f"{model_name}.safetensors")
56
+
57
+ with open(config_file, 'r') as f:
58
+ config = json.load(f)
59
+ model = __getattr__(config['name'])(**config['args'], **kwargs)
60
+ model.load_state_dict(load_file(model_file))
61
+
62
+ return model
63
+
64
+
65
+ # For Pylance
66
+ if __name__ == '__main__':
67
+ from .sparse_structure_vae import SparseStructureEncoder, SparseStructureDecoder
68
+ from .sparse_structure_flow import SparseStructureFlowModel
69
+ from .structured_latent_vae import SLatEncoder, SLatGaussianDecoder, SLatRadianceFieldDecoder, SLatMeshDecoder
70
+ from .structured_latent_flow import SLatFlowModel
trellis/models/sparse_structure_flow.py ADDED
@@ -0,0 +1,204 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ import numpy as np
6
+ from ..modules.utils import convert_module_to_f16, convert_module_to_f32
7
+ from ..modules.transformer import AbsolutePositionEmbedder, ModulatedTransformerCrossBlock
8
+ from ..modules.spatial import patchify, unpatchify
9
+
10
+
11
class TimestepEmbedder(nn.Module):
    """
    Embeds scalar timesteps into vector representations via sinusoidal
    frequencies followed by a two-layer SiLU MLP.
    """
    def __init__(self, hidden_size, frequency_embedding_size=256):
        super().__init__()
        self.frequency_embedding_size = frequency_embedding_size
        self.mlp = nn.Sequential(
            nn.Linear(frequency_embedding_size, hidden_size, bias=True),
            nn.SiLU(),
            nn.Linear(hidden_size, hidden_size, bias=True),
        )

    @staticmethod
    def timestep_embedding(t, dim, max_period=10000):
        """
        Create sinusoidal timestep embeddings.

        Args:
            t: a 1-D Tensor of N indices, one per batch element. May be fractional.
            dim: the dimension of the output.
            max_period: controls the minimum frequency of the embeddings.

        Returns:
            an (N, D) Tensor of positional embeddings.
        """
        # https://github.com/openai/glide-text2im/blob/main/glide_text2im/nn.py
        half = dim // 2
        exponents = torch.arange(half, dtype=torch.float32) / half
        freqs = torch.exp(-np.log(max_period) * exponents).to(device=t.device)

        # Multiply in t's dtype so half-precision inputs stay supported.
        phases = t[:, None].to(t.dtype) * freqs[None]
        embedding = torch.cat([torch.cos(phases), torch.sin(phases)], dim=-1)
        if dim % 2:
            # Odd target dim: pad one zero column.
            pad = torch.zeros_like(embedding[:, :1])
            embedding = torch.cat([embedding, pad], dim=-1)
        return embedding

    def forward(self, t):
        # Match the MLP's parameter dtype (e.g. fp16 when the model is halved).
        target_dtype = self.mlp[0].weight.dtype
        t_freq = self.timestep_embedding(t.to(target_dtype), self.frequency_embedding_size)
        return self.mlp(t_freq.to(target_dtype))
57
+
58
+
59
class SparseStructureFlowModel(nn.Module):
    """
    DiT-style transformer flow model over dense sparse-structure latents.

    A cubic volume is patchified into a token sequence, processed by modulated
    transformer blocks with cross-attention to a conditioning sequence and
    adaLN modulation derived from the timestep embedding, then unpatchified
    back into a volume of the same spatial resolution.
    """
    def __init__(
        self,
        resolution: int,
        in_channels: int,
        model_channels: int,
        cond_channels: int,
        out_channels: int,
        num_blocks: int,
        num_heads: Optional[int] = None,
        num_head_channels: Optional[int] = 64,
        mlp_ratio: float = 4,
        patch_size: int = 2,
        pe_mode: Literal["ape", "rope"] = "ape",
        use_fp16: bool = False,
        use_checkpoint: bool = False,
        share_mod: bool = False,
        qk_rms_norm: bool = False,
        qk_rms_norm_cross: bool = False,
    ):
        """
        Args:
            resolution: Side length of the cubic input volume.
            in_channels: Channels of the input volume.
            model_channels: Transformer width.
            cond_channels: Channels of the cross-attention condition tokens.
            out_channels: Channels of the output volume.
            num_blocks: Number of transformer blocks.
            num_heads: Attention heads; when None, derived as
                model_channels // num_head_channels.
            num_head_channels: Channels per head (used only if num_heads is None).
            mlp_ratio: MLP hidden-size multiplier inside each block.
            patch_size: Cubic patch edge used by patchify/unpatchify.
            pe_mode: "ape" precomputes an absolute positional-embedding buffer;
                "rope" enables rotary embeddings inside the blocks.
            use_fp16: Run the transformer torso in float16.
            use_checkpoint: Enable activation checkpointing in the blocks.
            share_mod: Use one shared adaLN modulation for all blocks.
            qk_rms_norm: RMS-normalize Q/K in self-attention.
            qk_rms_norm_cross: RMS-normalize Q/K in cross-attention.
        """
        super().__init__()
        self.resolution = resolution
        self.in_channels = in_channels
        self.model_channels = model_channels
        self.cond_channels = cond_channels
        self.out_channels = out_channels
        self.num_blocks = num_blocks
        self.num_heads = num_heads or model_channels // num_head_channels
        self.mlp_ratio = mlp_ratio
        self.patch_size = patch_size
        self.pe_mode = pe_mode
        self.use_fp16 = use_fp16
        self.use_checkpoint = use_checkpoint
        self.share_mod = share_mod
        self.qk_rms_norm = qk_rms_norm
        self.qk_rms_norm_cross = qk_rms_norm_cross
        # Compute dtype for the torso; IO layers run in the input dtype.
        self.dtype = torch.float16 if use_fp16 else torch.float32

        self.t_embedder = TimestepEmbedder(model_channels)
        if share_mod:
            # Single adaLN modulation shared by every block (6 params per channel).
            self.adaLN_modulation = nn.Sequential(
                nn.SiLU(),
                nn.Linear(model_channels, 6 * model_channels, bias=True)
            )

        if pe_mode == "ape":
            # Precompute the positional embedding for the fixed patch grid
            # and register it as a (non-trainable) buffer.
            pos_embedder = AbsolutePositionEmbedder(model_channels, 3)
            coords = torch.meshgrid(*[torch.arange(res, device=self.device) for res in [resolution // patch_size] * 3], indexing='ij')
            coords = torch.stack(coords, dim=-1).reshape(-1, 3)
            pos_emb = pos_embedder(coords)
            self.register_buffer("pos_emb", pos_emb)

        self.input_layer = nn.Linear(in_channels * patch_size**3, model_channels)

        self.blocks = nn.ModuleList([
            ModulatedTransformerCrossBlock(
                model_channels,
                cond_channels,
                num_heads=self.num_heads,
                mlp_ratio=self.mlp_ratio,
                attn_mode='full',
                use_checkpoint=self.use_checkpoint,
                use_rope=(pe_mode == "rope"),
                share_mod=share_mod,
                qk_rms_norm=self.qk_rms_norm,
                qk_rms_norm_cross=self.qk_rms_norm_cross,
            )
            for _ in range(num_blocks)
        ])

        self.out_layer = nn.Linear(model_channels, out_channels * patch_size**3)

        self.initialize_weights()
        if use_fp16:
            self.convert_to_fp16()

    @property
    def device(self) -> torch.device:
        """
        Return the device of the model.
        """
        return next(self.parameters()).device

    def convert_to_fp16(self) -> None:
        """
        Convert the torso of the model to float16.
        """
        self.blocks.apply(convert_module_to_f16)

    def convert_to_fp32(self) -> None:
        """
        Convert the torso of the model to float32.
        """
        self.blocks.apply(convert_module_to_f32)

    def initialize_weights(self) -> None:
        """Xavier-init linears; zero-init adaLN and output layers (DiT recipe)."""
        # Initialize transformer layers:
        def _basic_init(module):
            if isinstance(module, nn.Linear):
                torch.nn.init.xavier_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
        self.apply(_basic_init)

        # Initialize timestep embedding MLP:
        nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02)
        nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02)

        # Zero-out adaLN modulation layers in DiT blocks:
        if self.share_mod:
            nn.init.constant_(self.adaLN_modulation[-1].weight, 0)
            nn.init.constant_(self.adaLN_modulation[-1].bias, 0)
        else:
            for block in self.blocks:
                nn.init.constant_(block.adaLN_modulation[-1].weight, 0)
                nn.init.constant_(block.adaLN_modulation[-1].bias, 0)

        # Zero-out output layers:
        nn.init.constant_(self.out_layer.weight, 0)
        nn.init.constant_(self.out_layer.bias, 0)

    def forward(self, x: torch.Tensor, t: torch.Tensor, cond: torch.Tensor) -> torch.Tensor:
        """
        Args:
            x: Input volume of shape (B, in_channels, resolution, resolution, resolution).
            t: Timesteps, shape (B,).
            cond: Condition tokens for cross-attention.

        Returns:
            Volume of shape (B, out_channels, resolution, resolution, resolution).
        """
        assert [*x.shape] == [x.shape[0], self.in_channels, *[self.resolution] * 3], \
            f"Input shape mismatch, got {x.shape}, expected {[x.shape[0], self.in_channels, *[self.resolution] * 3]}"

        # (B, C, R, R, R) -> token sequence (B, N, C * patch_size^3).
        h = patchify(x, self.patch_size)
        h = h.view(*h.shape[:2], -1).permute(0, 2, 1).contiguous()

        h = self.input_layer(h)
        # NOTE(review): pos_emb is registered only when pe_mode == "ape";
        # a "rope" config would fail here — confirm rope is unused upstream.
        h = h + self.pos_emb[None]
        t_emb = self.t_embedder(t)
        if self.share_mod:
            t_emb = self.adaLN_modulation(t_emb)
        # Cast activations to the torso dtype (fp16 when use_fp16).
        t_emb = t_emb.type(self.dtype)
        h = h.type(self.dtype)
        cond = cond.type(self.dtype)
        for block in self.blocks:
            h = block(h, t_emb, cond)
        # Back to input dtype for the final norm + projection.
        h = h.type(x.dtype)
        h = F.layer_norm(h, h.shape[-1:])
        h = self.out_layer(h)

        # Token sequence -> (B, out_channels, R, R, R).
        h = h.permute(0, 2, 1).view(h.shape[0], h.shape[2], *[self.resolution // self.patch_size] * 3)
        h = unpatchify(h, self.patch_size).contiguous()

        return h
trellis/models/sparse_structure_vae.py ADDED
@@ -0,0 +1,310 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ from ..modules.norm import GroupNorm32, ChannelLayerNorm32
6
+ from ..modules.spatial import pixel_shuffle_3d
7
+ from ..modules.utils import zero_module, convert_module_to_f16, convert_module_to_f32
8
+
9
+
10
def norm_layer(norm_type: str, *args, **kwargs) -> nn.Module:
    """
    Return a normalization layer of the requested kind.

    Args:
        norm_type: "group" for GroupNorm32 with 32 groups, or "layer" for
            ChannelLayerNorm32.
        *args, **kwargs: Forwarded to the layer constructor.

    Raises:
        ValueError: If ``norm_type`` is not a recognized kind.
    """
    if norm_type == "group":
        return GroupNorm32(32, *args, **kwargs)
    if norm_type == "layer":
        return ChannelLayerNorm32(*args, **kwargs)
    raise ValueError(f"Invalid norm type {norm_type}")
20
+
21
+
22
class ResBlock3d(nn.Module):
    """
    Pre-activation 3D residual block: norm -> SiLU -> conv, applied twice,
    plus a (projected) skip connection.
    """
    def __init__(
        self,
        channels: int,
        out_channels: Optional[int] = None,
        norm_type: Literal["group", "layer"] = "layer",
    ):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels

        self.norm1 = norm_layer(norm_type, channels)
        self.norm2 = norm_layer(norm_type, self.out_channels)
        self.conv1 = nn.Conv3d(channels, self.out_channels, 3, padding=1)
        # Second conv is zero-initialized so the block starts near identity.
        self.conv2 = zero_module(nn.Conv3d(self.out_channels, self.out_channels, 3, padding=1))
        # 1x1 projection only when the channel count changes.
        if channels != self.out_channels:
            self.skip_connection = nn.Conv3d(channels, self.out_channels, 1)
        else:
            self.skip_connection = nn.Identity()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        residual = self.skip_connection(x)
        h = self.conv1(F.silu(self.norm1(x)))
        h = self.conv2(F.silu(self.norm2(h)))
        return h + residual
48
+
49
+
50
class DownsampleBlock3d(nn.Module):
    """
    Halve the spatial resolution of a 5-D tensor (N, C, D, H, W), either with
    a strided convolution ("conv") or average pooling ("avgpool").
    """
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        mode: Literal["conv", "avgpool"] = "conv",
    ):
        assert mode in ["conv", "avgpool"], f"Invalid mode {mode}"

        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels

        if mode == "conv":
            # 2x2x2 kernel with stride 2: exact 2x spatial reduction.
            self.conv = nn.Conv3d(in_channels, out_channels, 2, stride=2)
        elif mode == "avgpool":
            # Pooling cannot change the channel count.
            assert in_channels == out_channels, "Pooling mode requires in_channels to be equal to out_channels"

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        conv = getattr(self, "conv", None)
        if conv is None:
            return F.avg_pool3d(x, 2)
        return conv(x)
73
+
74
+
75
class UpsampleBlock3d(nn.Module):
    """
    Double the spatial resolution of a 5-D tensor (N, C, D, H, W), either with
    a conv + 3D pixel shuffle ("conv") or nearest-neighbor interpolation
    ("nearest").
    """
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        mode: Literal["conv", "nearest"] = "conv",
    ):
        assert mode in ["conv", "nearest"], f"Invalid mode {mode}"

        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels

        if mode == "conv":
            # 8x channel expansion feeds the 3D pixel shuffle (2**3 = 8).
            self.conv = nn.Conv3d(in_channels, out_channels * 8, 3, padding=1)
        elif mode == "nearest":
            # Interpolation cannot change the channel count.
            assert in_channels == out_channels, "Nearest mode requires in_channels to be equal to out_channels"

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        conv = getattr(self, "conv", None)
        if conv is None:
            return F.interpolate(x, scale_factor=2, mode="nearest")
        return pixel_shuffle_3d(conv(x), 2)
99
+
100
+
101
class SparseStructureEncoder(nn.Module):
    """
    Encoder for Sparse Structure (\mathcal{E}_S in the paper Sec. 3.3).

    Args:
        in_channels (int): Channels of the input.
        latent_channels (int): Channels of the latent representation.
        num_res_blocks (int): Number of residual blocks at each resolution.
        channels (List[int]): Channels of the encoder blocks.
        num_res_blocks_middle (int): Number of residual blocks in the middle.
        norm_type (Literal["group", "layer"]): Type of normalization layer.
        use_fp16 (bool): Whether to use FP16.
    """
    def __init__(
        self,
        in_channels: int,
        latent_channels: int,
        num_res_blocks: int,
        channels: List[int],
        num_res_blocks_middle: int = 2,
        norm_type: Literal["group", "layer"] = "layer",
        use_fp16: bool = True,
    ):
        super().__init__()
        self.in_channels = in_channels
        self.latent_channels = latent_channels
        self.num_res_blocks = num_res_blocks
        self.channels = channels
        self.num_res_blocks_middle = num_res_blocks_middle
        self.norm_type = norm_type
        self.use_fp16 = use_fp16
        # Torso compute dtype; the input/output layers run in the input dtype.
        self.dtype = torch.float16 if use_fp16 else torch.float32

        self.input_layer = nn.Conv3d(in_channels, channels[0], 3, padding=1)

        # Per-resolution residual blocks with one downsample between stages.
        self.blocks = nn.ModuleList([])
        for i, ch in enumerate(channels):
            self.blocks.extend([
                ResBlock3d(ch, ch)
                for _ in range(num_res_blocks)
            ])
            if i < len(channels) - 1:
                self.blocks.append(
                    DownsampleBlock3d(ch, channels[i+1])
                )

        self.middle_block = nn.Sequential(*[
            ResBlock3d(channels[-1], channels[-1])
            for _ in range(num_res_blocks_middle)
        ])

        # Projects to 2 * latent_channels: concatenated (mean, logvar).
        self.out_layer = nn.Sequential(
            norm_layer(norm_type, channels[-1]),
            nn.SiLU(),
            nn.Conv3d(channels[-1], latent_channels*2, 3, padding=1)
        )

        if use_fp16:
            self.convert_to_fp16()

    @property
    def device(self) -> torch.device:
        """
        Return the device of the model.
        """
        return next(self.parameters()).device

    def convert_to_fp16(self) -> None:
        """
        Convert the torso of the model to float16.
        """
        self.use_fp16 = True
        self.dtype = torch.float16
        # Only blocks and middle_block are converted; out_layer stays as-is.
        self.blocks.apply(convert_module_to_f16)
        self.middle_block.apply(convert_module_to_f16)

    def convert_to_fp32(self) -> None:
        """
        Convert the torso of the model to float32.
        """
        self.use_fp16 = False
        self.dtype = torch.float32
        self.blocks.apply(convert_module_to_f32)
        self.middle_block.apply(convert_module_to_f32)

    def forward(self, x: torch.Tensor, sample_posterior: bool = False, return_raw: bool = False) -> torch.Tensor:
        """
        Encode a voxel volume into a latent.

        Args:
            x: Input of shape (B, in_channels, D, H, W).
            sample_posterior: If True, sample z ~ N(mean, std) via the
                reparameterization trick; otherwise return the mean.
            return_raw: If True, return (z, mean, logvar) instead of z only.
        """
        h = self.input_layer(x)
        h = h.type(self.dtype)

        for block in self.blocks:
            h = block(h)
        h = self.middle_block(h)

        # Back to the input dtype for the head (not converted by convert_to_fp16).
        h = h.type(x.dtype)
        h = self.out_layer(h)

        mean, logvar = h.chunk(2, dim=1)

        if sample_posterior:
            std = torch.exp(0.5 * logvar)
            z = mean + std * torch.randn_like(std)
        else:
            z = mean

        if return_raw:
            return z, mean, logvar
        return z
208
+
209
+
210
class SparseStructureDecoder(nn.Module):
    """
    Decoder for Sparse Structure (\mathcal{D}_S in the paper Sec. 3.3).

    Args:
        out_channels (int): Channels of the output.
        latent_channels (int): Channels of the latent representation.
        num_res_blocks (int): Number of residual blocks at each resolution.
        channels (List[int]): Channels of the decoder blocks.
        num_res_blocks_middle (int): Number of residual blocks in the middle.
        norm_type (Literal["group", "layer"]): Type of normalization layer.
        use_fp16 (bool): Whether to use FP16.
    """
    def __init__(
        self,
        out_channels: int,
        latent_channels: int,
        num_res_blocks: int,
        channels: List[int],
        num_res_blocks_middle: int = 2,
        norm_type: Literal["group", "layer"] = "layer",
        use_fp16: bool = False,
    ):
        super().__init__()
        self.out_channels = out_channels
        self.latent_channels = latent_channels
        self.num_res_blocks = num_res_blocks
        self.channels = channels
        self.num_res_blocks_middle = num_res_blocks_middle
        self.norm_type = norm_type
        self.use_fp16 = use_fp16
        # Torso compute dtype; the input/output layers run in the input dtype.
        self.dtype = torch.float16 if use_fp16 else torch.float32

        self.input_layer = nn.Conv3d(latent_channels, channels[0], 3, padding=1)

        self.middle_block = nn.Sequential(*[
            ResBlock3d(channels[0], channels[0])
            for _ in range(num_res_blocks_middle)
        ])

        # Per-resolution residual blocks with one upsample between stages
        # (mirror of the encoder's downsampling path).
        self.blocks = nn.ModuleList([])
        for i, ch in enumerate(channels):
            self.blocks.extend([
                ResBlock3d(ch, ch)
                for _ in range(num_res_blocks)
            ])
            if i < len(channels) - 1:
                self.blocks.append(
                    UpsampleBlock3d(ch, channels[i+1])
                )

        self.out_layer = nn.Sequential(
            norm_layer(norm_type, channels[-1]),
            nn.SiLU(),
            nn.Conv3d(channels[-1], out_channels, 3, padding=1)
        )

        if use_fp16:
            self.convert_to_fp16()

    @property
    def device(self) -> torch.device:
        """
        Return the device of the model.
        """
        return next(self.parameters()).device

    def convert_to_fp16(self) -> None:
        """
        Convert the torso of the model to float16.
        """
        self.use_fp16 = True
        self.dtype = torch.float16
        # Only blocks and middle_block are converted; out_layer stays as-is.
        self.blocks.apply(convert_module_to_f16)
        self.middle_block.apply(convert_module_to_f16)

    def convert_to_fp32(self) -> None:
        """
        Convert the torso of the model to float32.
        """
        self.use_fp16 = False
        self.dtype = torch.float32
        self.blocks.apply(convert_module_to_f32)
        self.middle_block.apply(convert_module_to_f32)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Decode a latent volume (B, latent_channels, D, H, W) into the output
        occupancy volume.
        """
        # Figure out model dtype from parameters so half-precision inputs work.
        desired_dtype = next(self.parameters()).dtype
        x = x.to(dtype=desired_dtype)

        h = self.input_layer(x)

        h = h.type(self.dtype)

        h = self.middle_block(h)
        for block in self.blocks:
            h = block(h)

        # Back to x's (already parameter-matched) dtype for the head.
        h = h.type(x.dtype)
        h = self.out_layer(h)
        return h
trellis/models/structured_latent_flow.py ADDED
@@ -0,0 +1,262 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ import numpy as np
6
+ from ..modules.utils import zero_module, convert_module_to_f16, convert_module_to_f32
7
+ from ..modules.transformer import AbsolutePositionEmbedder
8
+ from ..modules.norm import LayerNorm32
9
+ from ..modules import sparse as sp
10
+ from ..modules.sparse.transformer import ModulatedSparseTransformerCrossBlock
11
+ from .sparse_structure_flow import TimestepEmbedder
12
+
13
+
14
class SparseResBlock3d(nn.Module):
    """
    Residual block on sparse tensors with timestep-embedding (FiLM-style)
    modulation and an optional 2x sparse down- or upsample applied before
    the residual branch.
    """
    def __init__(
        self,
        channels: int,
        emb_channels: int,
        out_channels: Optional[int] = None,
        downsample: bool = False,
        upsample: bool = False,
    ):
        super().__init__()
        self.channels = channels
        self.emb_channels = emb_channels
        self.out_channels = out_channels or channels
        self.downsample = downsample
        self.upsample = upsample

        assert not (downsample and upsample), "Cannot downsample and upsample at the same time"

        self.norm1 = LayerNorm32(channels, elementwise_affine=True, eps=1e-6)
        # norm2 has no affine params: scale/shift come from the embedding below.
        self.norm2 = LayerNorm32(self.out_channels, elementwise_affine=False, eps=1e-6)
        self.conv1 = sp.SparseConv3d(channels, self.out_channels, 3)
        # Zero-initialized so the block starts as (near-)identity.
        self.conv2 = zero_module(sp.SparseConv3d(self.out_channels, self.out_channels, 3))
        # Maps the timestep embedding to per-channel (scale, shift).
        self.emb_layers = nn.Sequential(
            nn.SiLU(),
            nn.Linear(emb_channels, 2 * self.out_channels, bias=True),
        )
        self.skip_connection = sp.SparseLinear(channels, self.out_channels) if channels != self.out_channels else nn.Identity()
        self.updown = None
        if self.downsample:
            self.updown = sp.SparseDownsample(2)
        elif self.upsample:
            self.updown = sp.SparseUpsample(2)

    def _updown(self, x: sp.SparseTensor) -> sp.SparseTensor:
        # Apply the optional resolution change (identity when updown is None).
        if self.updown is not None:
            x = self.updown(x)
        return x

    def forward(self, x: sp.SparseTensor, emb: torch.Tensor) -> sp.SparseTensor:
        """
        Args:
            x: Sparse input features.
            emb: Timestep embedding, shape (B, emb_channels).
        """
        emb_out = self.emb_layers(emb).type(x.dtype)
        scale, shift = torch.chunk(emb_out, 2, dim=1)

        # Resample BEFORE the residual branch so both paths see the new grid.
        x = self._updown(x)
        h = x.replace(self.norm1(x.feats))
        h = h.replace(F.silu(h.feats))
        h = self.conv1(h)
        # Modulate the normalized features with the embedding-derived scale/shift.
        h = h.replace(self.norm2(h.feats)) * (1 + scale) + shift
        h = h.replace(F.silu(h.feats))
        h = self.conv2(h)
        h = h + self.skip_connection(x)

        return h
66
+
67
+
68
class SLatFlowModel(nn.Module):
    """
    U-Net-shaped sparse transformer flow model over structured latents (SLat).

    Sparse-conv ResBlocks downsample the sparse grid into a transformer torso
    (modulated cross-attention blocks conditioned on ``cond`` and the timestep
    embedding), then mirrored ResBlocks upsample back, optionally with U-Net
    feature skip connections.
    """
    def __init__(
        self,
        resolution: int,
        in_channels: int,
        model_channels: int,
        cond_channels: int,
        out_channels: int,
        num_blocks: int,
        num_heads: Optional[int] = None,
        num_head_channels: Optional[int] = 64,
        mlp_ratio: float = 4,
        patch_size: int = 2,
        num_io_res_blocks: int = 2,
        io_block_channels: List[int] = None,
        pe_mode: Literal["ape", "rope"] = "ape",
        use_fp16: bool = True,
        use_checkpoint: bool = False,
        use_skip_connection: bool = True,
        share_mod: bool = False,
        qk_rms_norm: bool = False,
        qk_rms_norm_cross: bool = False,
    ):
        super().__init__()
        self.resolution = resolution
        self.in_channels = in_channels
        self.model_channels = model_channels
        self.cond_channels = cond_channels
        self.out_channels = out_channels
        self.num_blocks = num_blocks
        self.num_heads = num_heads or model_channels // num_head_channels
        self.mlp_ratio = mlp_ratio
        self.patch_size = patch_size
        self.num_io_res_blocks = num_io_res_blocks
        self.io_block_channels = io_block_channels
        self.pe_mode = pe_mode
        self.use_fp16 = use_fp16
        self.use_checkpoint = use_checkpoint
        self.use_skip_connection = use_skip_connection
        self.share_mod = share_mod
        self.qk_rms_norm = qk_rms_norm
        self.qk_rms_norm_cross = qk_rms_norm_cross
        # Compute dtype for the torso; IO layers run in the input dtype.
        self.dtype = torch.float16 if use_fp16 else torch.float32

        # One IO stage (a factor-of-2 resample) per power of 2 in patch_size.
        assert int(np.log2(patch_size)) == np.log2(patch_size), "Patch size must be a power of 2"
        assert np.log2(patch_size) == len(io_block_channels), "Number of IO ResBlocks must match the number of stages"

        self.t_embedder = TimestepEmbedder(model_channels)
        if share_mod:
            # Single adaLN modulation shared by every transformer block.
            self.adaLN_modulation = nn.Sequential(
                nn.SiLU(),
                nn.Linear(model_channels, 6 * model_channels, bias=True)
            )

        if pe_mode == "ape":
            self.pos_embedder = AbsolutePositionEmbedder(model_channels)

        self.input_layer = sp.SparseLinear(in_channels, io_block_channels[0])
        # Downsampling input path: (num_io_res_blocks - 1) same-resolution
        # blocks, then one downsampling block per stage.
        self.input_blocks = nn.ModuleList([])
        for chs, next_chs in zip(io_block_channels, io_block_channels[1:] + [model_channels]):
            self.input_blocks.extend([
                SparseResBlock3d(
                    chs,
                    model_channels,
                    out_channels=chs,
                )
                for _ in range(num_io_res_blocks-1)
            ])
            self.input_blocks.append(
                SparseResBlock3d(
                    chs,
                    model_channels,
                    out_channels=next_chs,
                    downsample=True,
                )
            )

        self.blocks = nn.ModuleList([
            ModulatedSparseTransformerCrossBlock(
                model_channels,
                cond_channels,
                num_heads=self.num_heads,
                mlp_ratio=self.mlp_ratio,
                attn_mode='full',
                use_checkpoint=self.use_checkpoint,
                use_rope=(pe_mode == "rope"),
                share_mod=self.share_mod,
                qk_rms_norm=self.qk_rms_norm,
                qk_rms_norm_cross=self.qk_rms_norm_cross,
            )
            for _ in range(num_blocks)
        ])

        # Upsampling output path, mirroring input_blocks; channel counts are
        # doubled when U-Net skip features are concatenated.
        self.out_blocks = nn.ModuleList([])
        for chs, prev_chs in zip(reversed(io_block_channels), [model_channels] + list(reversed(io_block_channels[1:]))):
            self.out_blocks.append(
                SparseResBlock3d(
                    prev_chs * 2 if self.use_skip_connection else prev_chs,
                    model_channels,
                    out_channels=chs,
                    upsample=True,
                )
            )
            self.out_blocks.extend([
                SparseResBlock3d(
                    chs * 2 if self.use_skip_connection else chs,
                    model_channels,
                    out_channels=chs,
                )
                for _ in range(num_io_res_blocks-1)
            ])
        self.out_layer = sp.SparseLinear(io_block_channels[0], out_channels)

        self.initialize_weights()
        if use_fp16:
            self.convert_to_fp16()

    @property
    def device(self) -> torch.device:
        """
        Return the device of the model.
        """
        return next(self.parameters()).device

    def convert_to_fp16(self) -> None:
        """
        Convert the torso of the model to float16.
        """
        self.input_blocks.apply(convert_module_to_f16)
        self.blocks.apply(convert_module_to_f16)
        self.out_blocks.apply(convert_module_to_f16)

    def convert_to_fp32(self) -> None:
        """
        Convert the torso of the model to float32.
        """
        self.input_blocks.apply(convert_module_to_f32)
        self.blocks.apply(convert_module_to_f32)
        self.out_blocks.apply(convert_module_to_f32)

    def initialize_weights(self) -> None:
        """Xavier-init linears; zero-init adaLN and output layers (DiT recipe)."""
        # Initialize transformer layers:
        def _basic_init(module):
            if isinstance(module, nn.Linear):
                torch.nn.init.xavier_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
        self.apply(_basic_init)

        # Initialize timestep embedding MLP:
        nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02)
        nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02)

        # Zero-out adaLN modulation layers in DiT blocks:
        if self.share_mod:
            nn.init.constant_(self.adaLN_modulation[-1].weight, 0)
            nn.init.constant_(self.adaLN_modulation[-1].bias, 0)
        else:
            for block in self.blocks:
                nn.init.constant_(block.adaLN_modulation[-1].weight, 0)
                nn.init.constant_(block.adaLN_modulation[-1].bias, 0)

        # Zero-out output layers:
        nn.init.constant_(self.out_layer.weight, 0)
        nn.init.constant_(self.out_layer.bias, 0)

    def forward(self, x: sp.SparseTensor, t: torch.Tensor, cond: torch.Tensor) -> sp.SparseTensor:
        """
        Args:
            x: Sparse latent input.
            t: Timesteps, shape (B,).
            cond: Condition tokens for cross-attention.

        Returns:
            Sparse tensor with ``out_channels`` features per voxel.
        """
        h = self.input_layer(x).type(self.dtype)
        t_emb = self.t_embedder(t)
        if self.share_mod:
            t_emb = self.adaLN_modulation(t_emb)
        t_emb = t_emb.type(self.dtype)
        cond = cond.type(self.dtype)

        skips = []
        # pack with input blocks
        for block in self.input_blocks:
            h = block(h, t_emb)
            skips.append(h.feats)

        if self.pe_mode == "ape":
            # Absolute positions from voxel coords (drop the batch column).
            h = h + self.pos_embedder(h.coords[:, 1:]).type(self.dtype)
        for block in self.blocks:
            h = block(h, t_emb, cond)

        # unpack with output blocks (skips consumed in reverse order)
        for block, skip in zip(self.out_blocks, reversed(skips)):
            if self.use_skip_connection:
                h = block(h.replace(torch.cat([h.feats, skip], dim=1)), t_emb)
            else:
                h = block(h, t_emb)

        h = h.replace(F.layer_norm(h.feats, h.feats.shape[-1:]))
        h = self.out_layer(h.type(x.dtype))
        return h
trellis/models/structured_latent_vae/__init__.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ from .encoder import SLatEncoder
2
+ from .decoder_gs import SLatGaussianDecoder
3
+ from .decoder_rf import SLatRadianceFieldDecoder
4
+ from .decoder_mesh import SLatMeshDecoder
trellis/models/structured_latent_vae/base.py ADDED
@@ -0,0 +1,117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ from ...modules.utils import convert_module_to_f16, convert_module_to_f32
5
+ from ...modules import sparse as sp
6
+ from ...modules.transformer import AbsolutePositionEmbedder
7
+ from ...modules.sparse.transformer import SparseTransformerBlock
8
+
9
+
10
def block_attn_config(self):
    """
    Yield one (attn_mode, window_size, shift_sequence, shift_window,
    serialize_mode) tuple per transformer block, encoding the per-block
    shift/serialization schedule implied by ``self.attn_mode``.
    """
    mode = self.attn_mode
    for block_idx in range(self.num_blocks):
        odd = block_idx % 2  # alternate the shift on every other block
        if mode == "shift_window":
            yield "serialized", self.window_size, 0, (16 * odd,) * 3, sp.SerializeMode.Z_ORDER
        elif mode == "shift_sequence":
            yield "serialized", self.window_size, self.window_size // 2 * odd, (0, 0, 0), sp.SerializeMode.Z_ORDER
        elif mode == "shift_order":
            # Cycle through the four serialization orders.
            yield "serialized", self.window_size, 0, (0, 0, 0), sp.SerializeModes[block_idx % 4]
        elif mode == "full":
            yield "full", None, None, None, None
        elif mode == "swin":
            yield "windowed", self.window_size, None, self.window_size // 2 * odd, None
25
+
26
+
27
class SparseTransformerBase(nn.Module):
    """
    Sparse Transformer without output layers.
    Serve as the base class for encoder and decoder.
    """
    def __init__(
        self,
        in_channels: int,
        model_channels: int,
        num_blocks: int,
        num_heads: Optional[int] = None,
        num_head_channels: Optional[int] = 64,
        mlp_ratio: float = 4.0,
        attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "full",
        window_size: Optional[int] = None,
        pe_mode: Literal["ape", "rope"] = "ape",
        use_fp16: bool = False,
        use_checkpoint: bool = False,
        qk_rms_norm: bool = False,
    ):
        """
        Args:
            in_channels: Channels of the sparse input features.
            model_channels: Transformer width.
            num_blocks: Number of transformer blocks.
            num_heads: Attention heads; when None, derived as
                model_channels // num_head_channels.
            num_head_channels: Channels per head (used only if num_heads is None).
            mlp_ratio: MLP hidden-size multiplier per block.
            attn_mode: Attention scheme; per-block parameters are produced by
                ``block_attn_config``.
            window_size: Window size for windowed/serialized attention modes.
            pe_mode: "ape" adds absolute positional embeddings; "rope" enables
                rotary embeddings inside the blocks.
            use_fp16: Run the transformer torso in float16.
            use_checkpoint: Enable activation checkpointing in the blocks.
            qk_rms_norm: RMS-normalize Q/K in attention.
        """
        super().__init__()
        self.in_channels = in_channels
        self.model_channels = model_channels
        self.num_blocks = num_blocks
        self.window_size = window_size
        self.num_heads = num_heads or model_channels // num_head_channels
        self.mlp_ratio = mlp_ratio
        self.attn_mode = attn_mode
        self.pe_mode = pe_mode
        self.use_fp16 = use_fp16
        self.use_checkpoint = use_checkpoint
        self.qk_rms_norm = qk_rms_norm
        # Torso compute dtype; the input layer runs in the input dtype.
        self.dtype = torch.float16 if use_fp16 else torch.float32

        if pe_mode == "ape":
            self.pos_embedder = AbsolutePositionEmbedder(model_channels)

        self.input_layer = sp.SparseLinear(in_channels, model_channels)
        # Per-block attention settings (mode, window, shifts, serialization)
        # come from block_attn_config(self).
        self.blocks = nn.ModuleList([
            SparseTransformerBlock(
                model_channels,
                num_heads=self.num_heads,
                mlp_ratio=self.mlp_ratio,
                attn_mode=attn_mode,
                window_size=window_size,
                shift_sequence=shift_sequence,
                shift_window=shift_window,
                serialize_mode=serialize_mode,
                use_checkpoint=self.use_checkpoint,
                use_rope=(pe_mode == "rope"),
                qk_rms_norm=self.qk_rms_norm,
            )
            for attn_mode, window_size, shift_sequence, shift_window, serialize_mode in block_attn_config(self)
        ])

    @property
    def device(self) -> torch.device:
        """
        Return the device of the model.
        """
        return next(self.parameters()).device

    def convert_to_fp16(self) -> None:
        """
        Convert the torso of the model to float16.
        """
        self.blocks.apply(convert_module_to_f16)

    def convert_to_fp32(self) -> None:
        """
        Convert the torso of the model to float32.
        """
        self.blocks.apply(convert_module_to_f32)

    def initialize_weights(self) -> None:
        """Xavier-initialize all linear layers (biases to zero)."""
        # Initialize transformer layers:
        def _basic_init(module):
            if isinstance(module, nn.Linear):
                torch.nn.init.xavier_uniform_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
        self.apply(_basic_init)

    def forward(self, x: sp.SparseTensor) -> sp.SparseTensor:
        h = self.input_layer(x)
        if self.pe_mode == "ape":
            # Absolute positions from the voxel coords (drop the batch column).
            h = h + self.pos_embedder(x.coords[:, 1:])
        h = h.type(self.dtype)
        for block in self.blocks:
            h = block(h)
        return h
trellis/models/structured_latent_vae/decoder_gs.py ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ from ...modules import sparse as sp
6
+ from ...utils.random_utils import hammersley_sequence
7
+ from .base import SparseTransformerBase
8
+ from ...representations import Gaussian
9
+
10
+
11
+ class SLatGaussianDecoder(SparseTransformerBase):
12
+ def __init__(
13
+ self,
14
+ resolution: int,
15
+ model_channels: int,
16
+ latent_channels: int,
17
+ num_blocks: int,
18
+ num_heads: Optional[int] = None,
19
+ num_head_channels: Optional[int] = 64,
20
+ mlp_ratio: float = 4,
21
+ attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "swin",
22
+ window_size: int = 8,
23
+ pe_mode: Literal["ape", "rope"] = "ape",
24
+ use_fp16: bool = False,
25
+ use_checkpoint: bool = False,
26
+ qk_rms_norm: bool = False,
27
+ representation_config: dict = None,
28
+ ):
29
+ super().__init__(
30
+ in_channels=latent_channels,
31
+ model_channels=model_channels,
32
+ num_blocks=num_blocks,
33
+ num_heads=num_heads,
34
+ num_head_channels=num_head_channels,
35
+ mlp_ratio=mlp_ratio,
36
+ attn_mode=attn_mode,
37
+ window_size=window_size,
38
+ pe_mode=pe_mode,
39
+ use_fp16=use_fp16,
40
+ use_checkpoint=use_checkpoint,
41
+ qk_rms_norm=qk_rms_norm,
42
+ )
43
+ self.resolution = resolution
44
+ self.rep_config = representation_config
45
+ self._calc_layout()
46
+ self.out_layer = sp.SparseLinear(model_channels, self.out_channels)
47
+ self._build_perturbation()
48
+
49
+ self.initialize_weights()
50
+ if use_fp16:
51
+ self.convert_to_fp16()
52
+
53
+ def initialize_weights(self) -> None:
54
+ super().initialize_weights()
55
+ # Zero-out output layers:
56
+ nn.init.constant_(self.out_layer.weight, 0)
57
+ nn.init.constant_(self.out_layer.bias, 0)
58
+
59
+ def _build_perturbation(self) -> None:
60
+ perturbation = [hammersley_sequence(3, i, self.rep_config['num_gaussians']) for i in range(self.rep_config['num_gaussians'])]
61
+ perturbation = torch.tensor(perturbation).float() * 2 - 1
62
+ perturbation = perturbation / self.rep_config['voxel_size']
63
+ perturbation = torch.atanh(perturbation).to(self.device)
64
+ self.register_buffer('offset_perturbation', perturbation)
65
+
66
+ def _calc_layout(self) -> None:
67
+ self.layout = {
68
+ '_xyz' : {'shape': (self.rep_config['num_gaussians'], 3), 'size': self.rep_config['num_gaussians'] * 3},
69
+ '_features_dc' : {'shape': (self.rep_config['num_gaussians'], 1, 3), 'size': self.rep_config['num_gaussians'] * 3},
70
+ '_scaling' : {'shape': (self.rep_config['num_gaussians'], 3), 'size': self.rep_config['num_gaussians'] * 3},
71
+ '_rotation' : {'shape': (self.rep_config['num_gaussians'], 4), 'size': self.rep_config['num_gaussians'] * 4},
72
+ '_opacity' : {'shape': (self.rep_config['num_gaussians'], 1), 'size': self.rep_config['num_gaussians']},
73
+ }
74
+ start = 0
75
+ for k, v in self.layout.items():
76
+ v['range'] = (start, start + v['size'])
77
+ start += v['size']
78
+ self.out_channels = start
79
+
80
+ def to_representation(self, x: sp.SparseTensor) -> List[Gaussian]:
81
+ """
82
+ Convert a batch of network outputs to 3D representations.
83
+
84
+ Args:
85
+ x: The [N x * x C] sparse tensor output by the network.
86
+
87
+ Returns:
88
+ list of representations
89
+ """
90
+ ret = []
91
+ for i in range(x.shape[0]):
92
+ representation = Gaussian(
93
+ sh_degree=0,
94
+ aabb=[-0.5, -0.5, -0.5, 1.0, 1.0, 1.0],
95
+ mininum_kernel_size = self.rep_config['3d_filter_kernel_size'],
96
+ scaling_bias = self.rep_config['scaling_bias'],
97
+ opacity_bias = self.rep_config['opacity_bias'],
98
+ scaling_activation = self.rep_config['scaling_activation']
99
+ )
100
+ xyz = (x.coords[x.layout[i]][:, 1:].float() + 0.5) / self.resolution
101
+ for k, v in self.layout.items():
102
+ if k == '_xyz':
103
+ offset = x.feats[x.layout[i]][:, v['range'][0]:v['range'][1]].reshape(-1, *v['shape'])
104
+ offset = offset * self.rep_config['lr'][k]
105
+ if self.rep_config['perturb_offset']:
106
+ offset = offset + self.offset_perturbation
107
+ offset = torch.tanh(offset) / self.resolution * 0.5 * self.rep_config['voxel_size']
108
+ _xyz = xyz.unsqueeze(1) + offset
109
+ setattr(representation, k, _xyz.flatten(0, 1))
110
+ else:
111
+ feats = x.feats[x.layout[i]][:, v['range'][0]:v['range'][1]].reshape(-1, *v['shape']).flatten(0, 1)
112
+ feats = feats * self.rep_config['lr'][k]
113
+ setattr(representation, k, feats)
114
+ ret.append(representation)
115
+ return ret
116
+
117
+ def forward(self, x: sp.SparseTensor) -> List[Gaussian]:
118
+ h = super().forward(x)
119
+ h = h.type(x.dtype)
120
+ h = h.replace(F.layer_norm(h.feats, h.feats.shape[-1:]))
121
+ h = self.out_layer(h)
122
+ return self.to_representation(h)
trellis/models/structured_latent_vae/decoder_mesh.py ADDED
@@ -0,0 +1,173 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ import numpy as np
6
+ from ...modules.utils import zero_module, convert_module_to_f16, convert_module_to_f32
7
+ from ...modules import sparse as sp
8
+ from .base import SparseTransformerBase
9
+ from ...representations import MeshExtractResult
10
+ from ...representations.mesh import SparseFeatures2Mesh, SparseFeatures2MCMesh
11
+
12
+
13
+ class SparseSubdivideBlock3d(nn.Module):
14
+ """
15
+ A 3D subdivide block that can subdivide the sparse tensor.
16
+
17
+ Args:
18
+ channels: channels in the inputs and outputs.
19
+ out_channels: if specified, the number of output channels.
20
+ num_groups: the number of groups for the group norm.
21
+ """
22
+ def __init__(
23
+ self,
24
+ channels: int,
25
+ resolution: int,
26
+ out_channels: Optional[int] = None,
27
+ num_groups: int = 32
28
+ ):
29
+ super().__init__()
30
+ self.channels = channels
31
+ self.resolution = resolution
32
+ self.out_resolution = resolution * 2
33
+ self.out_channels = out_channels or channels
34
+
35
+ self.act_layers = nn.Sequential(
36
+ sp.SparseGroupNorm32(num_groups, channels),
37
+ sp.SparseSiLU()
38
+ )
39
+
40
+ self.sub = sp.SparseSubdivide()
41
+
42
+ self.out_layers = nn.Sequential(
43
+ sp.SparseConv3d(channels, self.out_channels, 3, indice_key=f"res_{self.out_resolution}"),
44
+ sp.SparseGroupNorm32(num_groups, self.out_channels),
45
+ sp.SparseSiLU(),
46
+ zero_module(sp.SparseConv3d(self.out_channels, self.out_channels, 3, indice_key=f"res_{self.out_resolution}")),
47
+ )
48
+
49
+ if self.out_channels == channels:
50
+ self.skip_connection = nn.Identity()
51
+ else:
52
+ self.skip_connection = sp.SparseConv3d(channels, self.out_channels, 1, indice_key=f"res_{self.out_resolution}")
53
+
54
+ def forward(self, x: sp.SparseTensor) -> sp.SparseTensor:
55
+ """
56
+ Apply the block to a Tensor, conditioned on a timestep embedding.
57
+
58
+ Args:
59
+ x: an [N x C x ...] Tensor of features.
60
+ Returns:
61
+ an [N x C x ...] Tensor of outputs.
62
+ """
63
+ h = self.act_layers(x)
64
+ h = self.sub(h)
65
+ x = self.sub(x)
66
+ h = self.out_layers(h)
67
+ h = h + self.skip_connection(x)
68
+ return h
69
+
70
+
71
+ class SLatMeshDecoder(SparseTransformerBase):
72
+ def __init__(
73
+ self,
74
+ resolution: int,
75
+ model_channels: int,
76
+ latent_channels: int,
77
+ num_blocks: int,
78
+ num_heads: Optional[int] = None,
79
+ num_head_channels: Optional[int] = 64,
80
+ mlp_ratio: float = 4,
81
+ attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "swin",
82
+ window_size: int = 8,
83
+ pe_mode: Literal["ape", "rope"] = "ape",
84
+ use_fp16: bool = True,
85
+ use_checkpoint: bool = False,
86
+ qk_rms_norm: bool = False,
87
+ representation_config: dict = None,
88
+ mesh_extractor: str = "mc",
89
+ ):
90
+ super().__init__(
91
+ in_channels=latent_channels,
92
+ model_channels=model_channels,
93
+ num_blocks=num_blocks,
94
+ num_heads=num_heads,
95
+ num_head_channels=num_head_channels,
96
+ mlp_ratio=mlp_ratio,
97
+ attn_mode=attn_mode,
98
+ window_size=window_size,
99
+ pe_mode=pe_mode,
100
+ use_fp16=use_fp16,
101
+ use_checkpoint=use_checkpoint,
102
+ qk_rms_norm=qk_rms_norm,
103
+ )
104
+ self.resolution = resolution
105
+ self.rep_config = representation_config
106
+ if mesh_extractor == "mc":
107
+ self.mesh_extractor = SparseFeatures2MCMesh(res=self.resolution*4, use_color=self.rep_config.get('use_color', False))
108
+ elif mesh_extractor == "fc":
109
+ self.mesh_extractor = SparseFeatures2Mesh(res=self.resolution*4, use_color=self.rep_config.get('use_color', False))
110
+ else:
111
+ raise ValueError(f"Invalid mesh extractor {mesh_extractor}")
112
+ self.out_channels = self.mesh_extractor.feats_channels
113
+ self.upsample = nn.ModuleList([
114
+ SparseSubdivideBlock3d(
115
+ channels=model_channels,
116
+ resolution=resolution,
117
+ out_channels=model_channels // 4
118
+ ),
119
+ SparseSubdivideBlock3d(
120
+ channels=model_channels // 4,
121
+ resolution=resolution * 2,
122
+ out_channels=model_channels // 8
123
+ )
124
+ ])
125
+ self.out_layer = sp.SparseLinear(model_channels // 8, self.out_channels)
126
+
127
+ self.initialize_weights()
128
+ if use_fp16:
129
+ self.convert_to_fp16()
130
+
131
+ def initialize_weights(self) -> None:
132
+ super().initialize_weights()
133
+ # Zero-out output layers:
134
+ nn.init.constant_(self.out_layer.weight, 0)
135
+ nn.init.constant_(self.out_layer.bias, 0)
136
+
137
+ def convert_to_fp16(self) -> None:
138
+ """
139
+ Convert the torso of the model to float16.
140
+ """
141
+ super().convert_to_fp16()
142
+ self.upsample.apply(convert_module_to_f16)
143
+
144
+ def convert_to_fp32(self) -> None:
145
+ """
146
+ Convert the torso of the model to float32.
147
+ """
148
+ super().convert_to_fp32()
149
+ self.upsample.apply(convert_module_to_f32)
150
+
151
+ def to_representation(self, x: sp.SparseTensor) -> List[MeshExtractResult]:
152
+ """
153
+ Convert a batch of network outputs to 3D representations.
154
+
155
+ Args:
156
+ x: The [N x * x C] sparse tensor output by the network.
157
+
158
+ Returns:
159
+ list of representations
160
+ """
161
+ ret = []
162
+ for i in range(x.shape[0]):
163
+ mesh = self.mesh_extractor(x[i], training=self.training)
164
+ ret.append(mesh)
165
+ return ret
166
+
167
+ def forward(self, x: sp.SparseTensor) -> List[MeshExtractResult]:
168
+ h = super().forward(x)
169
+ for block in self.upsample:
170
+ h = block(h)
171
+ h = h.type(x.dtype)
172
+ h = self.out_layer(h)
173
+ return self.to_representation(h)
trellis/models/structured_latent_vae/decoder_rf.py ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ import numpy as np
6
+ from ...modules import sparse as sp
7
+ from .base import SparseTransformerBase
8
+ from ...representations import Strivec
9
+
10
+
11
+ class SLatRadianceFieldDecoder(SparseTransformerBase):
12
+ def __init__(
13
+ self,
14
+ resolution: int,
15
+ model_channels: int,
16
+ latent_channels: int,
17
+ num_blocks: int,
18
+ num_heads: Optional[int] = None,
19
+ num_head_channels: Optional[int] = 64,
20
+ mlp_ratio: float = 4,
21
+ attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "swin",
22
+ window_size: int = 8,
23
+ pe_mode: Literal["ape", "rope"] = "ape",
24
+ use_fp16: bool = False,
25
+ use_checkpoint: bool = False,
26
+ qk_rms_norm: bool = False,
27
+ representation_config: dict = None,
28
+ ):
29
+ super().__init__(
30
+ in_channels=latent_channels,
31
+ model_channels=model_channels,
32
+ num_blocks=num_blocks,
33
+ num_heads=num_heads,
34
+ num_head_channels=num_head_channels,
35
+ mlp_ratio=mlp_ratio,
36
+ attn_mode=attn_mode,
37
+ window_size=window_size,
38
+ pe_mode=pe_mode,
39
+ use_fp16=use_fp16,
40
+ use_checkpoint=use_checkpoint,
41
+ qk_rms_norm=qk_rms_norm,
42
+ )
43
+ self.resolution = resolution
44
+ self.rep_config = representation_config
45
+ self._calc_layout()
46
+ self.out_layer = sp.SparseLinear(model_channels, self.out_channels)
47
+
48
+ self.initialize_weights()
49
+ if use_fp16:
50
+ self.convert_to_fp16()
51
+
52
+ def initialize_weights(self) -> None:
53
+ super().initialize_weights()
54
+ # Zero-out output layers:
55
+ nn.init.constant_(self.out_layer.weight, 0)
56
+ nn.init.constant_(self.out_layer.bias, 0)
57
+
58
+ def _calc_layout(self) -> None:
59
+ self.layout = {
60
+ 'trivec': {'shape': (self.rep_config['rank'], 3, self.rep_config['dim']), 'size': self.rep_config['rank'] * 3 * self.rep_config['dim']},
61
+ 'density': {'shape': (self.rep_config['rank'],), 'size': self.rep_config['rank']},
62
+ 'features_dc': {'shape': (self.rep_config['rank'], 1, 3), 'size': self.rep_config['rank'] * 3},
63
+ }
64
+ start = 0
65
+ for k, v in self.layout.items():
66
+ v['range'] = (start, start + v['size'])
67
+ start += v['size']
68
+ self.out_channels = start
69
+
70
+ def to_representation(self, x: sp.SparseTensor) -> List[Strivec]:
71
+ """
72
+ Convert a batch of network outputs to 3D representations.
73
+
74
+ Args:
75
+ x: The [N x * x C] sparse tensor output by the network.
76
+
77
+ Returns:
78
+ list of representations
79
+ """
80
+ ret = []
81
+ for i in range(x.shape[0]):
82
+ representation = Strivec(
83
+ sh_degree=0,
84
+ resolution=self.resolution,
85
+ aabb=[-0.5, -0.5, -0.5, 1, 1, 1],
86
+ rank=self.rep_config['rank'],
87
+ dim=self.rep_config['dim'],
88
+ device='cuda',
89
+ )
90
+ representation.density_shift = 0.0
91
+ representation.position = (x.coords[x.layout[i]][:, 1:].float() + 0.5) / self.resolution
92
+ representation.depth = torch.full((representation.position.shape[0], 1), int(np.log2(self.resolution)), dtype=torch.uint8, device='cuda')
93
+ for k, v in self.layout.items():
94
+ setattr(representation, k, x.feats[x.layout[i]][:, v['range'][0]:v['range'][1]].reshape(-1, *v['shape']))
95
+ representation.trivec = representation.trivec + 1
96
+ ret.append(representation)
97
+ return ret
98
+
99
+ def forward(self, x: sp.SparseTensor) -> List[Strivec]:
100
+ h = super().forward(x)
101
+ h = h.type(x.dtype)
102
+ h = h.replace(F.layer_norm(h.feats, h.feats.shape[-1:]))
103
+ h = self.out_layer(h)
104
+ return self.to_representation(h)
trellis/models/structured_latent_vae/encoder.py ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ from ...modules import sparse as sp
6
+ from .base import SparseTransformerBase
7
+
8
+
9
+ class SLatEncoder(SparseTransformerBase):
10
+ def __init__(
11
+ self,
12
+ resolution: int,
13
+ in_channels: int,
14
+ model_channels: int,
15
+ latent_channels: int,
16
+ num_blocks: int,
17
+ num_heads: Optional[int] = None,
18
+ num_head_channels: Optional[int] = 64,
19
+ mlp_ratio: float = 4,
20
+ attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "swin",
21
+ window_size: int = 8,
22
+ pe_mode: Literal["ape", "rope"] = "ape",
23
+ use_fp16: bool = False,
24
+ use_checkpoint: bool = False,
25
+ qk_rms_norm: bool = False,
26
+ ):
27
+ super().__init__(
28
+ in_channels=in_channels,
29
+ model_channels=model_channels,
30
+ num_blocks=num_blocks,
31
+ num_heads=num_heads,
32
+ num_head_channels=num_head_channels,
33
+ mlp_ratio=mlp_ratio,
34
+ attn_mode=attn_mode,
35
+ window_size=window_size,
36
+ pe_mode=pe_mode,
37
+ use_fp16=use_fp16,
38
+ use_checkpoint=use_checkpoint,
39
+ qk_rms_norm=qk_rms_norm,
40
+ )
41
+ self.resolution = resolution
42
+ self.out_layer = sp.SparseLinear(model_channels, 2 * latent_channels)
43
+
44
+ self.initialize_weights()
45
+ if use_fp16:
46
+ self.convert_to_fp16()
47
+
48
+ def initialize_weights(self) -> None:
49
+ super().initialize_weights()
50
+ # Zero-out output layers:
51
+ nn.init.constant_(self.out_layer.weight, 0)
52
+ nn.init.constant_(self.out_layer.bias, 0)
53
+
54
+ def forward(self, x: sp.SparseTensor, sample_posterior=True, return_raw=False):
55
+ h = super().forward(x)
56
+ h = h.type(x.dtype)
57
+ h = h.replace(F.layer_norm(h.feats, h.feats.shape[-1:]))
58
+ h = self.out_layer(h)
59
+
60
+ # Sample from the posterior distribution
61
+ mean, logvar = h.feats.chunk(2, dim=-1)
62
+ if sample_posterior:
63
+ std = torch.exp(0.5 * logvar)
64
+ z = mean + std * torch.randn_like(std)
65
+ else:
66
+ z = mean
67
+ z = h.replace(z)
68
+
69
+ if return_raw:
70
+ return z, mean, logvar
71
+ else:
72
+ return z
trellis/modules/attention/__init__.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import *
2
+
3
+ BACKEND = 'flash_attn'
4
+ DEBUG = False
5
+
6
+ def __from_env():
7
+ import os
8
+
9
+ global BACKEND
10
+ global DEBUG
11
+
12
+ env_attn_backend = os.environ.get('ATTN_BACKEND')
13
+ env_sttn_debug = os.environ.get('ATTN_DEBUG')
14
+
15
+ if env_attn_backend is not None and env_attn_backend in ['xformers', 'flash_attn', 'sdpa', 'naive']:
16
+ BACKEND = env_attn_backend
17
+ if env_sttn_debug is not None:
18
+ DEBUG = env_sttn_debug == '1'
19
+
20
+ print(f"[ATTENTION] Using backend: {BACKEND}")
21
+ print(f"Please wait...") #long wait follows, so give a message.
22
+ print(f"")
23
+
24
+
25
+ __from_env()
26
+
27
+
28
+ def set_backend(backend: Literal['xformers', 'flash_attn']):
29
+ global BACKEND
30
+ BACKEND = backend
31
+
32
+ def set_debug(debug: bool):
33
+ global DEBUG
34
+ DEBUG = debug
35
+
36
+
37
+ from .full_attn import *
38
+ from .modules import *
trellis/modules/attention/full_attn.py ADDED
@@ -0,0 +1,140 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import *
2
+ import torch
3
+ import math
4
+ from . import DEBUG, BACKEND
5
+
6
+ if BACKEND == 'xformers':
7
+ import xformers.ops as xops
8
+ elif BACKEND == 'flash_attn':
9
+ import flash_attn
10
+ elif BACKEND == 'sdpa':
11
+ from torch.nn.functional import scaled_dot_product_attention as sdpa
12
+ elif BACKEND == 'naive':
13
+ pass
14
+ else:
15
+ raise ValueError(f"Unknown attention backend: {BACKEND}")
16
+
17
+
18
+ __all__ = [
19
+ 'scaled_dot_product_attention',
20
+ ]
21
+
22
+
23
+ def _naive_sdpa(q, k, v):
24
+ """
25
+ Naive implementation of scaled dot product attention.
26
+ """
27
+ q = q.permute(0, 2, 1, 3) # [N, H, L, C]
28
+ k = k.permute(0, 2, 1, 3) # [N, H, L, C]
29
+ v = v.permute(0, 2, 1, 3) # [N, H, L, C]
30
+ scale_factor = 1 / math.sqrt(q.size(-1))
31
+ attn_weight = q @ k.transpose(-2, -1) * scale_factor
32
+ attn_weight = torch.softmax(attn_weight, dim=-1)
33
+ out = attn_weight @ v
34
+ out = out.permute(0, 2, 1, 3) # [N, L, H, C]
35
+ return out
36
+
37
+
38
+ @overload
39
+ def scaled_dot_product_attention(qkv: torch.Tensor) -> torch.Tensor:
40
+ """
41
+ Apply scaled dot product attention.
42
+
43
+ Args:
44
+ qkv (torch.Tensor): A [N, L, 3, H, C] tensor containing Qs, Ks, and Vs.
45
+ """
46
+ ...
47
+
48
+ @overload
49
+ def scaled_dot_product_attention(q: torch.Tensor, kv: torch.Tensor) -> torch.Tensor:
50
+ """
51
+ Apply scaled dot product attention.
52
+
53
+ Args:
54
+ q (torch.Tensor): A [N, L, H, C] tensor containing Qs.
55
+ kv (torch.Tensor): A [N, L, 2, H, C] tensor containing Ks and Vs.
56
+ """
57
+ ...
58
+
59
+ @overload
60
+ def scaled_dot_product_attention(q: torch.Tensor, k: torch.Tensor, v: torch.Tensor) -> torch.Tensor:
61
+ """
62
+ Apply scaled dot product attention.
63
+
64
+ Args:
65
+ q (torch.Tensor): A [N, L, H, Ci] tensor containing Qs.
66
+ k (torch.Tensor): A [N, L, H, Ci] tensor containing Ks.
67
+ v (torch.Tensor): A [N, L, H, Co] tensor containing Vs.
68
+
69
+ Note:
70
+ k and v are assumed to have the same coordinate map.
71
+ """
72
+ ...
73
+
74
+ def scaled_dot_product_attention(*args, **kwargs):
75
+ arg_names_dict = {
76
+ 1: ['qkv'],
77
+ 2: ['q', 'kv'],
78
+ 3: ['q', 'k', 'v']
79
+ }
80
+ num_all_args = len(args) + len(kwargs)
81
+ assert num_all_args in arg_names_dict, f"Invalid number of arguments, got {num_all_args}, expected 1, 2, or 3"
82
+ for key in arg_names_dict[num_all_args][len(args):]:
83
+ assert key in kwargs, f"Missing argument {key}"
84
+
85
+ if num_all_args == 1:
86
+ qkv = args[0] if len(args) > 0 else kwargs['qkv']
87
+ assert len(qkv.shape) == 5 and qkv.shape[2] == 3, f"Invalid shape for qkv, got {qkv.shape}, expected [N, L, 3, H, C]"
88
+ device = qkv.device
89
+
90
+ elif num_all_args == 2:
91
+ q = args[0] if len(args) > 0 else kwargs['q']
92
+ kv = args[1] if len(args) > 1 else kwargs['kv']
93
+ assert q.shape[0] == kv.shape[0], f"Batch size mismatch, got {q.shape[0]} and {kv.shape[0]}"
94
+ assert len(q.shape) == 4, f"Invalid shape for q, got {q.shape}, expected [N, L, H, C]"
95
+ assert len(kv.shape) == 5, f"Invalid shape for kv, got {kv.shape}, expected [N, L, 2, H, C]"
96
+ device = q.device
97
+
98
+ elif num_all_args == 3:
99
+ q = args[0] if len(args) > 0 else kwargs['q']
100
+ k = args[1] if len(args) > 1 else kwargs['k']
101
+ v = args[2] if len(args) > 2 else kwargs['v']
102
+ assert q.shape[0] == k.shape[0] == v.shape[0], f"Batch size mismatch, got {q.shape[0]}, {k.shape[0]}, and {v.shape[0]}"
103
+ assert len(q.shape) == 4, f"Invalid shape for q, got {q.shape}, expected [N, L, H, Ci]"
104
+ assert len(k.shape) == 4, f"Invalid shape for k, got {k.shape}, expected [N, L, H, Ci]"
105
+ assert len(v.shape) == 4, f"Invalid shape for v, got {v.shape}, expected [N, L, H, Co]"
106
+ device = q.device
107
+
108
+ if BACKEND == 'xformers':
109
+ if num_all_args == 1:
110
+ q, k, v = qkv.unbind(dim=2)
111
+ elif num_all_args == 2:
112
+ k, v = kv.unbind(dim=2)
113
+ out = xops.memory_efficient_attention(q, k, v)
114
+ elif BACKEND == 'flash_attn':
115
+ if num_all_args == 1:
116
+ out = flash_attn.flash_attn_qkvpacked_func(qkv)
117
+ elif num_all_args == 2:
118
+ out = flash_attn.flash_attn_kvpacked_func(q, kv)
119
+ elif num_all_args == 3:
120
+ out = flash_attn.flash_attn_func(q, k, v)
121
+ elif BACKEND == 'sdpa':
122
+ if num_all_args == 1:
123
+ q, k, v = qkv.unbind(dim=2)
124
+ elif num_all_args == 2:
125
+ k, v = kv.unbind(dim=2)
126
+ q = q.permute(0, 2, 1, 3) # [N, H, L, C]
127
+ k = k.permute(0, 2, 1, 3) # [N, H, L, C]
128
+ v = v.permute(0, 2, 1, 3) # [N, H, L, C]
129
+ out = sdpa(q, k, v) # [N, H, L, C]
130
+ out = out.permute(0, 2, 1, 3) # [N, L, H, C]
131
+ elif BACKEND == 'naive':
132
+ if num_all_args == 1:
133
+ q, k, v = qkv.unbind(dim=2)
134
+ elif num_all_args == 2:
135
+ k, v = kv.unbind(dim=2)
136
+ out = _naive_sdpa(q, k, v)
137
+ else:
138
+ raise ValueError(f"Unknown attention module: {BACKEND}")
139
+
140
+ return out
trellis/modules/attention/modules.py ADDED
@@ -0,0 +1,146 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ from .full_attn import scaled_dot_product_attention
6
+
7
+
8
+ class MultiHeadRMSNorm(nn.Module):
9
+ def __init__(self, dim: int, heads: int):
10
+ super().__init__()
11
+ self.scale = dim ** 0.5
12
+ self.gamma = nn.Parameter(torch.ones(heads, dim))
13
+
14
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
15
+ return (F.normalize(x.float(), dim = -1) * self.gamma * self.scale).to(x.dtype)
16
+
17
+
18
+ class RotaryPositionEmbedder(nn.Module):
19
+ def __init__(self, hidden_size: int, in_channels: int = 3):
20
+ super().__init__()
21
+ assert hidden_size % 2 == 0, "Hidden size must be divisible by 2"
22
+ self.hidden_size = hidden_size
23
+ self.in_channels = in_channels
24
+ self.freq_dim = hidden_size // in_channels // 2
25
+ self.freqs = torch.arange(self.freq_dim, dtype=torch.float32) / self.freq_dim
26
+ self.freqs = 1.0 / (10000 ** self.freqs)
27
+
28
+ def _get_phases(self, indices: torch.Tensor) -> torch.Tensor:
29
+ self.freqs = self.freqs.to(indices.device)
30
+ phases = torch.outer(indices, self.freqs)
31
+ phases = torch.polar(torch.ones_like(phases), phases)
32
+ return phases
33
+
34
+ def _rotary_embedding(self, x: torch.Tensor, phases: torch.Tensor) -> torch.Tensor:
35
+ x_complex = torch.view_as_complex(x.float().reshape(*x.shape[:-1], -1, 2))
36
+ x_rotated = x_complex * phases
37
+ x_embed = torch.view_as_real(x_rotated).reshape(*x_rotated.shape[:-1], -1).to(x.dtype)
38
+ return x_embed
39
+
40
+ def forward(self, q: torch.Tensor, k: torch.Tensor, indices: Optional[torch.Tensor] = None) -> Tuple[torch.Tensor, torch.Tensor]:
41
+ """
42
+ Args:
43
+ q (sp.SparseTensor): [..., N, D] tensor of queries
44
+ k (sp.SparseTensor): [..., N, D] tensor of keys
45
+ indices (torch.Tensor): [..., N, C] tensor of spatial positions
46
+ """
47
+ if indices is None:
48
+ indices = torch.arange(q.shape[-2], device=q.device)
49
+ if len(q.shape) > 2:
50
+ indices = indices.unsqueeze(0).expand(q.shape[:-2] + (-1,))
51
+
52
+ phases = self._get_phases(indices.reshape(-1)).reshape(*indices.shape[:-1], -1)
53
+ if phases.shape[1] < self.hidden_size // 2:
54
+ phases = torch.cat([phases, torch.polar(
55
+ torch.ones(*phases.shape[:-1], self.hidden_size // 2 - phases.shape[1], device=phases.device),
56
+ torch.zeros(*phases.shape[:-1], self.hidden_size // 2 - phases.shape[1], device=phases.device)
57
+ )], dim=-1)
58
+ q_embed = self._rotary_embedding(q, phases)
59
+ k_embed = self._rotary_embedding(k, phases)
60
+ return q_embed, k_embed
61
+
62
+
63
+ class MultiHeadAttention(nn.Module):
64
+ def __init__(
65
+ self,
66
+ channels: int,
67
+ num_heads: int,
68
+ ctx_channels: Optional[int]=None,
69
+ type: Literal["self", "cross"] = "self",
70
+ attn_mode: Literal["full", "windowed"] = "full",
71
+ window_size: Optional[int] = None,
72
+ shift_window: Optional[Tuple[int, int, int]] = None,
73
+ qkv_bias: bool = True,
74
+ use_rope: bool = False,
75
+ qk_rms_norm: bool = False,
76
+ ):
77
+ super().__init__()
78
+ assert channels % num_heads == 0
79
+ assert type in ["self", "cross"], f"Invalid attention type: {type}"
80
+ assert attn_mode in ["full", "windowed"], f"Invalid attention mode: {attn_mode}"
81
+ assert type == "self" or attn_mode == "full", "Cross-attention only supports full attention"
82
+
83
+ if attn_mode == "windowed":
84
+ raise NotImplementedError("Windowed attention is not yet implemented")
85
+
86
+ self.channels = channels
87
+ self.head_dim = channels // num_heads
88
+ self.ctx_channels = ctx_channels if ctx_channels is not None else channels
89
+ self.num_heads = num_heads
90
+ self._type = type
91
+ self.attn_mode = attn_mode
92
+ self.window_size = window_size
93
+ self.shift_window = shift_window
94
+ self.use_rope = use_rope
95
+ self.qk_rms_norm = qk_rms_norm
96
+
97
+ if self._type == "self":
98
+ self.to_qkv = nn.Linear(channels, channels * 3, bias=qkv_bias)
99
+ else:
100
+ self.to_q = nn.Linear(channels, channels, bias=qkv_bias)
101
+ self.to_kv = nn.Linear(self.ctx_channels, channels * 2, bias=qkv_bias)
102
+
103
+ if self.qk_rms_norm:
104
+ self.q_rms_norm = MultiHeadRMSNorm(self.head_dim, num_heads)
105
+ self.k_rms_norm = MultiHeadRMSNorm(self.head_dim, num_heads)
106
+
107
+ self.to_out = nn.Linear(channels, channels)
108
+
109
+ if use_rope:
110
+ self.rope = RotaryPositionEmbedder(channels)
111
+
112
+ def forward(self, x: torch.Tensor, context: Optional[torch.Tensor] = None, indices: Optional[torch.Tensor] = None) -> torch.Tensor:
113
+ B, L, C = x.shape
114
+ if self._type == "self":
115
+ qkv = self.to_qkv(x)
116
+ qkv = qkv.reshape(B, L, 3, self.num_heads, -1)
117
+ if self.use_rope:
118
+ q, k, v = qkv.unbind(dim=2)
119
+ q, k = self.rope(q, k, indices)
120
+ qkv = torch.stack([q, k, v], dim=2)
121
+ if self.attn_mode == "full":
122
+ if self.qk_rms_norm:
123
+ q, k, v = qkv.unbind(dim=2)
124
+ q = self.q_rms_norm(q)
125
+ k = self.k_rms_norm(k)
126
+ h = scaled_dot_product_attention(q, k, v)
127
+ else:
128
+ h = scaled_dot_product_attention(qkv)
129
+ elif self.attn_mode == "windowed":
130
+ raise NotImplementedError("Windowed attention is not yet implemented")
131
+ else:
132
+ Lkv = context.shape[1]
133
+ q = self.to_q(x)
134
+ kv = self.to_kv(context)
135
+ q = q.reshape(B, L, self.num_heads, -1)
136
+ kv = kv.reshape(B, Lkv, 2, self.num_heads, -1)
137
+ if self.qk_rms_norm:
138
+ q = self.q_rms_norm(q)
139
+ k, v = kv.unbind(dim=2)
140
+ k = self.k_rms_norm(k)
141
+ h = scaled_dot_product_attention(q, k, v)
142
+ else:
143
+ h = scaled_dot_product_attention(q, kv)
144
+ h = h.reshape(B, L, -1)
145
+ h = self.to_out(h)
146
+ return h
trellis/modules/norm.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+
4
+
5
+ class LayerNorm32(nn.LayerNorm):
6
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
7
+ needed_dtype = self.weight.dtype if self.weight is not None else x.dtype # to make it work both with float16 and float32
8
+ return super().forward(x.to(needed_dtype)).type(x.dtype)
9
+
10
+
11
+ class GroupNorm32(nn.GroupNorm):
12
+ """
13
+ A GroupNorm layer that converts to float16 before the forward pass.
14
+ """
15
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
16
+ needed_dtype = self.weight.dtype if self.weight is not None else x.dtype # to make it work both with float16 and float32
17
+ return super().forward(x.to(needed_dtype)).type(x.dtype)
18
+
19
+
20
+ class ChannelLayerNorm32(LayerNorm32):
21
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
22
+ DIM = x.dim()
23
+ x = x.permute(0, *range(2, DIM), 1).contiguous()
24
+ x = super().forward(x)
25
+ x = x.permute(0, DIM-1, *range(1, DIM-1)).contiguous()
26
+ return x
27
+
trellis/modules/sparse/__init__.py ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import *
2
+
3
+ BACKEND = 'spconv'
4
+ DEBUG = False
5
+ ATTN = 'flash_attn'
6
+
7
+ def __from_env():
8
+ import os
9
+
10
+ global BACKEND
11
+ global DEBUG
12
+ global ATTN
13
+
14
+ env_sparse_backend = os.environ.get('SPARSE_BACKEND')
15
+ env_sparse_debug = os.environ.get('SPARSE_DEBUG')
16
+ env_sparse_attn = os.environ.get('SPARSE_ATTN_BACKEND')
17
+ if env_sparse_attn is None:
18
+ env_sparse_attn = os.environ.get('ATTN_BACKEND')
19
+
20
+ if env_sparse_backend is not None and env_sparse_backend in ['spconv', 'torchsparse']:
21
+ BACKEND = env_sparse_backend
22
+ if env_sparse_debug is not None:
23
+ DEBUG = env_sparse_debug == '1'
24
+ if env_sparse_attn is not None and env_sparse_attn in ['xformers', 'flash_attn']:
25
+ ATTN = env_sparse_attn
26
+
27
+ print(f"[SPARSE] Backend: {BACKEND}, Attention: {ATTN}")
28
+
29
+
30
+ __from_env()
31
+
32
+
33
+ def set_backend(backend: Literal['spconv', 'torchsparse']):
34
+ global BACKEND
35
+ BACKEND = backend
36
+
37
+ def set_debug(debug: bool):
38
+ global DEBUG
39
+ DEBUG = debug
40
+
41
+ def set_attn(attn: Literal['xformers', 'flash_attn']):
42
+ global ATTN
43
+ ATTN = attn
44
+
45
+
46
+ import importlib
47
+
48
+ __attributes = {
49
+ 'SparseTensor': 'basic',
50
+ 'sparse_batch_broadcast': 'basic',
51
+ 'sparse_batch_op': 'basic',
52
+ 'sparse_cat': 'basic',
53
+ 'sparse_unbind': 'basic',
54
+ 'SparseGroupNorm': 'norm',
55
+ 'SparseLayerNorm': 'norm',
56
+ 'SparseGroupNorm32': 'norm',
57
+ 'SparseLayerNorm32': 'norm',
58
+ 'SparseReLU': 'nonlinearity',
59
+ 'SparseSiLU': 'nonlinearity',
60
+ 'SparseGELU': 'nonlinearity',
61
+ 'SparseActivation': 'nonlinearity',
62
+ 'SparseLinear': 'linear',
63
+ 'sparse_scaled_dot_product_attention': 'attention',
64
+ 'SerializeMode': 'attention',
65
+ 'sparse_serialized_scaled_dot_product_self_attention': 'attention',
66
+ 'sparse_windowed_scaled_dot_product_self_attention': 'attention',
67
+ 'SparseMultiHeadAttention': 'attention',
68
+ 'SparseConv3d': 'conv',
69
+ 'SparseInverseConv3d': 'conv',
70
+ 'SparseDownsample': 'spatial',
71
+ 'SparseUpsample': 'spatial',
72
+ 'SparseSubdivide' : 'spatial'
73
+ }
74
+
75
+ __submodules = ['transformer']
76
+
77
+ __all__ = list(__attributes.keys()) + __submodules
78
+
79
+ def __getattr__(name):
80
+ if name not in globals():
81
+ if name in __attributes:
82
+ module_name = __attributes[name]
83
+ module = importlib.import_module(f".{module_name}", __name__)
84
+ globals()[name] = getattr(module, name)
85
+ elif name in __submodules:
86
+ module = importlib.import_module(f".{name}", __name__)
87
+ globals()[name] = module
88
+ else:
89
+ raise AttributeError(f"module {__name__} has no attribute {name}")
90
+ return globals()[name]
91
+
92
+
93
+ # For Pylance
94
+ if __name__ == '__main__':
95
+ from .basic import *
96
+ from .norm import *
97
+ from .nonlinearity import *
98
+ from .linear import *
99
+ from .attention import *
100
+ from .conv import *
101
+ from .spatial import *
102
+ import transformer
trellis/modules/sparse/attention/__init__.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ from .full_attn import *
2
+ from .serialized_attn import *
3
+ from .windowed_attn import *
4
+ from .modules import *
trellis/modules/sparse/attention/full_attn.py ADDED
@@ -0,0 +1,215 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import *
2
+ import torch
3
+ from .. import SparseTensor
4
+ from .. import DEBUG, ATTN
5
+
6
+ if ATTN == 'xformers':
7
+ import xformers.ops as xops
8
+ elif ATTN == 'flash_attn':
9
+ import flash_attn
10
+ else:
11
+ raise ValueError(f"Unknown attention module: {ATTN}")
12
+
13
+
14
+ __all__ = [
15
+ 'sparse_scaled_dot_product_attention',
16
+ ]
17
+
18
+
19
+ @overload
20
+ def sparse_scaled_dot_product_attention(qkv: SparseTensor) -> SparseTensor:
21
+ """
22
+ Apply scaled dot product attention to a sparse tensor.
23
+
24
+ Args:
25
+ qkv (SparseTensor): A [N, *, 3, H, C] sparse tensor containing Qs, Ks, and Vs.
26
+ """
27
+ ...
28
+
29
+ @overload
30
+ def sparse_scaled_dot_product_attention(q: SparseTensor, kv: Union[SparseTensor, torch.Tensor]) -> SparseTensor:
31
+ """
32
+ Apply scaled dot product attention to a sparse tensor.
33
+
34
+ Args:
35
+ q (SparseTensor): A [N, *, H, C] sparse tensor containing Qs.
36
+ kv (SparseTensor or torch.Tensor): A [N, *, 2, H, C] sparse tensor or a [N, L, 2, H, C] dense tensor containing Ks and Vs.
37
+ """
38
+ ...
39
+
40
+ @overload
41
+ def sparse_scaled_dot_product_attention(q: torch.Tensor, kv: SparseTensor) -> torch.Tensor:
42
+ """
43
+ Apply scaled dot product attention to a sparse tensor.
44
+
45
+ Args:
46
+ q (SparseTensor): A [N, L, H, C] dense tensor containing Qs.
47
+ kv (SparseTensor or torch.Tensor): A [N, *, 2, H, C] sparse tensor containing Ks and Vs.
48
+ """
49
+ ...
50
+
51
+ @overload
52
+ def sparse_scaled_dot_product_attention(q: SparseTensor, k: SparseTensor, v: SparseTensor) -> SparseTensor:
53
+ """
54
+ Apply scaled dot product attention to a sparse tensor.
55
+
56
+ Args:
57
+ q (SparseTensor): A [N, *, H, Ci] sparse tensor containing Qs.
58
+ k (SparseTensor): A [N, *, H, Ci] sparse tensor containing Ks.
59
+ v (SparseTensor): A [N, *, H, Co] sparse tensor containing Vs.
60
+
61
+ Note:
62
+ k and v are assumed to have the same coordinate map.
63
+ """
64
+ ...
65
+
66
+ @overload
67
+ def sparse_scaled_dot_product_attention(q: SparseTensor, k: torch.Tensor, v: torch.Tensor) -> SparseTensor:
68
+ """
69
+ Apply scaled dot product attention to a sparse tensor.
70
+
71
+ Args:
72
+ q (SparseTensor): A [N, *, H, Ci] sparse tensor containing Qs.
73
+ k (torch.Tensor): A [N, L, H, Ci] dense tensor containing Ks.
74
+ v (torch.Tensor): A [N, L, H, Co] dense tensor containing Vs.
75
+ """
76
+ ...
77
+
78
+ @overload
79
+ def sparse_scaled_dot_product_attention(q: torch.Tensor, k: SparseTensor, v: SparseTensor) -> torch.Tensor:
80
+ """
81
+ Apply scaled dot product attention to a sparse tensor.
82
+
83
+ Args:
84
+ q (torch.Tensor): A [N, L, H, Ci] dense tensor containing Qs.
85
+ k (SparseTensor): A [N, *, H, Ci] sparse tensor containing Ks.
86
+ v (SparseTensor): A [N, *, H, Co] sparse tensor containing Vs.
87
+ """
88
+ ...
89
+
90
+ def sparse_scaled_dot_product_attention(*args, **kwargs):
91
+ arg_names_dict = {
92
+ 1: ['qkv'],
93
+ 2: ['q', 'kv'],
94
+ 3: ['q', 'k', 'v']
95
+ }
96
+ num_all_args = len(args) + len(kwargs)
97
+ assert num_all_args in arg_names_dict, f"Invalid number of arguments, got {num_all_args}, expected 1, 2, or 3"
98
+ for key in arg_names_dict[num_all_args][len(args):]:
99
+ assert key in kwargs, f"Missing argument {key}"
100
+
101
+ if num_all_args == 1:
102
+ qkv = args[0] if len(args) > 0 else kwargs['qkv']
103
+ assert isinstance(qkv, SparseTensor), f"qkv must be a SparseTensor, got {type(qkv)}"
104
+ assert len(qkv.shape) == 4 and qkv.shape[1] == 3, f"Invalid shape for qkv, got {qkv.shape}, expected [N, *, 3, H, C]"
105
+ device = qkv.device
106
+
107
+ s = qkv
108
+ q_seqlen = [qkv.layout[i].stop - qkv.layout[i].start for i in range(qkv.shape[0])]
109
+ kv_seqlen = q_seqlen
110
+ qkv = qkv.feats # [T, 3, H, C]
111
+
112
+ elif num_all_args == 2:
113
+ q = args[0] if len(args) > 0 else kwargs['q']
114
+ kv = args[1] if len(args) > 1 else kwargs['kv']
115
+ assert isinstance(q, SparseTensor) and isinstance(kv, (SparseTensor, torch.Tensor)) or \
116
+ isinstance(q, torch.Tensor) and isinstance(kv, SparseTensor), \
117
+ f"Invalid types, got {type(q)} and {type(kv)}"
118
+ assert q.shape[0] == kv.shape[0], f"Batch size mismatch, got {q.shape[0]} and {kv.shape[0]}"
119
+ device = q.device
120
+
121
+ if isinstance(q, SparseTensor):
122
+ assert len(q.shape) == 3, f"Invalid shape for q, got {q.shape}, expected [N, *, H, C]"
123
+ s = q
124
+ q_seqlen = [q.layout[i].stop - q.layout[i].start for i in range(q.shape[0])]
125
+ q = q.feats # [T_Q, H, C]
126
+ else:
127
+ assert len(q.shape) == 4, f"Invalid shape for q, got {q.shape}, expected [N, L, H, C]"
128
+ s = None
129
+ N, L, H, C = q.shape
130
+ q_seqlen = [L] * N
131
+ q = q.reshape(N * L, H, C) # [T_Q, H, C]
132
+
133
+ if isinstance(kv, SparseTensor):
134
+ assert len(kv.shape) == 4 and kv.shape[1] == 2, f"Invalid shape for kv, got {kv.shape}, expected [N, *, 2, H, C]"
135
+ kv_seqlen = [kv.layout[i].stop - kv.layout[i].start for i in range(kv.shape[0])]
136
+ kv = kv.feats # [T_KV, 2, H, C]
137
+ else:
138
+ assert len(kv.shape) == 5, f"Invalid shape for kv, got {kv.shape}, expected [N, L, 2, H, C]"
139
+ N, L, _, H, C = kv.shape
140
+ kv_seqlen = [L] * N
141
+ kv = kv.reshape(N * L, 2, H, C) # [T_KV, 2, H, C]
142
+
143
+ elif num_all_args == 3:
144
+ q = args[0] if len(args) > 0 else kwargs['q']
145
+ k = args[1] if len(args) > 1 else kwargs['k']
146
+ v = args[2] if len(args) > 2 else kwargs['v']
147
+ assert isinstance(q, SparseTensor) and isinstance(k, (SparseTensor, torch.Tensor)) and type(k) == type(v) or \
148
+ isinstance(q, torch.Tensor) and isinstance(k, SparseTensor) and isinstance(v, SparseTensor), \
149
+ f"Invalid types, got {type(q)}, {type(k)}, and {type(v)}"
150
+ assert q.shape[0] == k.shape[0] == v.shape[0], f"Batch size mismatch, got {q.shape[0]}, {k.shape[0]}, and {v.shape[0]}"
151
+ device = q.device
152
+
153
+ if isinstance(q, SparseTensor):
154
+ assert len(q.shape) == 3, f"Invalid shape for q, got {q.shape}, expected [N, *, H, Ci]"
155
+ s = q
156
+ q_seqlen = [q.layout[i].stop - q.layout[i].start for i in range(q.shape[0])]
157
+ q = q.feats # [T_Q, H, Ci]
158
+ else:
159
+ assert len(q.shape) == 4, f"Invalid shape for q, got {q.shape}, expected [N, L, H, Ci]"
160
+ s = None
161
+ N, L, H, CI = q.shape
162
+ q_seqlen = [L] * N
163
+ q = q.reshape(N * L, H, CI) # [T_Q, H, Ci]
164
+
165
+ if isinstance(k, SparseTensor):
166
+ assert len(k.shape) == 3, f"Invalid shape for k, got {k.shape}, expected [N, *, H, Ci]"
167
+ assert len(v.shape) == 3, f"Invalid shape for v, got {v.shape}, expected [N, *, H, Co]"
168
+ kv_seqlen = [k.layout[i].stop - k.layout[i].start for i in range(k.shape[0])]
169
+ k = k.feats # [T_KV, H, Ci]
170
+ v = v.feats # [T_KV, H, Co]
171
+ else:
172
+ assert len(k.shape) == 4, f"Invalid shape for k, got {k.shape}, expected [N, L, H, Ci]"
173
+ assert len(v.shape) == 4, f"Invalid shape for v, got {v.shape}, expected [N, L, H, Co]"
174
+ N, L, H, CI, CO = *k.shape, v.shape[-1]
175
+ kv_seqlen = [L] * N
176
+ k = k.reshape(N * L, H, CI) # [T_KV, H, Ci]
177
+ v = v.reshape(N * L, H, CO) # [T_KV, H, Co]
178
+
179
+ if DEBUG:
180
+ if s is not None:
181
+ for i in range(s.shape[0]):
182
+ assert (s.coords[s.layout[i]] == i).all(), f"SparseScaledDotProductSelfAttention: batch index mismatch"
183
+ if num_all_args in [2, 3]:
184
+ assert q.shape[:2] == [1, sum(q_seqlen)], f"SparseScaledDotProductSelfAttention: q shape mismatch"
185
+ if num_all_args == 3:
186
+ assert k.shape[:2] == [1, sum(kv_seqlen)], f"SparseScaledDotProductSelfAttention: k shape mismatch"
187
+ assert v.shape[:2] == [1, sum(kv_seqlen)], f"SparseScaledDotProductSelfAttention: v shape mismatch"
188
+
189
+ if ATTN == 'xformers':
190
+ if num_all_args == 1:
191
+ q, k, v = qkv.unbind(dim=1)
192
+ elif num_all_args == 2:
193
+ k, v = kv.unbind(dim=1)
194
+ q = q.unsqueeze(0)
195
+ k = k.unsqueeze(0)
196
+ v = v.unsqueeze(0)
197
+ mask = xops.fmha.BlockDiagonalMask.from_seqlens(q_seqlen, kv_seqlen)
198
+ out = xops.memory_efficient_attention(q, k, v, mask)[0]
199
+ elif ATTN == 'flash_attn':
200
+ cu_seqlens_q = torch.cat([torch.tensor([0]), torch.cumsum(torch.tensor(q_seqlen), dim=0)]).int().to(device)
201
+ if num_all_args in [2, 3]:
202
+ cu_seqlens_kv = torch.cat([torch.tensor([0]), torch.cumsum(torch.tensor(kv_seqlen), dim=0)]).int().to(device)
203
+ if num_all_args == 1:
204
+ out = flash_attn.flash_attn_varlen_qkvpacked_func(qkv, cu_seqlens_q, max(q_seqlen))
205
+ elif num_all_args == 2:
206
+ out = flash_attn.flash_attn_varlen_kvpacked_func(q, kv, cu_seqlens_q, cu_seqlens_kv, max(q_seqlen), max(kv_seqlen))
207
+ elif num_all_args == 3:
208
+ out = flash_attn.flash_attn_varlen_func(q, k, v, cu_seqlens_q, cu_seqlens_kv, max(q_seqlen), max(kv_seqlen))
209
+ else:
210
+ raise ValueError(f"Unknown attention module: {ATTN}")
211
+
212
+ if s is not None:
213
+ return s.replace(out)
214
+ else:
215
+ return out.reshape(N, L, H, -1)
trellis/modules/sparse/attention/modules.py ADDED
@@ -0,0 +1,139 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ from .. import SparseTensor
6
+ from .full_attn import sparse_scaled_dot_product_attention
7
+ from .serialized_attn import SerializeMode, sparse_serialized_scaled_dot_product_self_attention
8
+ from .windowed_attn import sparse_windowed_scaled_dot_product_self_attention
9
+ from ...attention import RotaryPositionEmbedder
10
+
11
+
12
+ class SparseMultiHeadRMSNorm(nn.Module):
13
+ def __init__(self, dim: int, heads: int):
14
+ super().__init__()
15
+ self.scale = dim ** 0.5
16
+ self.gamma = nn.Parameter(torch.ones(heads, dim))
17
+
18
+ def forward(self, x: Union[SparseTensor, torch.Tensor]) -> Union[SparseTensor, torch.Tensor]:
19
+ x_type = x.dtype
20
+ x = x.float()
21
+ if isinstance(x, SparseTensor):
22
+ x = x.replace(F.normalize(x.feats, dim=-1))
23
+ else:
24
+ x = F.normalize(x, dim=-1)
25
+ return (x * self.gamma * self.scale).to(x_type)
26
+
27
+
28
+ class SparseMultiHeadAttention(nn.Module):
29
+ def __init__(
30
+ self,
31
+ channels: int,
32
+ num_heads: int,
33
+ ctx_channels: Optional[int] = None,
34
+ type: Literal["self", "cross"] = "self",
35
+ attn_mode: Literal["full", "serialized", "windowed"] = "full",
36
+ window_size: Optional[int] = None,
37
+ shift_sequence: Optional[int] = None,
38
+ shift_window: Optional[Tuple[int, int, int]] = None,
39
+ serialize_mode: Optional[SerializeMode] = None,
40
+ qkv_bias: bool = True,
41
+ use_rope: bool = False,
42
+ qk_rms_norm: bool = False,
43
+ ):
44
+ super().__init__()
45
+ assert channels % num_heads == 0
46
+ assert type in ["self", "cross"], f"Invalid attention type: {type}"
47
+ assert attn_mode in ["full", "serialized", "windowed"], f"Invalid attention mode: {attn_mode}"
48
+ assert type == "self" or attn_mode == "full", "Cross-attention only supports full attention"
49
+ assert type == "self" or use_rope is False, "Rotary position embeddings only supported for self-attention"
50
+ self.channels = channels
51
+ self.ctx_channels = ctx_channels if ctx_channels is not None else channels
52
+ self.num_heads = num_heads
53
+ self._type = type
54
+ self.attn_mode = attn_mode
55
+ self.window_size = window_size
56
+ self.shift_sequence = shift_sequence
57
+ self.shift_window = shift_window
58
+ self.serialize_mode = serialize_mode
59
+ self.use_rope = use_rope
60
+ self.qk_rms_norm = qk_rms_norm
61
+
62
+ if self._type == "self":
63
+ self.to_qkv = nn.Linear(channels, channels * 3, bias=qkv_bias)
64
+ else:
65
+ self.to_q = nn.Linear(channels, channels, bias=qkv_bias)
66
+ self.to_kv = nn.Linear(self.ctx_channels, channels * 2, bias=qkv_bias)
67
+
68
+ if self.qk_rms_norm:
69
+ self.q_rms_norm = SparseMultiHeadRMSNorm(channels // num_heads, num_heads)
70
+ self.k_rms_norm = SparseMultiHeadRMSNorm(channels // num_heads, num_heads)
71
+
72
+ self.to_out = nn.Linear(channels, channels)
73
+
74
+ if use_rope:
75
+ self.rope = RotaryPositionEmbedder(channels)
76
+
77
+ @staticmethod
78
+ def _linear(module: nn.Linear, x: Union[SparseTensor, torch.Tensor]) -> Union[SparseTensor, torch.Tensor]:
79
+ if isinstance(x, SparseTensor):
80
+ return x.replace(module(x.feats))
81
+ else:
82
+ return module(x)
83
+
84
+ @staticmethod
85
+ def _reshape_chs(x: Union[SparseTensor, torch.Tensor], shape: Tuple[int, ...]) -> Union[SparseTensor, torch.Tensor]:
86
+ if isinstance(x, SparseTensor):
87
+ return x.reshape(*shape)
88
+ else:
89
+ return x.reshape(*x.shape[:2], *shape)
90
+
91
+ def _fused_pre(self, x: Union[SparseTensor, torch.Tensor], num_fused: int) -> Union[SparseTensor, torch.Tensor]:
92
+ if isinstance(x, SparseTensor):
93
+ x_feats = x.feats.unsqueeze(0)
94
+ else:
95
+ x_feats = x
96
+ x_feats = x_feats.reshape(*x_feats.shape[:2], num_fused, self.num_heads, -1)
97
+ return x.replace(x_feats.squeeze(0)) if isinstance(x, SparseTensor) else x_feats
98
+
99
+ def _rope(self, qkv: SparseTensor) -> SparseTensor:
100
+ q, k, v = qkv.feats.unbind(dim=1) # [T, H, C]
101
+ q, k = self.rope(q, k, qkv.coords[:, 1:])
102
+ qkv = qkv.replace(torch.stack([q, k, v], dim=1))
103
+ return qkv
104
+
105
+ def forward(self, x: Union[SparseTensor, torch.Tensor], context: Optional[Union[SparseTensor, torch.Tensor]] = None) -> Union[SparseTensor, torch.Tensor]:
106
+ if self._type == "self":
107
+ qkv = self._linear(self.to_qkv, x)
108
+ qkv = self._fused_pre(qkv, num_fused=3)
109
+ if self.use_rope:
110
+ qkv = self._rope(qkv)
111
+ if self.qk_rms_norm:
112
+ q, k, v = qkv.unbind(dim=1)
113
+ q = self.q_rms_norm(q)
114
+ k = self.k_rms_norm(k)
115
+ qkv = qkv.replace(torch.stack([q.feats, k.feats, v.feats], dim=1))
116
+ if self.attn_mode == "full":
117
+ h = sparse_scaled_dot_product_attention(qkv)
118
+ elif self.attn_mode == "serialized":
119
+ h = sparse_serialized_scaled_dot_product_self_attention(
120
+ qkv, self.window_size, serialize_mode=self.serialize_mode, shift_sequence=self.shift_sequence, shift_window=self.shift_window
121
+ )
122
+ elif self.attn_mode == "windowed":
123
+ h = sparse_windowed_scaled_dot_product_self_attention(
124
+ qkv, self.window_size, shift_window=self.shift_window
125
+ )
126
+ else:
127
+ q = self._linear(self.to_q, x)
128
+ q = self._reshape_chs(q, (self.num_heads, -1))
129
+ kv = self._linear(self.to_kv, context)
130
+ kv = self._fused_pre(kv, num_fused=2)
131
+ if self.qk_rms_norm:
132
+ q = self.q_rms_norm(q)
133
+ k, v = kv.unbind(dim=1)
134
+ k = self.k_rms_norm(k)
135
+ kv = kv.replace(torch.stack([k.feats, v.feats], dim=1))
136
+ h = sparse_scaled_dot_product_attention(q, kv)
137
+ h = self._reshape_chs(h, (-1,))
138
+ h = self._linear(self.to_out, h)
139
+ return h
trellis/modules/sparse/attention/serialized_attn.py ADDED
@@ -0,0 +1,193 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import *
2
+ from enum import Enum
3
+ import torch
4
+ import math
5
+ from .. import SparseTensor
6
+ from .. import DEBUG, ATTN
7
+
8
+ if ATTN == 'xformers':
9
+ import xformers.ops as xops
10
+ elif ATTN == 'flash_attn':
11
+ import flash_attn
12
+ else:
13
+ raise ValueError(f"Unknown attention module: {ATTN}")
14
+
15
+
16
+ __all__ = [
17
+ 'sparse_serialized_scaled_dot_product_self_attention',
18
+ ]
19
+
20
+
21
+ class SerializeMode(Enum):
22
+ Z_ORDER = 0
23
+ Z_ORDER_TRANSPOSED = 1
24
+ HILBERT = 2
25
+ HILBERT_TRANSPOSED = 3
26
+
27
+
28
+ SerializeModes = [
29
+ SerializeMode.Z_ORDER,
30
+ SerializeMode.Z_ORDER_TRANSPOSED,
31
+ SerializeMode.HILBERT,
32
+ SerializeMode.HILBERT_TRANSPOSED
33
+ ]
34
+
35
+
36
+ def calc_serialization(
37
+ tensor: SparseTensor,
38
+ window_size: int,
39
+ serialize_mode: SerializeMode = SerializeMode.Z_ORDER,
40
+ shift_sequence: int = 0,
41
+ shift_window: Tuple[int, int, int] = (0, 0, 0)
42
+ ) -> Tuple[torch.Tensor, torch.Tensor, List[int]]:
43
+ """
44
+ Calculate serialization and partitioning for a set of coordinates.
45
+
46
+ Args:
47
+ tensor (SparseTensor): The input tensor.
48
+ window_size (int): The window size to use.
49
+ serialize_mode (SerializeMode): The serialization mode to use.
50
+ shift_sequence (int): The shift of serialized sequence.
51
+ shift_window (Tuple[int, int, int]): The shift of serialized coordinates.
52
+
53
+ Returns:
54
+ (torch.Tensor, torch.Tensor): Forwards and backwards indices.
55
+ """
56
+ fwd_indices = []
57
+ bwd_indices = []
58
+ seq_lens = []
59
+ seq_batch_indices = []
60
+ offsets = [0]
61
+
62
+ if 'vox2seq' not in globals():
63
+ import vox2seq
64
+
65
+ # Serialize the input
66
+ serialize_coords = tensor.coords[:, 1:].clone()
67
+ serialize_coords += torch.tensor(shift_window, dtype=torch.int32, device=tensor.device).reshape(1, 3)
68
+ if serialize_mode == SerializeMode.Z_ORDER:
69
+ code = vox2seq.encode(serialize_coords, mode='z_order', permute=[0, 1, 2])
70
+ elif serialize_mode == SerializeMode.Z_ORDER_TRANSPOSED:
71
+ code = vox2seq.encode(serialize_coords, mode='z_order', permute=[1, 0, 2])
72
+ elif serialize_mode == SerializeMode.HILBERT:
73
+ code = vox2seq.encode(serialize_coords, mode='hilbert', permute=[0, 1, 2])
74
+ elif serialize_mode == SerializeMode.HILBERT_TRANSPOSED:
75
+ code = vox2seq.encode(serialize_coords, mode='hilbert', permute=[1, 0, 2])
76
+ else:
77
+ raise ValueError(f"Unknown serialize mode: {serialize_mode}")
78
+
79
+ for bi, s in enumerate(tensor.layout):
80
+ num_points = s.stop - s.start
81
+ num_windows = (num_points + window_size - 1) // window_size
82
+ valid_window_size = num_points / num_windows
83
+ to_ordered = torch.argsort(code[s.start:s.stop])
84
+ if num_windows == 1:
85
+ fwd_indices.append(to_ordered)
86
+ bwd_indices.append(torch.zeros_like(to_ordered).scatter_(0, to_ordered, torch.arange(num_points, device=tensor.device)))
87
+ fwd_indices[-1] += s.start
88
+ bwd_indices[-1] += offsets[-1]
89
+ seq_lens.append(num_points)
90
+ seq_batch_indices.append(bi)
91
+ offsets.append(offsets[-1] + seq_lens[-1])
92
+ else:
93
+ # Partition the input
94
+ offset = 0
95
+ mids = [(i + 0.5) * valid_window_size + shift_sequence for i in range(num_windows)]
96
+ split = [math.floor(i * valid_window_size + shift_sequence) for i in range(num_windows + 1)]
97
+ bwd_index = torch.zeros((num_points,), dtype=torch.int64, device=tensor.device)
98
+ for i in range(num_windows):
99
+ mid = mids[i]
100
+ valid_start = split[i]
101
+ valid_end = split[i + 1]
102
+ padded_start = math.floor(mid - 0.5 * window_size)
103
+ padded_end = padded_start + window_size
104
+ fwd_indices.append(to_ordered[torch.arange(padded_start, padded_end, device=tensor.device) % num_points])
105
+ offset += valid_start - padded_start
106
+ bwd_index.scatter_(0, fwd_indices[-1][valid_start-padded_start:valid_end-padded_start], torch.arange(offset, offset + valid_end - valid_start, device=tensor.device))
107
+ offset += padded_end - valid_start
108
+ fwd_indices[-1] += s.start
109
+ seq_lens.extend([window_size] * num_windows)
110
+ seq_batch_indices.extend([bi] * num_windows)
111
+ bwd_indices.append(bwd_index + offsets[-1])
112
+ offsets.append(offsets[-1] + num_windows * window_size)
113
+
114
+ fwd_indices = torch.cat(fwd_indices)
115
+ bwd_indices = torch.cat(bwd_indices)
116
+
117
+ return fwd_indices, bwd_indices, seq_lens, seq_batch_indices
118
+
119
+
120
+ def sparse_serialized_scaled_dot_product_self_attention(
121
+ qkv: SparseTensor,
122
+ window_size: int,
123
+ serialize_mode: SerializeMode = SerializeMode.Z_ORDER,
124
+ shift_sequence: int = 0,
125
+ shift_window: Tuple[int, int, int] = (0, 0, 0)
126
+ ) -> SparseTensor:
127
+ """
128
+ Apply serialized scaled dot product self attention to a sparse tensor.
129
+
130
+ Args:
131
+ qkv (SparseTensor): [N, *, 3, H, C] sparse tensor containing Qs, Ks, and Vs.
132
+ window_size (int): The window size to use.
133
+ serialize_mode (SerializeMode): The serialization mode to use.
134
+ shift_sequence (int): The shift of serialized sequence.
135
+ shift_window (Tuple[int, int, int]): The shift of serialized coordinates.
136
+ shift (int): The shift to use.
137
+ """
138
+ assert len(qkv.shape) == 4 and qkv.shape[1] == 3, f"Invalid shape for qkv, got {qkv.shape}, expected [N, *, 3, H, C]"
139
+
140
+ serialization_spatial_cache_name = f'serialization_{serialize_mode}_{window_size}_{shift_sequence}_{shift_window}'
141
+ serialization_spatial_cache = qkv.get_spatial_cache(serialization_spatial_cache_name)
142
+ if serialization_spatial_cache is None:
143
+ fwd_indices, bwd_indices, seq_lens, seq_batch_indices = calc_serialization(qkv, window_size, serialize_mode, shift_sequence, shift_window)
144
+ qkv.register_spatial_cache(serialization_spatial_cache_name, (fwd_indices, bwd_indices, seq_lens, seq_batch_indices))
145
+ else:
146
+ fwd_indices, bwd_indices, seq_lens, seq_batch_indices = serialization_spatial_cache
147
+
148
+ M = fwd_indices.shape[0]
149
+ T = qkv.feats.shape[0]
150
+ H = qkv.feats.shape[2]
151
+ C = qkv.feats.shape[3]
152
+
153
+ qkv_feats = qkv.feats[fwd_indices] # [M, 3, H, C]
154
+
155
+ if DEBUG:
156
+ start = 0
157
+ qkv_coords = qkv.coords[fwd_indices]
158
+ for i in range(len(seq_lens)):
159
+ assert (qkv_coords[start:start+seq_lens[i], 0] == seq_batch_indices[i]).all(), f"SparseWindowedScaledDotProductSelfAttention: batch index mismatch"
160
+ start += seq_lens[i]
161
+
162
+ if all([seq_len == window_size for seq_len in seq_lens]):
163
+ B = len(seq_lens)
164
+ N = window_size
165
+ qkv_feats = qkv_feats.reshape(B, N, 3, H, C)
166
+ if ATTN == 'xformers':
167
+ q, k, v = qkv_feats.unbind(dim=2) # [B, N, H, C]
168
+ out = xops.memory_efficient_attention(q, k, v) # [B, N, H, C]
169
+ elif ATTN == 'flash_attn':
170
+ out = flash_attn.flash_attn_qkvpacked_func(qkv_feats) # [B, N, H, C]
171
+ else:
172
+ raise ValueError(f"Unknown attention module: {ATTN}")
173
+ out = out.reshape(B * N, H, C) # [M, H, C]
174
+ else:
175
+ if ATTN == 'xformers':
176
+ q, k, v = qkv_feats.unbind(dim=1) # [M, H, C]
177
+ q = q.unsqueeze(0) # [1, M, H, C]
178
+ k = k.unsqueeze(0) # [1, M, H, C]
179
+ v = v.unsqueeze(0) # [1, M, H, C]
180
+ mask = xops.fmha.BlockDiagonalMask.from_seqlens(seq_lens)
181
+ out = xops.memory_efficient_attention(q, k, v, mask)[0] # [M, H, C]
182
+ elif ATTN == 'flash_attn':
183
+ cu_seqlens = torch.cat([torch.tensor([0]), torch.cumsum(torch.tensor(seq_lens), dim=0)], dim=0) \
184
+ .to(qkv.device).int()
185
+ out = flash_attn.flash_attn_varlen_qkvpacked_func(qkv_feats, cu_seqlens, max(seq_lens)) # [M, H, C]
186
+
187
+ out = out[bwd_indices] # [T, H, C]
188
+
189
+ if DEBUG:
190
+ qkv_coords = qkv_coords[bwd_indices]
191
+ assert torch.equal(qkv_coords, qkv.coords), "SparseWindowedScaledDotProductSelfAttention: coordinate mismatch"
192
+
193
+ return qkv.replace(out)
trellis/modules/sparse/attention/windowed_attn.py ADDED
@@ -0,0 +1,135 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import *
2
+ import torch
3
+ import math
4
+ from .. import SparseTensor
5
+ from .. import DEBUG, ATTN
6
+
7
+ if ATTN == 'xformers':
8
+ import xformers.ops as xops
9
+ elif ATTN == 'flash_attn':
10
+ import flash_attn
11
+ else:
12
+ raise ValueError(f"Unknown attention module: {ATTN}")
13
+
14
+
15
+ __all__ = [
16
+ 'sparse_windowed_scaled_dot_product_self_attention',
17
+ ]
18
+
19
+
20
+ def calc_window_partition(
21
+ tensor: SparseTensor,
22
+ window_size: Union[int, Tuple[int, ...]],
23
+ shift_window: Union[int, Tuple[int, ...]] = 0
24
+ ) -> Tuple[torch.Tensor, torch.Tensor, List[int], List[int]]:
25
+ """
26
+ Calculate serialization and partitioning for a set of coordinates.
27
+
28
+ Args:
29
+ tensor (SparseTensor): The input tensor.
30
+ window_size (int): The window size to use.
31
+ shift_window (Tuple[int, ...]): The shift of serialized coordinates.
32
+
33
+ Returns:
34
+ (torch.Tensor): Forwards indices.
35
+ (torch.Tensor): Backwards indices.
36
+ (List[int]): Sequence lengths.
37
+ (List[int]): Sequence batch indices.
38
+ """
39
+ DIM = tensor.coords.shape[1] - 1
40
+ shift_window = (shift_window,) * DIM if isinstance(shift_window, int) else shift_window
41
+ window_size = (window_size,) * DIM if isinstance(window_size, int) else window_size
42
+ shifted_coords = tensor.coords.clone().detach()
43
+ shifted_coords[:, 1:] += torch.tensor(shift_window, device=tensor.device, dtype=torch.int32).unsqueeze(0)
44
+
45
+ MAX_COORDS = shifted_coords[:, 1:].max(dim=0).values.tolist()
46
+ NUM_WINDOWS = [math.ceil((mc + 1) / ws) for mc, ws in zip(MAX_COORDS, window_size)]
47
+ OFFSET = torch.cumprod(torch.tensor([1] + NUM_WINDOWS[::-1]), dim=0).tolist()[::-1]
48
+
49
+ shifted_coords[:, 1:] //= torch.tensor(window_size, device=tensor.device, dtype=torch.int32).unsqueeze(0)
50
+ shifted_indices = (shifted_coords * torch.tensor(OFFSET, device=tensor.device, dtype=torch.int32).unsqueeze(0)).sum(dim=1)
51
+ fwd_indices = torch.argsort(shifted_indices)
52
+ bwd_indices = torch.empty_like(fwd_indices)
53
+ bwd_indices[fwd_indices] = torch.arange(fwd_indices.shape[0], device=tensor.device)
54
+ seq_lens = torch.bincount(shifted_indices)
55
+ seq_batch_indices = torch.arange(seq_lens.shape[0], device=tensor.device, dtype=torch.int32) // OFFSET[0]
56
+ mask = seq_lens != 0
57
+ seq_lens = seq_lens[mask].tolist()
58
+ seq_batch_indices = seq_batch_indices[mask].tolist()
59
+
60
+ return fwd_indices, bwd_indices, seq_lens, seq_batch_indices
61
+
62
+
63
+ def sparse_windowed_scaled_dot_product_self_attention(
64
+ qkv: SparseTensor,
65
+ window_size: int,
66
+ shift_window: Tuple[int, int, int] = (0, 0, 0)
67
+ ) -> SparseTensor:
68
+ """
69
+ Apply windowed scaled dot product self attention to a sparse tensor.
70
+
71
+ Args:
72
+ qkv (SparseTensor): [N, *, 3, H, C] sparse tensor containing Qs, Ks, and Vs.
73
+ window_size (int): The window size to use.
74
+ shift_window (Tuple[int, int, int]): The shift of serialized coordinates.
75
+ shift (int): The shift to use.
76
+ """
77
+ assert len(qkv.shape) == 4 and qkv.shape[1] == 3, f"Invalid shape for qkv, got {qkv.shape}, expected [N, *, 3, H, C]"
78
+
79
+ serialization_spatial_cache_name = f'window_partition_{window_size}_{shift_window}'
80
+ serialization_spatial_cache = qkv.get_spatial_cache(serialization_spatial_cache_name)
81
+ if serialization_spatial_cache is None:
82
+ fwd_indices, bwd_indices, seq_lens, seq_batch_indices = calc_window_partition(qkv, window_size, shift_window)
83
+ qkv.register_spatial_cache(serialization_spatial_cache_name, (fwd_indices, bwd_indices, seq_lens, seq_batch_indices))
84
+ else:
85
+ fwd_indices, bwd_indices, seq_lens, seq_batch_indices = serialization_spatial_cache
86
+
87
+ M = fwd_indices.shape[0]
88
+ T = qkv.feats.shape[0]
89
+ H = qkv.feats.shape[2]
90
+ C = qkv.feats.shape[3]
91
+
92
+ qkv_feats = qkv.feats[fwd_indices] # [M, 3, H, C]
93
+
94
+ if DEBUG:
95
+ start = 0
96
+ qkv_coords = qkv.coords[fwd_indices]
97
+ for i in range(len(seq_lens)):
98
+ seq_coords = qkv_coords[start:start+seq_lens[i]]
99
+ assert (seq_coords[:, 0] == seq_batch_indices[i]).all(), f"SparseWindowedScaledDotProductSelfAttention: batch index mismatch"
100
+ assert (seq_coords[:, 1:].max(dim=0).values - seq_coords[:, 1:].min(dim=0).values < window_size).all(), \
101
+ f"SparseWindowedScaledDotProductSelfAttention: window size exceeded"
102
+ start += seq_lens[i]
103
+
104
+ if all([seq_len == window_size for seq_len in seq_lens]):
105
+ B = len(seq_lens)
106
+ N = window_size
107
+ qkv_feats = qkv_feats.reshape(B, N, 3, H, C)
108
+ if ATTN == 'xformers':
109
+ q, k, v = qkv_feats.unbind(dim=2) # [B, N, H, C]
110
+ out = xops.memory_efficient_attention(q, k, v) # [B, N, H, C]
111
+ elif ATTN == 'flash_attn':
112
+ out = flash_attn.flash_attn_qkvpacked_func(qkv_feats) # [B, N, H, C]
113
+ else:
114
+ raise ValueError(f"Unknown attention module: {ATTN}")
115
+ out = out.reshape(B * N, H, C) # [M, H, C]
116
+ else:
117
+ if ATTN == 'xformers':
118
+ q, k, v = qkv_feats.unbind(dim=1) # [M, H, C]
119
+ q = q.unsqueeze(0) # [1, M, H, C]
120
+ k = k.unsqueeze(0) # [1, M, H, C]
121
+ v = v.unsqueeze(0) # [1, M, H, C]
122
+ mask = xops.fmha.BlockDiagonalMask.from_seqlens(seq_lens)
123
+ out = xops.memory_efficient_attention(q, k, v, mask)[0] # [M, H, C]
124
+ elif ATTN == 'flash_attn':
125
+ cu_seqlens = torch.cat([torch.tensor([0]), torch.cumsum(torch.tensor(seq_lens), dim=0)], dim=0) \
126
+ .to(qkv.device).int()
127
+ out = flash_attn.flash_attn_varlen_qkvpacked_func(qkv_feats, cu_seqlens, max(seq_lens)) # [M, H, C]
128
+
129
+ out = out[bwd_indices] # [T, H, C]
130
+
131
+ if DEBUG:
132
+ qkv_coords = qkv_coords[bwd_indices]
133
+ assert torch.equal(qkv_coords, qkv.coords), "SparseWindowedScaledDotProductSelfAttention: coordinate mismatch"
134
+
135
+ return qkv.replace(out)
trellis/modules/sparse/basic.py ADDED
@@ -0,0 +1,459 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ from . import BACKEND, DEBUG
5
+ SparseTensorData = None # Lazy import
6
+
7
+
8
+ __all__ = [
9
+ 'SparseTensor',
10
+ 'sparse_batch_broadcast',
11
+ 'sparse_batch_op',
12
+ 'sparse_cat',
13
+ 'sparse_unbind',
14
+ ]
15
+
16
+
17
class SparseTensor:
    """
    Sparse tensor with support for both torchsparse and spconv backends.

    Parameters:
    - feats (torch.Tensor): Features of the sparse tensor, one row per active voxel.
    - coords (torch.Tensor): Integer coordinates, shape (N, 1 + ndim); column 0 is the batch index.
    - shape (torch.Size): Logical shape of the sparse tensor (batch, *feature_dims).
    - layout (List[slice]): Row span of each batch inside feats/coords.
    - data (SparseTensorData): Backend tensor object used for convolution.

    NOTE:
    - Data corresponding to a same batch should be contiguous.
    - Coords should be in [0, 1023]
    """
    @overload
    def __init__(self, feats: torch.Tensor, coords: torch.Tensor, shape: Optional[torch.Size] = None, layout: Optional[List[slice]] = None, **kwargs): ...

    @overload
    def __init__(self, data, shape: Optional[torch.Size] = None, layout: Optional[List[slice]] = None, **kwargs): ...

    def __init__(self, *args, **kwargs):
        # Lazy import of sparse tensor backend
        global SparseTensorData
        if SparseTensorData is None:
            import importlib
            if BACKEND == 'torchsparse':
                SparseTensorData = importlib.import_module('torchsparse').SparseTensor
            elif BACKEND == 'spconv':
                SparseTensorData = importlib.import_module('spconv.pytorch').SparseConvTensor

        # Overload dispatch: 0 = (feats, coords, ...), 1 = (data, ...).
        method_id = 0
        if len(args) != 0:
            method_id = 0 if isinstance(args[0], torch.Tensor) else 1
        else:
            method_id = 1 if 'data' in kwargs else 0

        if method_id == 0:
            # Pad positional args to (feats, coords, shape, layout); keyword args
            # override, and whatever remains in kwargs is passed to the backend ctor.
            feats, coords, shape, layout = args + (None,) * (4 - len(args))
            if 'feats' in kwargs:
                feats = kwargs['feats']
                del kwargs['feats']
            if 'coords' in kwargs:
                coords = kwargs['coords']
                del kwargs['coords']
            if 'shape' in kwargs:
                shape = kwargs['shape']
                del kwargs['shape']
            if 'layout' in kwargs:
                layout = kwargs['layout']
                del kwargs['layout']

            if shape is None:
                shape = self.__cal_shape(feats, coords)
            if layout is None:
                layout = self.__cal_layout(coords, shape[0])
            if BACKEND == 'torchsparse':
                self.data = SparseTensorData(feats, coords, **kwargs)
            elif BACKEND == 'spconv':
                # spconv needs an explicit spatial extent; derive it from the max
                # coordinate (columns 1:) and pass the batch size separately.
                spatial_shape = list(coords.max(0)[0] + 1)[1:]
                self.data = SparseTensorData(feats.reshape(feats.shape[0], -1), coords, spatial_shape, shape[0], **kwargs)
                # Keep the original (possibly multi-dim) feature tensor; the ctor
                # above only received a flattened view.
                self.data._features = feats
        elif method_id == 1:
            data, shape, layout = args + (None,) * (3 - len(args))
            if 'data' in kwargs:
                data = kwargs['data']
                del kwargs['data']
            if 'shape' in kwargs:
                shape = kwargs['shape']
                del kwargs['shape']
            if 'layout' in kwargs:
                layout = kwargs['layout']
                del kwargs['layout']

            self.data = data
            if shape is None:
                shape = self.__cal_shape(self.feats, self.coords)
            if layout is None:
                layout = self.__cal_layout(self.coords, shape[0])

        self._shape = shape
        self._layout = layout
        # Per-axis voxel scale relative to the original resolution.
        self._scale = kwargs.get('scale', (1, 1, 1))
        # Scale-keyed cache shared across derived tensors (see register_spatial_cache).
        self._spatial_cache = kwargs.get('spatial_cache', {})

        if DEBUG:
            # Invariant checks: row counts match, shape/layout are consistent with
            # the data, and each batch occupies one contiguous span of rows.
            try:
                assert self.feats.shape[0] == self.coords.shape[0], f"Invalid feats shape: {self.feats.shape}, coords shape: {self.coords.shape}"
                assert self.shape == self.__cal_shape(self.feats, self.coords), f"Invalid shape: {self.shape}"
                assert self.layout == self.__cal_layout(self.coords, self.shape[0]), f"Invalid layout: {self.layout}"
                for i in range(self.shape[0]):
                    assert torch.all(self.coords[self.layout[i], 0] == i), f"The data of batch {i} is not contiguous"
            except Exception as e:
                print('Debugging information:')
                print(f"- Shape: {self.shape}")
                print(f"- Layout: {self.layout}")
                print(f"- Scale: {self._scale}")
                print(f"- Coords: {self.coords}")
                raise e

    def __cal_shape(self, feats, coords):
        # Logical shape: (batch_size inferred from max batch index, *feature dims).
        shape = []
        shape.append(coords[:, 0].max().item() + 1)
        shape.extend([*feats.shape[1:]])
        return torch.Size(shape)

    def __cal_layout(self, coords, batch_size):
        # One slice per batch; assumes rows of a batch are contiguous.
        seq_len = torch.bincount(coords[:, 0], minlength=batch_size)
        offset = torch.cumsum(seq_len, dim=0)
        layout = [slice((offset[i] - seq_len[i]).item(), offset[i].item()) for i in range(batch_size)]
        return layout

    @property
    def shape(self) -> torch.Size:
        return self._shape

    def dim(self) -> int:
        return len(self.shape)

    @property
    def layout(self) -> List[slice]:
        return self._layout

    @property
    def feats(self) -> torch.Tensor:
        # Backend-specific feature storage.
        if BACKEND == 'torchsparse':
            return self.data.F
        elif BACKEND == 'spconv':
            return self.data.features

    @feats.setter
    def feats(self, value: torch.Tensor):
        if BACKEND == 'torchsparse':
            self.data.F = value
        elif BACKEND == 'spconv':
            self.data.features = value

    @property
    def coords(self) -> torch.Tensor:
        # Backend-specific coordinate storage.
        if BACKEND == 'torchsparse':
            return self.data.C
        elif BACKEND == 'spconv':
            return self.data.indices

    @coords.setter
    def coords(self, value: torch.Tensor):
        if BACKEND == 'torchsparse':
            self.data.C = value
        elif BACKEND == 'spconv':
            self.data.indices = value

    @property
    def dtype(self):
        return self.feats.dtype

    @property
    def device(self):
        return self.feats.device

    @overload
    def to(self, dtype: torch.dtype) -> 'SparseTensor': ...

    @overload
    def to(self, device: Optional[Union[str, torch.device]] = None, dtype: Optional[torch.dtype] = None) -> 'SparseTensor': ...

    def to(self, *args, **kwargs) -> 'SparseTensor':
        # Mimics torch.Tensor.to(device?, dtype?); coords only follow the device
        # (their integer dtype must be preserved for the backend).
        device = None
        dtype = None
        if len(args) == 2:
            device, dtype = args
        elif len(args) == 1:
            if isinstance(args[0], torch.dtype):
                dtype = args[0]
            else:
                device = args[0]
        if 'dtype' in kwargs:
            assert dtype is None, "to() received multiple values for argument 'dtype'"
            dtype = kwargs['dtype']
        if 'device' in kwargs:
            assert device is None, "to() received multiple values for argument 'device'"
            device = kwargs['device']

        new_feats = self.feats.to(device=device, dtype=dtype)
        new_coords = self.coords.to(device=device)
        return self.replace(new_feats, new_coords)

    def type(self, dtype):
        # Cast features only; coords keep their integer dtype.
        new_feats = self.feats.type(dtype)
        return self.replace(new_feats)

    def cpu(self) -> 'SparseTensor':
        new_feats = self.feats.cpu()
        new_coords = self.coords.cpu()
        return self.replace(new_feats, new_coords)

    def cuda(self) -> 'SparseTensor':
        new_feats = self.feats.cuda()
        new_coords = self.coords.cuda()
        return self.replace(new_feats, new_coords)

    def half(self) -> 'SparseTensor':
        new_feats = self.feats.half()
        return self.replace(new_feats)

    def float(self) -> 'SparseTensor':
        new_feats = self.feats.float()
        return self.replace(new_feats)

    def detach(self) -> 'SparseTensor':
        new_coords = self.coords.detach()
        new_feats = self.feats.detach()
        return self.replace(new_feats, new_coords)

    def dense(self) -> torch.Tensor:
        # Materialize as a dense tensor; both backends expose .dense().
        if BACKEND == 'torchsparse':
            return self.data.dense()
        elif BACKEND == 'spconv':
            return self.data.dense()

    def reshape(self, *shape) -> 'SparseTensor':
        # Reshape the per-voxel feature dims; the voxel (row) dim is fixed.
        new_feats = self.feats.reshape(self.feats.shape[0], *shape)
        return self.replace(new_feats)

    def unbind(self, dim: int) -> List['SparseTensor']:
        return sparse_unbind(self, dim)

    def replace(self, feats: torch.Tensor, coords: Optional[torch.Tensor] = None) -> 'SparseTensor':
        # Build a new SparseTensor with new feats (and optionally coords) while
        # preserving backend caches (kernel maps / indice dicts) so downstream
        # convolutions can reuse them.
        new_shape = [self.shape[0]]
        new_shape.extend(feats.shape[1:])
        if BACKEND == 'torchsparse':
            new_data = SparseTensorData(
                feats=feats,
                coords=self.data.coords if coords is None else coords,
                stride=self.data.stride,
                spatial_range=self.data.spatial_range,
            )
            new_data._caches = self.data._caches
        elif BACKEND == 'spconv':
            new_data = SparseTensorData(
                self.data.features.reshape(self.data.features.shape[0], -1),
                self.data.indices,
                self.data.spatial_shape,
                self.data.batch_size,
                self.data.grid,
                self.data.voxel_num,
                self.data.indice_dict
            )
            # Attach the (possibly multi-dim) new features and carry over all
            # spconv bookkeeping fields verbatim.
            new_data._features = feats
            new_data.benchmark = self.data.benchmark
            new_data.benchmark_record = self.data.benchmark_record
            new_data.thrust_allocator = self.data.thrust_allocator
            new_data._timer = self.data._timer
            new_data.force_algo = self.data.force_algo
            new_data.int8_scale = self.data.int8_scale
            if coords is not None:
                new_data.indices = coords
        new_tensor = SparseTensor(new_data, shape=torch.Size(new_shape), layout=self.layout, scale=self._scale, spatial_cache=self._spatial_cache)
        return new_tensor

    @staticmethod
    def full(aabb, dim, value, dtype=torch.float32, device=None) -> 'SparseTensor':
        # Dense-filled sparse tensor over an inclusive axis-aligned box
        # aabb = (x0, y0, z0, x1, y1, z1); dim = (batch_size, channels).
        N, C = dim
        x = torch.arange(aabb[0], aabb[3] + 1)
        y = torch.arange(aabb[1], aabb[4] + 1)
        z = torch.arange(aabb[2], aabb[5] + 1)
        coords = torch.stack(torch.meshgrid(x, y, z, indexing='ij'), dim=-1).reshape(-1, 3)
        coords = torch.cat([
            torch.arange(N).view(-1, 1).repeat(1, coords.shape[0]).view(-1, 1),
            coords.repeat(N, 1),
        ], dim=1).to(dtype=torch.int32, device=device)
        feats = torch.full((coords.shape[0], C), value, dtype=dtype, device=device)
        return SparseTensor(feats=feats, coords=coords)

    def __merge_sparse_cache(self, other: 'SparseTensor') -> dict:
        # Union the scale-keyed caches of two operands; on key collision,
        # entries from `other` update (overwrite) entries from `self`.
        new_cache = {}
        for k in set(list(self._spatial_cache.keys()) + list(other._spatial_cache.keys())):
            if k in self._spatial_cache:
                new_cache[k] = self._spatial_cache[k]
            if k in other._spatial_cache:
                if k not in new_cache:
                    new_cache[k] = other._spatial_cache[k]
                else:
                    new_cache[k].update(other._spatial_cache[k])
        return new_cache

    def __neg__(self) -> 'SparseTensor':
        return self.replace(-self.feats)

    def __elemwise__(self, other: Union[torch.Tensor, 'SparseTensor'], op: callable) -> 'SparseTensor':
        # Elementwise op against a scalar, a dense tensor, or another SparseTensor.
        # A dense tensor is first broadcast per-batch when its shape allows;
        # failures fall through silently and `other` is used as-is.
        if isinstance(other, torch.Tensor):
            try:
                other = torch.broadcast_to(other, self.shape)
                other = sparse_batch_broadcast(self, other)
            except:
                pass
        if isinstance(other, SparseTensor):
            other = other.feats
        new_feats = op(self.feats, other)
        new_tensor = self.replace(new_feats)
        if isinstance(other, SparseTensor):
            # NOTE(review): `other` was rebound to a plain tensor above, so this
            # branch appears unreachable — confirm whether cache merging is intended.
            new_tensor._spatial_cache = self.__merge_sparse_cache(other)
        return new_tensor

    def __add__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor':
        return self.__elemwise__(other, torch.add)

    def __radd__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor':
        return self.__elemwise__(other, torch.add)

    def __sub__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor':
        return self.__elemwise__(other, torch.sub)

    def __rsub__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor':
        return self.__elemwise__(other, lambda x, y: torch.sub(y, x))

    def __mul__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor':
        return self.__elemwise__(other, torch.mul)

    def __rmul__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor':
        return self.__elemwise__(other, torch.mul)

    def __truediv__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor':
        return self.__elemwise__(other, torch.div)

    def __rtruediv__(self, other: Union[torch.Tensor, 'SparseTensor', float]) -> 'SparseTensor':
        return self.__elemwise__(other, lambda x, y: torch.div(y, x))

    def __getitem__(self, idx):
        # Batch selection: int / slice / bool mask / index tensor over the batch
        # dim. Selected batches are renumbered 0..len(idx)-1 in the result.
        if isinstance(idx, int):
            idx = [idx]
        elif isinstance(idx, slice):
            idx = range(*idx.indices(self.shape[0]))
        elif isinstance(idx, torch.Tensor):
            if idx.dtype == torch.bool:
                assert idx.shape == (self.shape[0],), f"Invalid index shape: {idx.shape}"
                idx = idx.nonzero().squeeze(1)
            elif idx.dtype in [torch.int32, torch.int64]:
                assert len(idx.shape) == 1, f"Invalid index shape: {idx.shape}"
            else:
                raise ValueError(f"Unknown index type: {idx.dtype}")
        else:
            raise ValueError(f"Unknown index type: {type(idx)}")
        
        coords = []
        feats = []
        for new_idx, old_idx in enumerate(idx):
            coords.append(self.coords[self.layout[old_idx]].clone())
            coords[-1][:, 0] = new_idx
            feats.append(self.feats[self.layout[old_idx]])
        coords = torch.cat(coords, dim=0).contiguous()
        feats = torch.cat(feats, dim=0).contiguous()
        return SparseTensor(feats=feats, coords=coords)

    def register_spatial_cache(self, key, value) -> None:
        """
        Register a spatial cache.
        The spatial cache can be any thing you want to cache.
        The registery and retrieval of the cache is based on current scale.
        """
        scale_key = str(self._scale)
        if scale_key not in self._spatial_cache:
            self._spatial_cache[scale_key] = {}
        self._spatial_cache[scale_key][key] = value

    def get_spatial_cache(self, key=None):
        """
        Get a spatial cache.
        Returns the whole per-scale dict when key is None, else the entry
        (or None if absent).
        """
        scale_key = str(self._scale)
        cur_scale_cache = self._spatial_cache.get(scale_key, {})
        if key is None:
            return cur_scale_cache
        return cur_scale_cache.get(key, None)
390
+
391
+
392
def sparse_batch_broadcast(input: SparseTensor, other: torch.Tensor) -> torch.Tensor:
    """
    Broadcast a per-batch tensor across all voxels of each batch of a sparse tensor.

    Args:
        input (SparseTensor): Sparse tensor whose batch layout defines the broadcast.
        other (torch.Tensor): Tensor indexed by batch; other[k] is written to every
            voxel row belonging to batch k.

    Returns:
        torch.Tensor: Tensor shaped like input.feats, with other[k] repeated over
        the rows of batch k.
    """
    # Fill per-batch slices of a feats-shaped buffer with the broadcast values.
    broadcasted = torch.zeros_like(input.feats)
    for k in range(input.shape[0]):
        broadcasted[input.layout[k]] = other[k]
    return broadcasted
406
+
407
+
408
def sparse_batch_op(input: SparseTensor, other: torch.Tensor, op: callable = torch.add) -> SparseTensor:
    """
    Broadcast a per-batch tensor across a sparse tensor's batches, then apply an op.

    Args:
        input (SparseTensor): Sparse tensor providing features and batch layout.
        other (torch.Tensor): Tensor indexed by batch, broadcast via
            sparse_batch_broadcast before the op.
        op (callable): Binary op applied as op(input.feats, broadcast). Defaults to torch.add.

    Returns:
        SparseTensor: New sparse tensor with the op applied to the features.
    """
    return input.replace(op(input.feats, sparse_batch_broadcast(input, other)))
418
+
419
+
420
def sparse_cat(inputs: List[SparseTensor], dim: int = 0) -> SparseTensor:
    """
    Concatenate a list of sparse tensors.

    Along dim 0 the batch indices of each tensor are shifted so batches stay
    distinct; along any other dim only the feature tensors are concatenated.

    Args:
        inputs (List[SparseTensor]): List of sparse tensors to concatenate.
    """
    if dim != 0:
        # Feature-dim concat: coords and layout are unchanged.
        merged_feats = torch.cat([t.feats for t in inputs], dim=dim)
        return inputs[0].replace(merged_feats)

    # Batch concat: shift each tensor's batch indices by the running offset.
    batch_offset = 0
    shifted_coords = []
    for t in inputs:
        c = t.coords.clone()
        c[:, 0] += batch_offset
        shifted_coords.append(c)
        batch_offset += t.shape[0]
    return SparseTensor(
        coords=torch.cat(shifted_coords, dim=0),
        feats=torch.cat([t.feats for t in inputs], dim=0),
    )
445
+
446
+
447
def sparse_unbind(input: SparseTensor, dim: int) -> List[SparseTensor]:
    """
    Unbind a sparse tensor along a dimension.

    Args:
        input (SparseTensor): Sparse tensor to unbind.
        dim (int): Dimension to unbind; dim 0 splits per batch, any other dim
            unbinds the feature tensor.
    """
    if dim != 0:
        return [input.replace(chunk) for chunk in input.feats.unbind(dim)]
    return [input[b] for b in range(input.shape[0])]
trellis/modules/sparse/conv/__init__.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .. import BACKEND
2
+
3
+
4
# Algorithm used by the spconv backend: 'auto', 'implicit_gemm', or 'native'.
SPCONV_ALGO = 'auto'

def __from_env():
    """Override SPCONV_ALGO from the SPCONV_ALGO environment variable if it is valid."""
    import os

    global SPCONV_ALGO
    candidate = os.environ.get('SPCONV_ALGO')
    if candidate in ('auto', 'implicit_gemm', 'native'):
        SPCONV_ALGO = candidate
    print(f"[SPARSE][CONV] spconv algo: {SPCONV_ALGO}")


__from_env()

# Re-export the convolution layers of the active backend.
if BACKEND == 'torchsparse':
    from .conv_torchsparse import *
elif BACKEND == 'spconv':
    from .conv_spconv import *
trellis/modules/sparse/conv/conv_spconv.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+ from .. import SparseTensor
4
+ from .. import DEBUG
5
+ from . import SPCONV_ALGO
6
+
7
class SparseConv3d(nn.Module):
    """
    3D sparse convolution backed by spconv.

    With stride == 1 and no padding a submanifold convolution (SubMConv3d) is
    used, which preserves the active-voxel set; otherwise a regular strided
    spconv.SparseConv3d is used and the output coordinates change.
    """
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, padding=None, bias=True, indice_key=None):
        super(SparseConv3d, self).__init__()
        # Lazily import spconv, but bind it at module scope: `forward` also
        # references `spconv`, and a plain function-local import would leave it
        # undefined there (NameError on the strided multi-batch path) and would
        # never satisfy the globals() guard, re-importing on every construction.
        global spconv
        if 'spconv' not in globals():
            import spconv.pytorch as spconv
        algo = None
        if SPCONV_ALGO == 'native':
            algo = spconv.ConvAlgo.Native
        elif SPCONV_ALGO == 'implicit_gemm':
            algo = spconv.ConvAlgo.MaskImplicitGemm
        if stride == 1 and (padding is None):
            self.conv = spconv.SubMConv3d(in_channels, out_channels, kernel_size, dilation=dilation, bias=bias, indice_key=indice_key, algo=algo)
        else:
            self.conv = spconv.SparseConv3d(in_channels, out_channels, kernel_size, stride=stride, dilation=dilation, padding=padding, bias=bias, indice_key=indice_key, algo=algo)
        # Normalize stride to a 3-tuple for scale bookkeeping.
        self.stride = tuple(stride) if isinstance(stride, (list, tuple)) else (stride, stride, stride)
        self.padding = padding

    def forward(self, x: SparseTensor) -> SparseTensor:
        """Apply the convolution; re-sorts output rows per batch when spconv breaks batch contiguity."""
        spatial_changed = any(s != 1 for s in self.stride) or (self.padding is not None)
        new_data = self.conv(x.data)
        new_shape = [x.shape[0], self.conv.out_channels]
        # A spatially-changing conv invalidates the cached layout.
        new_layout = None if spatial_changed else x.layout

        if spatial_changed and (x.shape[0] != 1):
            # spconv with non-1 stride breaks per-batch contiguity of the output;
            # sort rows by batch index and remember the inverse permutation.
            fwd = new_data.indices[:, 0].argsort()
            bwd = torch.zeros_like(fwd).scatter_(0, fwd, torch.arange(fwd.shape[0], device=fwd.device))
            sorted_feats = new_data.features[fwd]
            sorted_coords = new_data.indices[fwd]
            unsorted_data = new_data
            new_data = spconv.SparseConvTensor(sorted_feats, sorted_coords, unsorted_data.spatial_shape, unsorted_data.batch_size)  # type: ignore

        out = SparseTensor(
            new_data, shape=torch.Size(new_shape), layout=new_layout,
            scale=tuple([s * stride for s, stride in zip(x._scale, self.stride)]),
            spatial_cache=x._spatial_cache,
        )

        if spatial_changed and (x.shape[0] != 1):
            # Cache the unsorted tensor and inverse permutation so a paired
            # SparseInverseConv3d can restore spconv's original ordering.
            out.register_spatial_cache(f'conv_{self.stride}_unsorted_data', unsorted_data)
            out.register_spatial_cache(f'conv_{self.stride}_sort_bwd', bwd)

        return out
50
+
51
+
52
class SparseInverseConv3d(nn.Module):
    """
    3D sparse transposed (inverse) convolution backed by spconv.

    Must be paired via `indice_key` with a forward SparseConv3d of the same
    stride; when the forward conv changed the spatial layout, this layer relies
    on the spatial caches it registered to restore spconv's original row order.
    """
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, bias=True, indice_key=None):
        super(SparseInverseConv3d, self).__init__()
        # Bind the lazy import at module scope so the globals() guard actually
        # short-circuits on subsequent constructions (a function-local import
        # never reaches globals() and would be repeated every time).
        global spconv
        if 'spconv' not in globals():
            import spconv.pytorch as spconv
        self.conv = spconv.SparseInverseConv3d(in_channels, out_channels, kernel_size, bias=bias, indice_key=indice_key)
        # Normalize stride to a 3-tuple for scale bookkeeping.
        self.stride = tuple(stride) if isinstance(stride, (list, tuple)) else (stride, stride, stride)

    def forward(self, x: SparseTensor) -> SparseTensor:
        """Apply the inverse convolution, undoing the paired forward conv's row sort first."""
        spatial_changed = any(s != 1 for s in self.stride)
        if spatial_changed:
            # Recover the original spconv ordering cached by the forward conv.
            data = x.get_spatial_cache(f'conv_{self.stride}_unsorted_data')
            bwd = x.get_spatial_cache(f'conv_{self.stride}_sort_bwd')
            data = data.replace_feature(x.feats[bwd])
            if DEBUG:
                assert torch.equal(data.indices, x.coords[bwd]), 'Recover the original order failed'
        else:
            data = x.data

        new_data = self.conv(data)
        new_shape = [x.shape[0], self.conv.out_channels]
        new_layout = None if spatial_changed else x.layout
        out = SparseTensor(
            new_data, shape=torch.Size(new_shape), layout=new_layout,
            scale=tuple([s // stride for s, stride in zip(x._scale, self.stride)]),
            spatial_cache=x._spatial_cache,
        )
        return out
trellis/modules/sparse/conv/conv_torchsparse.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+ from .. import SparseTensor
4
+
5
+
6
class SparseConv3d(nn.Module):
    """3D sparse convolution backed by torchsparse."""
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, bias=True, indice_key=None):
        super(SparseConv3d, self).__init__()
        # Bind the lazy import at module scope; a function-local import never
        # reaches globals(), so the guard would otherwise re-import every time.
        global torchsparse
        if 'torchsparse' not in globals():
            import torchsparse
        self.conv = torchsparse.nn.Conv3d(in_channels, out_channels, kernel_size, stride, 0, dilation, bias)

    def forward(self, x: SparseTensor) -> SparseTensor:
        """Apply the convolution; layout is kept only when the stride is all-1."""
        out = self.conv(x.data)
        new_shape = [x.shape[0], self.conv.out_channels]
        out = SparseTensor(out, shape=torch.Size(new_shape), layout=x.layout if all(s == 1 for s in self.conv.stride) else None)
        out._spatial_cache = x._spatial_cache
        # Striding multiplies the per-axis voxel scale.
        out._scale = tuple([s * stride for s, stride in zip(x._scale, self.conv.stride)])
        return out
20
+
21
+
22
class SparseInverseConv3d(nn.Module):
    """3D sparse transposed (inverse) convolution backed by torchsparse."""
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, bias=True, indice_key=None):
        super(SparseInverseConv3d, self).__init__()
        # Bind the lazy import at module scope; a function-local import never
        # reaches globals(), so the guard would otherwise re-import every time.
        global torchsparse
        if 'torchsparse' not in globals():
            import torchsparse
        self.conv = torchsparse.nn.Conv3d(in_channels, out_channels, kernel_size, stride, 0, dilation, bias, transposed=True)

    def forward(self, x: SparseTensor) -> SparseTensor:
        """Apply the transposed convolution; layout is kept only when the stride is all-1."""
        out = self.conv(x.data)
        new_shape = [x.shape[0], self.conv.out_channels]
        out = SparseTensor(out, shape=torch.Size(new_shape), layout=x.layout if all(s == 1 for s in self.conv.stride) else None)
        out._spatial_cache = x._spatial_cache
        # Upsampling divides the per-axis voxel scale.
        out._scale = tuple([s // stride for s, stride in zip(x._scale, self.conv.stride)])
        return out
36
+
37
+
38
+
trellis/modules/sparse/linear.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+ from . import SparseTensor
4
+
5
+ __all__ = [
6
+ 'SparseLinear'
7
+ ]
8
+
9
+
10
class SparseLinear(nn.Linear):
    """Linear layer applied to the feature rows of a SparseTensor."""
    def __init__(self, in_features, out_features, bias=True):
        super(SparseLinear, self).__init__(in_features, out_features, bias)

    def forward(self, input: SparseTensor) -> SparseTensor:
        # Transform the features and wrap them back into the sparse structure.
        transformed = super().forward(input.feats)
        return input.replace(transformed)
trellis/modules/sparse/nonlinearity.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+ from . import SparseTensor
4
+
5
+ __all__ = [
6
+ 'SparseReLU',
7
+ 'SparseSiLU',
8
+ 'SparseGELU',
9
+ 'SparseActivation'
10
+ ]
11
+
12
+
13
class SparseReLU(nn.ReLU):
    """ReLU applied to the feature rows of a SparseTensor."""
    def forward(self, input: SparseTensor) -> SparseTensor:
        activated = super().forward(input.feats)
        return input.replace(activated)
16
+
17
+
18
class SparseSiLU(nn.SiLU):
    """SiLU applied to the feature rows of a SparseTensor."""
    def forward(self, input: SparseTensor) -> SparseTensor:
        activated = super().forward(input.feats)
        return input.replace(activated)
21
+
22
+
23
class SparseGELU(nn.GELU):
    """GELU applied to the feature rows of a SparseTensor."""
    def forward(self, input: SparseTensor) -> SparseTensor:
        activated = super().forward(input.feats)
        return input.replace(activated)
26
+
27
+
28
class SparseActivation(nn.Module):
    """Wrap an arbitrary dense activation module so it operates on SparseTensor features."""
    def __init__(self, activation: nn.Module):
        super().__init__()
        self.activation = activation

    def forward(self, input: SparseTensor) -> SparseTensor:
        activated = self.activation(input.feats)
        return input.replace(activated)
35
+
trellis/modules/sparse/norm.py ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+ from . import SparseTensor
4
+ from . import DEBUG
5
+
6
+ __all__ = [
7
+ 'SparseGroupNorm',
8
+ 'SparseLayerNorm',
9
+ 'SparseGroupNorm32',
10
+ 'SparseLayerNorm32',
11
+ ]
12
+
13
+
14
class SparseGroupNorm(nn.GroupNorm):
    """GroupNorm applied independently to each batch of a SparseTensor."""
    def __init__(self, num_groups, num_channels, eps=1e-5, affine=True):
        super(SparseGroupNorm, self).__init__(num_groups, num_channels, eps, affine)

    def forward(self, input: SparseTensor) -> SparseTensor:
        out_feats = torch.zeros_like(input.feats)
        for b in range(input.shape[0]):
            if DEBUG:
                assert (input.coords[input.layout[b], 0] == b).all(), f"SparseGroupNorm: batch index mismatch"
            # (L, C) -> (1, C, L): GroupNorm expects channels in dim 1.
            batch_feats = input.feats[input.layout[b]]
            batch_feats = batch_feats.permute(1, 0).reshape(1, input.shape[1], -1)
            batch_feats = super().forward(batch_feats)
            # Back to (L, C) and scatter into the output buffer.
            batch_feats = batch_feats.reshape(input.shape[1], -1).permute(1, 0)
            out_feats[input.layout[b]] = batch_feats
        return input.replace(out_feats)
29
+
30
+
31
class SparseLayerNorm(nn.LayerNorm):
    """
    LayerNorm applied independently to each batch of a SparseTensor.

    NOTE(review): each batch's features are permuted to (1, C, L) before calling
    nn.LayerNorm, so normalization runs over the trailing dims that match
    `normalized_shape` in that permuted view — confirm this is the intended axis
    (it normalizes over the voxel dimension L when normalized_shape matches L's
    position, not over channels as the permute might suggest).
    """
    def __init__(self, normalized_shape, eps=1e-5, elementwise_affine=True):
        super(SparseLayerNorm, self).__init__(normalized_shape, eps, elementwise_affine)

    def forward(self, input: SparseTensor) -> SparseTensor:
        nfeats = torch.zeros_like(input.feats)
        for k in range(input.shape[0]):
            # (L, C) -> (1, C, L) for this batch's rows.
            bfeats = input.feats[input.layout[k]]
            bfeats = bfeats.permute(1, 0).reshape(1, input.shape[1], -1)
            bfeats = super().forward(bfeats)
            # Back to (L, C) and scatter into the output buffer.
            bfeats = bfeats.reshape(input.shape[1], -1).permute(1, 0)
            nfeats[input.layout[k]] = bfeats
        return input.replace(nfeats)
44
+
45
+
46
class SparseGroupNorm32(SparseGroupNorm):
    """
    A GroupNorm layer that computes in the weight's dtype and casts the result
    back to the input dtype (works with both float16 and float32 weights).
    """
    def forward(self, x: SparseTensor) -> SparseTensor:
        compute_dtype = self.weight.dtype  # to make it work both with float16 and float32
        normalized = super().forward(x.to(compute_dtype))
        return normalized.type(x.dtype)
53
+
54
class SparseLayerNorm32(SparseLayerNorm):
    """
    A LayerNorm layer that computes in the weight's dtype and casts the result
    back to the input dtype (works with both float16 and float32 weights).
    """
    def forward(self, x: SparseTensor) -> SparseTensor:
        compute_dtype = self.weight.dtype  # to make it work both with float16 and float32
        normalized = super().forward(x.to(compute_dtype))
        return normalized.type(x.dtype)
trellis/modules/sparse/spatial.py ADDED
@@ -0,0 +1,110 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ from . import SparseTensor
5
+
6
+ __all__ = [
7
+ 'SparseDownsample',
8
+ 'SparseUpsample',
9
+ 'SparseSubdivide'
10
+ ]
11
+
12
+
13
class SparseDownsample(nn.Module):
    """
    Downsample a sparse tensor by a factor of `factor`.
    Implemented as average pooling: voxels that map to the same coarse cell are
    merged and their features averaged. Registers caches so a paired
    SparseUpsample with the same factor can invert the operation.
    """
    def __init__(self, factor: Union[int, Tuple[int, ...], List[int]]):
        super(SparseDownsample, self).__init__()
        # An int factor is expanded per-axis at forward time.
        self.factor = tuple(factor) if isinstance(factor, (list, tuple)) else factor

    def forward(self, input: SparseTensor) -> SparseTensor:
        DIM = input.coords.shape[-1] - 1  # spatial dims (coords column 0 is the batch index)
        factor = self.factor if isinstance(self.factor, tuple) else (self.factor,) * DIM
        assert DIM == len(factor), 'Input coordinates must have the same dimension as the downsample factor.'

        # Integer-divide each spatial axis by its factor to get coarse coords.
        coord = list(input.coords.unbind(dim=-1))
        for i, f in enumerate(factor):
            coord[i+1] = coord[i+1] // f

        # Linearize (batch, x, y, z, ...) into a single integer code using a
        # mixed-radix encoding, then deduplicate; `idx` maps each input voxel
        # to its coarse cell.
        MAX = [coord[i+1].max().item() + 1 for i in range(DIM)]
        OFFSET = torch.cumprod(torch.tensor(MAX[::-1]), 0).tolist()[::-1] + [1]
        code = sum([c * o for c, o in zip(coord, OFFSET)])
        code, idx = code.unique(return_inverse=True)

        # Average the features of all voxels falling into each coarse cell.
        new_feats = torch.scatter_reduce(
            torch.zeros(code.shape[0], input.feats.shape[1], device=input.feats.device, dtype=input.feats.dtype),
            dim=0,
            index=idx.unsqueeze(1).expand(-1, input.feats.shape[1]),
            src=input.feats,
            reduce='mean'
        )
        # Decode the unique codes back into (batch, x, y, z, ...) coordinates.
        new_coords = torch.stack(
            [code // OFFSET[0]] +
            [(code // OFFSET[i+1]) % MAX[i] for i in range(DIM)],
            dim=-1
        )
        out = SparseTensor(new_feats, new_coords, input.shape,)
        out._scale = tuple([s // f for s, f in zip(input._scale, factor)])
        out._spatial_cache = input._spatial_cache

        # Cache everything SparseUpsample needs to restore the fine resolution.
        out.register_spatial_cache(f'upsample_{factor}_coords', input.coords)
        out.register_spatial_cache(f'upsample_{factor}_layout', input.layout)
        out.register_spatial_cache(f'upsample_{factor}_idx', idx)

        return out
57
+
58
+
59
class SparseUpsample(nn.Module):
    """
    Upsample a sparse tensor by a factor of `factor`.
    Implemented as nearest neighbor interpolation using the caches registered by
    a preceding SparseDownsample with the same factor.
    """
    def __init__(self, factor: Union[int, Tuple[int, int, int], List[int]]):
        super(SparseUpsample, self).__init__()
        self.factor = tuple(factor) if isinstance(factor, (list, tuple)) else factor

    def forward(self, input: SparseTensor) -> SparseTensor:
        ndim = input.coords.shape[-1] - 1
        factor = self.factor if isinstance(self.factor, tuple) else (self.factor,) * ndim
        assert ndim == len(factor), 'Input coordinates must have the same dimension as the upsample factor.'

        # Retrieve the fine-resolution structure cached by SparseDownsample.
        cached_coords = input.get_spatial_cache(f'upsample_{factor}_coords')
        cached_layout = input.get_spatial_cache(f'upsample_{factor}_layout')
        idx = input.get_spatial_cache(f'upsample_{factor}_idx')
        if cached_coords is None or cached_layout is None or idx is None:
            raise ValueError('Upsample cache not found. SparseUpsample must be paired with SparseDownsample.')
        # Nearest-neighbor: every fine voxel copies its coarse cell's features.
        out = SparseTensor(input.feats[idx], cached_coords, input.shape, cached_layout)
        out._scale = tuple(s * f for s, f in zip(input._scale, factor))
        out._spatial_cache = input._spatial_cache
        return out
83
+
84
class SparseSubdivide(nn.Module):
    """
    Subdivide a sparse tensor: each voxel is split into 2^DIM children, each
    child inheriting its parent's features (nearest neighbor interpolation).
    """
    def __init__(self):
        super(SparseSubdivide, self).__init__()

    def forward(self, input: SparseTensor) -> SparseTensor:
        DIM = input.coords.shape[-1] - 1  # spatial dims (column 0 is the batch index)
        # Enumerate the 2^DIM corner offsets of a unit cell, with a zero batch offset.
        n_cube = torch.ones([2] * DIM, device=input.device, dtype=torch.int)
        n_coords = torch.nonzero(n_cube)
        n_coords = torch.cat([torch.zeros_like(n_coords[:, :1]), n_coords], dim=-1)
        factor = n_coords.shape[0]
        assert factor == 2 ** DIM
        # Double the spatial coordinates, then add every corner offset.
        new_coords = input.coords.clone()
        new_coords[:, 1:] *= 2
        new_coords = new_coords.unsqueeze(1) + n_coords.unsqueeze(0).to(new_coords.dtype)

        # Each child copies its parent's features.
        new_feats = input.feats.unsqueeze(1).expand(input.feats.shape[0], factor, *input.feats.shape[1:])
        out = SparseTensor(new_feats.flatten(0, 1), new_coords.flatten(0, 1), input.shape)
        # Double each scale component. (The previous `input._scale * 2` repeated
        # the tuple — e.g. (1,1,1) -> (1,1,1,1,1,1) — instead of doubling it,
        # corrupting the scale-keyed spatial cache and later zip-based updates.)
        out._scale = tuple(s * 2 for s in input._scale)
        out._spatial_cache = input._spatial_cache
        return out
+
trellis/modules/sparse/transformer/__init__.py ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ from .blocks import *
2
+ from .modulated import *
trellis/modules/sparse/transformer/blocks.py ADDED
@@ -0,0 +1,151 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ from ..basic import SparseTensor
5
+ from ..linear import SparseLinear
6
+ from ..nonlinearity import SparseGELU
7
+ from ..attention import SparseMultiHeadAttention, SerializeMode
8
+ from ...norm import LayerNorm32
9
+
10
+
11
+ class SparseFeedForwardNet(nn.Module):
12
+ def __init__(self, channels: int, mlp_ratio: float = 4.0):
13
+ super().__init__()
14
+ self.mlp = nn.Sequential(
15
+ SparseLinear(channels, int(channels * mlp_ratio)),
16
+ SparseGELU(approximate="tanh"),
17
+ SparseLinear(int(channels * mlp_ratio), channels),
18
+ )
19
+
20
+ def forward(self, x: SparseTensor) -> SparseTensor:
21
+ return self.mlp(x)
22
+
23
+
24
+ class SparseTransformerBlock(nn.Module):
25
+ """
26
+ Sparse Transformer block (MSA + FFN).
27
+ """
28
+ def __init__(
29
+ self,
30
+ channels: int,
31
+ num_heads: int,
32
+ mlp_ratio: float = 4.0,
33
+ attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "full",
34
+ window_size: Optional[int] = None,
35
+ shift_sequence: Optional[int] = None,
36
+ shift_window: Optional[Tuple[int, int, int]] = None,
37
+ serialize_mode: Optional[SerializeMode] = None,
38
+ use_checkpoint: bool = False,
39
+ use_rope: bool = False,
40
+ qk_rms_norm: bool = False,
41
+ qkv_bias: bool = True,
42
+ ln_affine: bool = False,
43
+ ):
44
+ super().__init__()
45
+ self.use_checkpoint = use_checkpoint
46
+ self.norm1 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6)
47
+ self.norm2 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6)
48
+ self.attn = SparseMultiHeadAttention(
49
+ channels,
50
+ num_heads=num_heads,
51
+ attn_mode=attn_mode,
52
+ window_size=window_size,
53
+ shift_sequence=shift_sequence,
54
+ shift_window=shift_window,
55
+ serialize_mode=serialize_mode,
56
+ qkv_bias=qkv_bias,
57
+ use_rope=use_rope,
58
+ qk_rms_norm=qk_rms_norm,
59
+ )
60
+ self.mlp = SparseFeedForwardNet(
61
+ channels,
62
+ mlp_ratio=mlp_ratio,
63
+ )
64
+
65
+ def _forward(self, x: SparseTensor) -> SparseTensor:
66
+ h = x.replace(self.norm1(x.feats))
67
+ h = self.attn(h)
68
+ x = x + h
69
+ h = x.replace(self.norm2(x.feats))
70
+ h = self.mlp(h)
71
+ x = x + h
72
+ return x
73
+
74
+ def forward(self, x: SparseTensor) -> SparseTensor:
75
+ if self.use_checkpoint:
76
+ return torch.utils.checkpoint.checkpoint(self._forward, x, use_reentrant=False)
77
+ else:
78
+ return self._forward(x)
79
+
80
+
81
+ class SparseTransformerCrossBlock(nn.Module):
82
+ """
83
+ Sparse Transformer cross-attention block (MSA + MCA + FFN).
84
+ """
85
+ def __init__(
86
+ self,
87
+ channels: int,
88
+ ctx_channels: int,
89
+ num_heads: int,
90
+ mlp_ratio: float = 4.0,
91
+ attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "full",
92
+ window_size: Optional[int] = None,
93
+ shift_sequence: Optional[int] = None,
94
+ shift_window: Optional[Tuple[int, int, int]] = None,
95
+ serialize_mode: Optional[SerializeMode] = None,
96
+ use_checkpoint: bool = False,
97
+ use_rope: bool = False,
98
+ qk_rms_norm: bool = False,
99
+ qk_rms_norm_cross: bool = False,
100
+ qkv_bias: bool = True,
101
+ ln_affine: bool = False,
102
+ ):
103
+ super().__init__()
104
+ self.use_checkpoint = use_checkpoint
105
+ self.norm1 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6)
106
+ self.norm2 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6)
107
+ self.norm3 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6)
108
+ self.self_attn = SparseMultiHeadAttention(
109
+ channels,
110
+ num_heads=num_heads,
111
+ type="self",
112
+ attn_mode=attn_mode,
113
+ window_size=window_size,
114
+ shift_sequence=shift_sequence,
115
+ shift_window=shift_window,
116
+ serialize_mode=serialize_mode,
117
+ qkv_bias=qkv_bias,
118
+ use_rope=use_rope,
119
+ qk_rms_norm=qk_rms_norm,
120
+ )
121
+ self.cross_attn = SparseMultiHeadAttention(
122
+ channels,
123
+ ctx_channels=ctx_channels,
124
+ num_heads=num_heads,
125
+ type="cross",
126
+ attn_mode="full",
127
+ qkv_bias=qkv_bias,
128
+ qk_rms_norm=qk_rms_norm_cross,
129
+ )
130
+ self.mlp = SparseFeedForwardNet(
131
+ channels,
132
+ mlp_ratio=mlp_ratio,
133
+ )
134
+
135
+ def _forward(self, x: SparseTensor, mod: torch.Tensor, context: torch.Tensor):
136
+ h = x.replace(self.norm1(x.feats))
137
+ h = self.self_attn(h)
138
+ x = x + h
139
+ h = x.replace(self.norm2(x.feats))
140
+ h = self.cross_attn(h, context)
141
+ x = x + h
142
+ h = x.replace(self.norm3(x.feats))
143
+ h = self.mlp(h)
144
+ x = x + h
145
+ return x
146
+
147
+ def forward(self, x: SparseTensor, context: torch.Tensor):
148
+ if self.use_checkpoint:
149
+ return torch.utils.checkpoint.checkpoint(self._forward, x, context, use_reentrant=False)
150
+ else:
151
+ return self._forward(x, context)
trellis/modules/sparse/transformer/modulated.py ADDED
@@ -0,0 +1,166 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ from ..basic import SparseTensor
5
+ from ..attention import SparseMultiHeadAttention, SerializeMode
6
+ from ...norm import LayerNorm32
7
+ from .blocks import SparseFeedForwardNet
8
+
9
+
10
+ class ModulatedSparseTransformerBlock(nn.Module):
11
+ """
12
+ Sparse Transformer block (MSA + FFN) with adaptive layer norm conditioning.
13
+ """
14
+ def __init__(
15
+ self,
16
+ channels: int,
17
+ num_heads: int,
18
+ mlp_ratio: float = 4.0,
19
+ attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "full",
20
+ window_size: Optional[int] = None,
21
+ shift_sequence: Optional[int] = None,
22
+ shift_window: Optional[Tuple[int, int, int]] = None,
23
+ serialize_mode: Optional[SerializeMode] = None,
24
+ use_checkpoint: bool = False,
25
+ use_rope: bool = False,
26
+ qk_rms_norm: bool = False,
27
+ qkv_bias: bool = True,
28
+ share_mod: bool = False,
29
+ ):
30
+ super().__init__()
31
+ self.use_checkpoint = use_checkpoint
32
+ self.share_mod = share_mod
33
+ self.norm1 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
34
+ self.norm2 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
35
+ self.attn = SparseMultiHeadAttention(
36
+ channels,
37
+ num_heads=num_heads,
38
+ attn_mode=attn_mode,
39
+ window_size=window_size,
40
+ shift_sequence=shift_sequence,
41
+ shift_window=shift_window,
42
+ serialize_mode=serialize_mode,
43
+ qkv_bias=qkv_bias,
44
+ use_rope=use_rope,
45
+ qk_rms_norm=qk_rms_norm,
46
+ )
47
+ self.mlp = SparseFeedForwardNet(
48
+ channels,
49
+ mlp_ratio=mlp_ratio,
50
+ )
51
+ if not share_mod:
52
+ self.adaLN_modulation = nn.Sequential(
53
+ nn.SiLU(),
54
+ nn.Linear(channels, 6 * channels, bias=True)
55
+ )
56
+
57
+ def _forward(self, x: SparseTensor, mod: torch.Tensor) -> SparseTensor:
58
+ if self.share_mod:
59
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = mod.chunk(6, dim=1)
60
+ else:
61
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.adaLN_modulation(mod).chunk(6, dim=1)
62
+ h = x.replace(self.norm1(x.feats))
63
+ h = h * (1 + scale_msa) + shift_msa
64
+ h = self.attn(h)
65
+ h = h * gate_msa
66
+ x = x + h
67
+ h = x.replace(self.norm2(x.feats))
68
+ h = h * (1 + scale_mlp) + shift_mlp
69
+ h = self.mlp(h)
70
+ h = h * gate_mlp
71
+ x = x + h
72
+ return x
73
+
74
+ def forward(self, x: SparseTensor, mod: torch.Tensor) -> SparseTensor:
75
+ if self.use_checkpoint:
76
+ return torch.utils.checkpoint.checkpoint(self._forward, x, mod, use_reentrant=False)
77
+ else:
78
+ return self._forward(x, mod)
79
+
80
+
81
+ class ModulatedSparseTransformerCrossBlock(nn.Module):
82
+ """
83
+ Sparse Transformer cross-attention block (MSA + MCA + FFN) with adaptive layer norm conditioning.
84
+ """
85
+ def __init__(
86
+ self,
87
+ channels: int,
88
+ ctx_channels: int,
89
+ num_heads: int,
90
+ mlp_ratio: float = 4.0,
91
+ attn_mode: Literal["full", "shift_window", "shift_sequence", "shift_order", "swin"] = "full",
92
+ window_size: Optional[int] = None,
93
+ shift_sequence: Optional[int] = None,
94
+ shift_window: Optional[Tuple[int, int, int]] = None,
95
+ serialize_mode: Optional[SerializeMode] = None,
96
+ use_checkpoint: bool = False,
97
+ use_rope: bool = False,
98
+ qk_rms_norm: bool = False,
99
+ qk_rms_norm_cross: bool = False,
100
+ qkv_bias: bool = True,
101
+ share_mod: bool = False,
102
+
103
+ ):
104
+ super().__init__()
105
+ self.use_checkpoint = use_checkpoint
106
+ self.share_mod = share_mod
107
+ self.norm1 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
108
+ self.norm2 = LayerNorm32(channels, elementwise_affine=True, eps=1e-6)
109
+ self.norm3 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
110
+ self.self_attn = SparseMultiHeadAttention(
111
+ channels,
112
+ num_heads=num_heads,
113
+ type="self",
114
+ attn_mode=attn_mode,
115
+ window_size=window_size,
116
+ shift_sequence=shift_sequence,
117
+ shift_window=shift_window,
118
+ serialize_mode=serialize_mode,
119
+ qkv_bias=qkv_bias,
120
+ use_rope=use_rope,
121
+ qk_rms_norm=qk_rms_norm,
122
+ )
123
+ self.cross_attn = SparseMultiHeadAttention(
124
+ channels,
125
+ ctx_channels=ctx_channels,
126
+ num_heads=num_heads,
127
+ type="cross",
128
+ attn_mode="full",
129
+ qkv_bias=qkv_bias,
130
+ qk_rms_norm=qk_rms_norm_cross,
131
+ )
132
+ self.mlp = SparseFeedForwardNet(
133
+ channels,
134
+ mlp_ratio=mlp_ratio,
135
+ )
136
+ if not share_mod:
137
+ self.adaLN_modulation = nn.Sequential(
138
+ nn.SiLU(),
139
+ nn.Linear(channels, 6 * channels, bias=True)
140
+ )
141
+
142
+ def _forward(self, x: SparseTensor, mod: torch.Tensor, context: torch.Tensor) -> SparseTensor:
143
+ if self.share_mod:
144
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = mod.chunk(6, dim=1)
145
+ else:
146
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.adaLN_modulation(mod).chunk(6, dim=1)
147
+ h = x.replace(self.norm1(x.feats))
148
+ h = h * (1 + scale_msa) + shift_msa
149
+ h = self.self_attn(h)
150
+ h = h * gate_msa
151
+ x = x + h
152
+ h = x.replace(self.norm2(x.feats))
153
+ h = self.cross_attn(h, context)
154
+ x = x + h
155
+ h = x.replace(self.norm3(x.feats))
156
+ h = h * (1 + scale_mlp) + shift_mlp
157
+ h = self.mlp(h)
158
+ h = h * gate_mlp
159
+ x = x + h
160
+ return x
161
+
162
+ def forward(self, x: SparseTensor, mod: torch.Tensor, context: torch.Tensor) -> SparseTensor:
163
+ if self.use_checkpoint:
164
+ return torch.utils.checkpoint.checkpoint(self._forward, x, mod, context, use_reentrant=False)
165
+ else:
166
+ return self._forward(x, mod, context)
trellis/modules/spatial.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+
3
+
4
+ def pixel_shuffle_3d(x: torch.Tensor, scale_factor: int) -> torch.Tensor:
5
+ """
6
+ 3D pixel shuffle.
7
+ """
8
+ B, C, H, W, D = x.shape
9
+ C_ = C // scale_factor**3
10
+ x = x.reshape(B, C_, scale_factor, scale_factor, scale_factor, H, W, D)
11
+ x = x.permute(0, 1, 5, 2, 6, 3, 7, 4)
12
+ x = x.reshape(B, C_, H*scale_factor, W*scale_factor, D*scale_factor)
13
+ return x
14
+
15
+
16
+ def patchify(x: torch.Tensor, patch_size: int):
17
+ """
18
+ Patchify a tensor.
19
+
20
+ Args:
21
+ x (torch.Tensor): (N, C, *spatial) tensor
22
+ patch_size (int): Patch size
23
+ """
24
+ DIM = x.dim() - 2
25
+ for d in range(2, DIM + 2):
26
+ assert x.shape[d] % patch_size == 0, f"Dimension {d} of input tensor must be divisible by patch size, got {x.shape[d]} and {patch_size}"
27
+
28
+ x = x.reshape(*x.shape[:2], *sum([[x.shape[d] // patch_size, patch_size] for d in range(2, DIM + 2)], []))
29
+ x = x.permute(0, 1, *([2 * i + 3 for i in range(DIM)] + [2 * i + 2 for i in range(DIM)]))
30
+ x = x.reshape(x.shape[0], x.shape[1] * (patch_size ** DIM), *(x.shape[-DIM:]))
31
+ return x
32
+
33
+
34
+ def unpatchify(x: torch.Tensor, patch_size: int):
35
+ """
36
+ Unpatchify a tensor.
37
+
38
+ Args:
39
+ x (torch.Tensor): (N, C, *spatial) tensor
40
+ patch_size (int): Patch size
41
+ """
42
+ DIM = x.dim() - 2
43
+ assert x.shape[1] % (patch_size ** DIM) == 0, f"Second dimension of input tensor must be divisible by patch size to unpatchify, got {x.shape[1]} and {patch_size ** DIM}"
44
+
45
+ x = x.reshape(x.shape[0], x.shape[1] // (patch_size ** DIM), *([patch_size] * DIM), *(x.shape[-DIM:]))
46
+ x = x.permute(0, 1, *(sum([[2 + DIM + i, 2 + i] for i in range(DIM)], [])))
47
+ x = x.reshape(x.shape[0], x.shape[1], *[x.shape[2 + 2 * i] * patch_size for i in range(DIM)])
48
+ return x
trellis/modules/transformer/__init__.py ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ from .blocks import *
2
+ from .modulated import *
trellis/modules/transformer/blocks.py ADDED
@@ -0,0 +1,182 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ from ..attention import MultiHeadAttention
5
+ from ..norm import LayerNorm32
6
+
7
+
8
+ class AbsolutePositionEmbedder(nn.Module):
9
+ """
10
+ Embeds spatial positions into vector representations.
11
+ """
12
+ def __init__(self, channels: int, in_channels: int = 3):
13
+ super().__init__()
14
+ self.channels = channels
15
+ self.in_channels = in_channels
16
+ self.freq_dim = channels // in_channels // 2
17
+ self.freqs = torch.arange(self.freq_dim, dtype=torch.float32) / self.freq_dim
18
+ self.freqs = 1.0 / (10000 ** self.freqs)
19
+
20
+ def _sin_cos_embedding(self, x: torch.Tensor) -> torch.Tensor:
21
+ """
22
+ Create sinusoidal position embeddings.
23
+
24
+ Args:
25
+ x: a 1-D Tensor of N indices
26
+
27
+ Returns:
28
+ an (N, D) Tensor of positional embeddings.
29
+ """
30
+ self.freqs = self.freqs.to(x.device)
31
+ out = torch.outer(x, self.freqs)
32
+ out = torch.cat([torch.sin(out), torch.cos(out)], dim=-1)
33
+ return out
34
+
35
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
36
+ """
37
+ Args:
38
+ x (torch.Tensor): (N, D) tensor of spatial positions
39
+ """
40
+ N, D = x.shape
41
+ assert D == self.in_channels, "Input dimension must match number of input channels"
42
+ embed = self._sin_cos_embedding(x.reshape(-1))
43
+ embed = embed.reshape(N, -1)
44
+ if embed.shape[1] < self.channels:
45
+ embed = torch.cat([embed, torch.zeros(N, self.channels - embed.shape[1], device=embed.device)], dim=-1)
46
+ return embed
47
+
48
+
49
+ class FeedForwardNet(nn.Module):
50
+ def __init__(self, channels: int, mlp_ratio: float = 4.0):
51
+ super().__init__()
52
+ self.mlp = nn.Sequential(
53
+ nn.Linear(channels, int(channels * mlp_ratio)),
54
+ nn.GELU(approximate="tanh"),
55
+ nn.Linear(int(channels * mlp_ratio), channels),
56
+ )
57
+
58
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
59
+ return self.mlp(x)
60
+
61
+
62
+ class TransformerBlock(nn.Module):
63
+ """
64
+ Transformer block (MSA + FFN).
65
+ """
66
+ def __init__(
67
+ self,
68
+ channels: int,
69
+ num_heads: int,
70
+ mlp_ratio: float = 4.0,
71
+ attn_mode: Literal["full", "windowed"] = "full",
72
+ window_size: Optional[int] = None,
73
+ shift_window: Optional[int] = None,
74
+ use_checkpoint: bool = False,
75
+ use_rope: bool = False,
76
+ qk_rms_norm: bool = False,
77
+ qkv_bias: bool = True,
78
+ ln_affine: bool = False,
79
+ ):
80
+ super().__init__()
81
+ self.use_checkpoint = use_checkpoint
82
+ self.norm1 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6)
83
+ self.norm2 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6)
84
+ self.attn = MultiHeadAttention(
85
+ channels,
86
+ num_heads=num_heads,
87
+ attn_mode=attn_mode,
88
+ window_size=window_size,
89
+ shift_window=shift_window,
90
+ qkv_bias=qkv_bias,
91
+ use_rope=use_rope,
92
+ qk_rms_norm=qk_rms_norm,
93
+ )
94
+ self.mlp = FeedForwardNet(
95
+ channels,
96
+ mlp_ratio=mlp_ratio,
97
+ )
98
+
99
+ def _forward(self, x: torch.Tensor) -> torch.Tensor:
100
+ h = self.norm1(x)
101
+ h = self.attn(h)
102
+ x = x + h
103
+ h = self.norm2(x)
104
+ h = self.mlp(h)
105
+ x = x + h
106
+ return x
107
+
108
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
109
+ if self.use_checkpoint:
110
+ return torch.utils.checkpoint.checkpoint(self._forward, x, use_reentrant=False)
111
+ else:
112
+ return self._forward(x)
113
+
114
+
115
+ class TransformerCrossBlock(nn.Module):
116
+ """
117
+ Transformer cross-attention block (MSA + MCA + FFN).
118
+ """
119
+ def __init__(
120
+ self,
121
+ channels: int,
122
+ ctx_channels: int,
123
+ num_heads: int,
124
+ mlp_ratio: float = 4.0,
125
+ attn_mode: Literal["full", "windowed"] = "full",
126
+ window_size: Optional[int] = None,
127
+ shift_window: Optional[Tuple[int, int, int]] = None,
128
+ use_checkpoint: bool = False,
129
+ use_rope: bool = False,
130
+ qk_rms_norm: bool = False,
131
+ qk_rms_norm_cross: bool = False,
132
+ qkv_bias: bool = True,
133
+ ln_affine: bool = False,
134
+ ):
135
+ super().__init__()
136
+ self.use_checkpoint = use_checkpoint
137
+ self.norm1 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6)
138
+ self.norm2 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6)
139
+ self.norm3 = LayerNorm32(channels, elementwise_affine=ln_affine, eps=1e-6)
140
+ self.self_attn = MultiHeadAttention(
141
+ channels,
142
+ num_heads=num_heads,
143
+ type="self",
144
+ attn_mode=attn_mode,
145
+ window_size=window_size,
146
+ shift_window=shift_window,
147
+ qkv_bias=qkv_bias,
148
+ use_rope=use_rope,
149
+ qk_rms_norm=qk_rms_norm,
150
+ )
151
+ self.cross_attn = MultiHeadAttention(
152
+ channels,
153
+ ctx_channels=ctx_channels,
154
+ num_heads=num_heads,
155
+ type="cross",
156
+ attn_mode="full",
157
+ qkv_bias=qkv_bias,
158
+ qk_rms_norm=qk_rms_norm_cross,
159
+ )
160
+ self.mlp = FeedForwardNet(
161
+ channels,
162
+ mlp_ratio=mlp_ratio,
163
+ )
164
+
165
+ def _forward(self, x: torch.Tensor, context: torch.Tensor):
166
+ h = self.norm1(x)
167
+ h = self.self_attn(h)
168
+ x = x + h
169
+ h = self.norm2(x)
170
+ h = self.cross_attn(h, context)
171
+ x = x + h
172
+ h = self.norm3(x)
173
+ h = self.mlp(h)
174
+ x = x + h
175
+ return x
176
+
177
+ def forward(self, x: torch.Tensor, context: torch.Tensor):
178
+ if self.use_checkpoint:
179
+ return torch.utils.checkpoint.checkpoint(self._forward, x, context, use_reentrant=False)
180
+ else:
181
+ return self._forward(x, context)
182
+
trellis/modules/transformer/modulated.py ADDED
@@ -0,0 +1,157 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ from ..attention import MultiHeadAttention
5
+ from ..norm import LayerNorm32
6
+ from .blocks import FeedForwardNet
7
+
8
+
9
+ class ModulatedTransformerBlock(nn.Module):
10
+ """
11
+ Transformer block (MSA + FFN) with adaptive layer norm conditioning.
12
+ """
13
+ def __init__(
14
+ self,
15
+ channels: int,
16
+ num_heads: int,
17
+ mlp_ratio: float = 4.0,
18
+ attn_mode: Literal["full", "windowed"] = "full",
19
+ window_size: Optional[int] = None,
20
+ shift_window: Optional[Tuple[int, int, int]] = None,
21
+ use_checkpoint: bool = False,
22
+ use_rope: bool = False,
23
+ qk_rms_norm: bool = False,
24
+ qkv_bias: bool = True,
25
+ share_mod: bool = False,
26
+ ):
27
+ super().__init__()
28
+ self.use_checkpoint = use_checkpoint
29
+ self.share_mod = share_mod
30
+ self.norm1 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
31
+ self.norm2 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
32
+ self.attn = MultiHeadAttention(
33
+ channels,
34
+ num_heads=num_heads,
35
+ attn_mode=attn_mode,
36
+ window_size=window_size,
37
+ shift_window=shift_window,
38
+ qkv_bias=qkv_bias,
39
+ use_rope=use_rope,
40
+ qk_rms_norm=qk_rms_norm,
41
+ )
42
+ self.mlp = FeedForwardNet(
43
+ channels,
44
+ mlp_ratio=mlp_ratio,
45
+ )
46
+ if not share_mod:
47
+ self.adaLN_modulation = nn.Sequential(
48
+ nn.SiLU(),
49
+ nn.Linear(channels, 6 * channels, bias=True)
50
+ )
51
+
52
+ def _forward(self, x: torch.Tensor, mod: torch.Tensor) -> torch.Tensor:
53
+ if self.share_mod:
54
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = mod.chunk(6, dim=1)
55
+ else:
56
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.adaLN_modulation(mod).chunk(6, dim=1)
57
+ h = self.norm1(x)
58
+ h = h * (1 + scale_msa.unsqueeze(1)) + shift_msa.unsqueeze(1)
59
+ h = self.attn(h)
60
+ h = h * gate_msa.unsqueeze(1)
61
+ x = x + h
62
+ h = self.norm2(x)
63
+ h = h * (1 + scale_mlp.unsqueeze(1)) + shift_mlp.unsqueeze(1)
64
+ h = self.mlp(h)
65
+ h = h * gate_mlp.unsqueeze(1)
66
+ x = x + h
67
+ return x
68
+
69
+ def forward(self, x: torch.Tensor, mod: torch.Tensor) -> torch.Tensor:
70
+ if self.use_checkpoint:
71
+ return torch.utils.checkpoint.checkpoint(self._forward, x, mod, use_reentrant=False)
72
+ else:
73
+ return self._forward(x, mod)
74
+
75
+
76
+ class ModulatedTransformerCrossBlock(nn.Module):
77
+ """
78
+ Transformer cross-attention block (MSA + MCA + FFN) with adaptive layer norm conditioning.
79
+ """
80
+ def __init__(
81
+ self,
82
+ channels: int,
83
+ ctx_channels: int,
84
+ num_heads: int,
85
+ mlp_ratio: float = 4.0,
86
+ attn_mode: Literal["full", "windowed"] = "full",
87
+ window_size: Optional[int] = None,
88
+ shift_window: Optional[Tuple[int, int, int]] = None,
89
+ use_checkpoint: bool = False,
90
+ use_rope: bool = False,
91
+ qk_rms_norm: bool = False,
92
+ qk_rms_norm_cross: bool = False,
93
+ qkv_bias: bool = True,
94
+ share_mod: bool = False,
95
+ ):
96
+ super().__init__()
97
+ self.use_checkpoint = use_checkpoint
98
+ self.share_mod = share_mod
99
+ self.norm1 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
100
+ self.norm2 = LayerNorm32(channels, elementwise_affine=True, eps=1e-6)
101
+ self.norm3 = LayerNorm32(channels, elementwise_affine=False, eps=1e-6)
102
+ self.self_attn = MultiHeadAttention(
103
+ channels,
104
+ num_heads=num_heads,
105
+ type="self",
106
+ attn_mode=attn_mode,
107
+ window_size=window_size,
108
+ shift_window=shift_window,
109
+ qkv_bias=qkv_bias,
110
+ use_rope=use_rope,
111
+ qk_rms_norm=qk_rms_norm,
112
+ )
113
+ self.cross_attn = MultiHeadAttention(
114
+ channels,
115
+ ctx_channels=ctx_channels,
116
+ num_heads=num_heads,
117
+ type="cross",
118
+ attn_mode="full",
119
+ qkv_bias=qkv_bias,
120
+ qk_rms_norm=qk_rms_norm_cross,
121
+ )
122
+ self.mlp = FeedForwardNet(
123
+ channels,
124
+ mlp_ratio=mlp_ratio,
125
+ )
126
+ if not share_mod:
127
+ self.adaLN_modulation = nn.Sequential(
128
+ nn.SiLU(),
129
+ nn.Linear(channels, 6 * channels, bias=True)
130
+ )
131
+
132
+ def _forward(self, x: torch.Tensor, mod: torch.Tensor, context: torch.Tensor):
133
+ if self.share_mod:
134
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = mod.chunk(6, dim=1)
135
+ else:
136
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.adaLN_modulation(mod).chunk(6, dim=1)
137
+ h = self.norm1(x)
138
+ h = h * (1 + scale_msa.unsqueeze(1)) + shift_msa.unsqueeze(1)
139
+ h = self.self_attn(h)
140
+ h = h * gate_msa.unsqueeze(1)
141
+ x = x + h
142
+ h = self.norm2(x)
143
+ h = self.cross_attn(h, context)
144
+ x = x + h
145
+ h = self.norm3(x)
146
+ h = h * (1 + scale_mlp.unsqueeze(1)) + shift_mlp.unsqueeze(1)
147
+ h = self.mlp(h)
148
+ h = h * gate_mlp.unsqueeze(1)
149
+ x = x + h
150
+ return x
151
+
152
+ def forward(self, x: torch.Tensor, mod: torch.Tensor, context: torch.Tensor):
153
+ if self.use_checkpoint:
154
+ return torch.utils.checkpoint.checkpoint(self._forward, x, mod, context, use_reentrant=False)
155
+ else:
156
+ return self._forward(x, mod, context)
157
+
trellis/modules/utils.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch.nn as nn
2
+ from ..modules import sparse as sp
3
+
4
+ FP16_MODULES = (
5
+ nn.Conv1d,
6
+ nn.Conv2d,
7
+ nn.Conv3d,
8
+ nn.ConvTranspose1d,
9
+ nn.ConvTranspose2d,
10
+ nn.ConvTranspose3d,
11
+ nn.Linear,
12
+ sp.SparseConv3d,
13
+ sp.SparseInverseConv3d,
14
+ sp.SparseLinear,
15
+ )
16
+
17
+ def convert_module_to_f16(l):
18
+ """
19
+ Convert primitive modules to float16.
20
+ """
21
+ if isinstance(l, FP16_MODULES):
22
+ for p in l.parameters():
23
+ p.data = p.data.half()
24
+
25
+
26
+ def convert_module_to_f32(l):
27
+ """
28
+ Convert primitive modules to float32, undoing convert_module_to_f16().
29
+ """
30
+ if isinstance(l, FP16_MODULES):
31
+ for p in l.parameters():
32
+ p.data = p.data.float()
33
+
34
+
35
+ def zero_module(module):
36
+ """
37
+ Zero out the parameters of a module and return it.
38
+ """
39
+ for p in module.parameters():
40
+ p.detach().zero_()
41
+ return module
42
+
43
+
44
+ def scale_module(module, scale):
45
+ """
46
+ Scale the parameters of a module and return it.
47
+ """
48
+ for p in module.parameters():
49
+ p.detach().mul_(scale)
50
+ return module
51
+
52
+
53
+ def modulate(x, shift, scale):
54
+ return x * (1 + scale.unsqueeze(1)) + shift.unsqueeze(1)
trellis/pipelines/__init__.py ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from . import samplers
2
+ from .trellis_image_to_3d_cpu import TrellisImageTo3DPipeline
3
+
4
+
5
+ def from_pretrained(path: str):
6
+ """
7
+ Load a pipeline from a model folder or a Hugging Face model hub.
8
+
9
+ Args:
10
+ path: The path to the model. Can be either local path or a Hugging Face model name.
11
+ """
12
+ import os
13
+ import json
14
+ is_local = os.path.exists(f"{path}/pipeline.json")
15
+
16
+ if is_local:
17
+ config_file = f"{path}/pipeline.json"
18
+ else:
19
+ from huggingface_hub import hf_hub_download
20
+ config_file = hf_hub_download(path, "pipeline.json")
21
+
22
+ with open(config_file, 'r') as f:
23
+ config = json.load(f)
24
+ return globals()[config['name']].from_pretrained(path)
trellis/pipelines/base.py ADDED
@@ -0,0 +1,120 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ from .. import models
5
+
6
+
7
+ class Pipeline:
8
+ """
9
+ A base class for pipelines.
10
+ """
11
+ def __init__(
12
+ self,
13
+ models: dict[str, nn.Module] = None,
14
+ ):
15
+ if models is None:
16
+ return
17
+ self.models = models
18
+ for model in self.models.values():
19
+ model.eval()
20
+
21
+ @staticmethod
22
+ def from_pretrained(path: str) -> "Pipeline":
23
+ """
24
+ Load a pretrained model.
25
+ """
26
+ import os
27
+ import json
28
+ is_local = os.path.exists(f"{path}/pipeline.json")
29
+
30
+ if is_local:
31
+ config_file = f"{path}/pipeline.json"
32
+ else:
33
+ from huggingface_hub import hf_hub_download
34
+ config_file = hf_hub_download(path, "pipeline.json")
35
+
36
+ with open(config_file, 'r') as f:
37
+ args = json.load(f)['args']
38
+
39
+ _models = {
40
+ k: models.from_pretrained(f"{path}/{v}")
41
+ for k, v in args['models'].items()
42
+ }
43
+
44
+ new_pipeline = Pipeline(_models)
45
+ new_pipeline._pretrained_args = args
46
+ return new_pipeline
47
+
48
+ @property
49
+ def device(self) -> torch.device:
50
+ if hasattr(self, "_device"):
51
+ return self._device
52
+ # Jan 2025 memory optimizations: we'll move different models between CPU and GPU.
53
+ # Return 'cuda' if at least one model is on CUDA; otherwise return 'cpu'.
54
+ for model in self.models.values():
55
+ if hasattr(model, 'device') and model.device.type == 'cuda':
56
+ return torch.device('cuda')
57
+ if hasattr(model, 'parameters'):
58
+ try:
59
+ if next(model.parameters()).device.type == 'cuda':
60
+ return torch.device('cuda')
61
+ except StopIteration:
62
+ continue
63
+ return torch.device('cpu')
64
+
65
+ def to(self, dtype=None, device=None):
66
+ """
67
+ Convert all models to the specified dtype and/or move them to the specified device.
68
+
69
+ Args:
70
+ dtype: The target data type (e.g., torch.float16)
71
+ device: The target device (e.g., 'cuda')
72
+
73
+ Returns:
74
+ self: The pipeline instance
75
+ """
76
+ if dtype is not None or device is not None:
77
+ for name, model in self.models.items():
78
+ self.models[name] = model.to(dtype=dtype, device=device)
79
+ if device is not None:
80
+ self._device = torch.device(device)
81
+ return self
82
+
83
+ def half(self):
84
+ """
85
+ Convert all models to half precision (float16).
86
+
87
+ Returns:
88
+ self: The pipeline instance
89
+ """
90
+ return self.to(dtype=torch.float16)
91
+
92
+ def float(self):
93
+ """
94
+ Convert all models to single precision (float32).
95
+
96
+ Returns:
97
+ self: The pipeline instance
98
+ """
99
+ return self.to(dtype=torch.float32)
100
+
101
+ def cuda(self, device=None):
102
+ """
103
+ Move all models to CUDA and set them to evaluation mode.
104
+
105
+ Args:
106
+ device: The specific CUDA device to use
107
+
108
+ Returns:
109
+ self: The pipeline instance
110
+ """
111
+ self.to(device='cuda' if device is None else f'cuda:{device}')
112
+
113
+ # Set all models to evaluation mode
114
+ for model in self.models.values():
115
+ model.eval()
116
+
117
+ return self
118
+
119
+ def cpu(self) -> None:
120
+ self.to(torch.device("cpu"))
trellis/pipelines/samplers/__init__.py ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ from .base import Sampler
2
+ from .flow_euler import FlowEulerSampler, FlowEulerCfgSampler, FlowEulerGuidanceIntervalSampler
trellis/pipelines/samplers/base.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import *
2
+ from abc import ABC, abstractmethod
3
+
4
+
5
+ class Sampler(ABC):
6
+ """
7
+ A base class for samplers.
8
+ """
9
+
10
+ @abstractmethod
11
+ def sample(
12
+ self,
13
+ model,
14
+ **kwargs
15
+ ):
16
+ """
17
+ Sample from a model.
18
+ """
19
+ pass
20
+
trellis/pipelines/samplers/classifier_free_guidance_mixin.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import *
2
+
3
+
4
+ class ClassifierFreeGuidanceSamplerMixin:
5
+ """
6
+ A mixin class for samplers that apply classifier-free guidance.
7
+ """
8
+
9
+ def _inference_model(self, model, x_t, t, cond, neg_cond, cfg_strength, **kwargs):
10
+ pred = super()._inference_model(model, x_t, t, cond, **kwargs)
11
+ neg_pred = super()._inference_model(model, x_t, t, neg_cond, **kwargs)
12
+ return (1 + cfg_strength) * pred - cfg_strength * neg_pred
trellis/pipelines/samplers/flow_euler.py ADDED
@@ -0,0 +1,205 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import *
2
+ import torch
3
+ import numpy as np
4
+ from tqdm import tqdm
5
+ from easydict import EasyDict as edict
6
+ from .base import Sampler
7
+ from .classifier_free_guidance_mixin import ClassifierFreeGuidanceSamplerMixin
8
+ from .guidance_interval_mixin import GuidanceIntervalSamplerMixin
9
+
10
+ from api_spz.core.exceptions import CancelledException
11
+
12
class FlowEulerSampler(Sampler):
    """
    Generate samples from a flow-matching model using Euler sampling.

    The conversion helpers below correspond to the interpolation
        x_t = (1 - t) * x_0 + (sigma_min + (1 - sigma_min) * t) * eps
    with the model predicting the velocity v = dx_t/dt; each helper is the
    algebraic inversion of that relation.

    Args:
        sigma_min: The minimum scale of noise in flow.
    """
    def __init__(
        self,
        sigma_min: float,
    ):
        self.sigma_min = sigma_min

    def _eps_to_xstart(self, x_t, t, eps):
        # Recover x_0 from (x_t, eps) by inverting the interpolation above.
        assert x_t.shape == eps.shape
        return (x_t - (self.sigma_min + (1 - self.sigma_min) * t) * eps) / (1 - t)

    def _xstart_to_eps(self, x_t, t, x_0):
        # Recover eps from (x_t, x_0) by inverting the interpolation above.
        assert x_t.shape == x_0.shape
        return (x_t - (1 - t) * x_0) / (self.sigma_min + (1 - self.sigma_min) * t)

    def _v_to_xstart_eps(self, x_t, t, v):
        # Convert a predicted velocity into the equivalent (x_0, eps) pair.
        assert x_t.shape == v.shape
        eps = (1 - t) * v + x_t
        x_0 = (1 - self.sigma_min) * x_t - (self.sigma_min + (1 - self.sigma_min) * t) * v
        return x_0, eps

    def _inference_model(self, model, x_t, t, cond=None, **kwargs):
        # The model expects per-sample timesteps scaled to [0, 1000]
        # (presumably matching its training convention — confirm upstream).
        t = torch.tensor([1000 * t] * x_t.shape[0], device=x_t.device, dtype=torch.float32)
        return model(x_t, t, cond, **kwargs)

    def _get_model_prediction(self, model, x_t, t, cond=None, **kwargs):
        # Single model call, returned in all three equivalent parameterizations.
        pred_v = self._inference_model(model, x_t, t, cond, **kwargs)
        pred_x_0, pred_eps = self._v_to_xstart_eps(x_t=x_t, t=t, v=pred_v)
        return pred_x_0, pred_eps, pred_v

    @torch.no_grad()
    def sample_once(
        self,
        model,
        x_t,
        t: float,
        t_prev: float,
        cond: Optional[Any] = None,
        **kwargs
    ):
        """
        Sample x_{t-1} from the model using Euler method.

        Args:
            model: The model to sample from.
            x_t: The [N x C x ...] tensor of noisy inputs at time t.
            t: The current timestep.
            t_prev: The previous timestep.
            cond: conditional information.
            **kwargs: Additional arguments for model inference.

        Returns:
            a dict containing the following
            - 'pred_x_prev': x_{t-1}.
            - 'pred_x_0': a prediction of x_0.
        """
        pred_x_0, pred_eps, pred_v = self._get_model_prediction(model, x_t, t, cond, **kwargs)
        # One explicit Euler step along the predicted velocity field.
        pred_x_prev = x_t - (t - t_prev) * pred_v
        return edict({"pred_x_prev": pred_x_prev, "pred_x_0": pred_x_0})

    @torch.no_grad()
    def sample(
        self,
        model,
        noise,
        cond: Optional[Any] = None,
        steps: int = 50,
        rescale_t: float = 1.0,
        verbose: bool = True,
        cancel_event=None,
        **kwargs
    ):
        """
        Generate samples from the model using Euler method.

        Args:
            model: The model to sample from.
            noise: The initial noise tensor.
            cond: conditional information.
            steps: The number of steps to sample.
            rescale_t: The rescale factor for t.
            verbose: If True, show a progress bar.
            cancel_event: Optional event checked once per step; when set,
                sampling aborts by raising CancelledException.
            **kwargs: Additional arguments for model_inference.

        Returns:
            a dict containing the following
            - 'samples': the model samples.
            - 'pred_x_t': a list of prediction of x_t.
            - 'pred_x_0': a list of prediction of x_0.

        Raises:
            CancelledException: if cancel_event is set during sampling.
        """
        sample = noise
        # Timesteps from 1 (pure noise) down to 0 (data), optionally warped:
        # rescale_t > 1 concentrates steps near t = 1 (rescale_t = 1 is identity).
        t_seq = np.linspace(1, 0, steps + 1)
        t_seq = rescale_t * t_seq / (1 + (rescale_t - 1) * t_seq)
        t_pairs = list((t_seq[i], t_seq[i + 1]) for i in range(steps))
        ret = edict({"samples": None, "pred_x_t": [], "pred_x_0": []})
        for t, t_prev in tqdm(t_pairs, desc="Sampling", disable=not verbose):
            # Cooperative cancellation: checked between (not within) model calls.
            if cancel_event and cancel_event.is_set():
                raise CancelledException(f"Cancelled the Sampling.")
            out = self.sample_once(model, sample, t, t_prev, cond, **kwargs)
            sample = out.pred_x_prev
            ret.pred_x_t.append(out.pred_x_prev)
            ret.pred_x_0.append(out.pred_x_0)
        ret.samples = sample
        return ret
122
+
123
+
124
class FlowEulerCfgSampler(ClassifierFreeGuidanceSamplerMixin, FlowEulerSampler):
    """
    Generate samples from a flow-matching model using Euler sampling with classifier-free guidance.
    """
    @torch.no_grad()
    def sample(
        self,
        model,
        noise,
        cond,
        neg_cond,
        steps: int = 50,
        rescale_t: float = 1.0,
        cfg_strength: float = 3.0,
        verbose: bool = True,
        cancel_event=None,
        **kwargs
    ):
        """
        Generate samples from the model using Euler method.

        Args:
            model: The model to sample from.
            noise: The initial noise tensor.
            cond: conditional information.
            neg_cond: negative conditional information.
            steps: The number of steps to sample.
            rescale_t: The rescale factor for t.
            cfg_strength: The strength of classifier-free guidance.
            verbose: If True, show a progress bar.
            cancel_event: Optional event checked between steps to abort sampling
                early (raises CancelledException in the base sampler loop).
            **kwargs: Additional arguments for model_inference.

        Returns:
            a dict containing the following
            - 'samples': the model samples.
            - 'pred_x_t': a list of prediction of x_t.
            - 'pred_x_0': a list of prediction of x_0.
        """
        # Forward cancel_event explicitly so cooperative cancellation is part of
        # this sampler's documented interface, matching
        # FlowEulerGuidanceIntervalSampler.sample (previously it only worked if
        # smuggled through **kwargs).
        return super().sample(model, noise, cond, steps, rescale_t, verbose,
                              neg_cond=neg_cond, cfg_strength=cfg_strength,
                              cancel_event=cancel_event, **kwargs)
162
+
163
+
164
class FlowEulerGuidanceIntervalSampler(GuidanceIntervalSamplerMixin, FlowEulerSampler):
    """
    Euler sampler for flow-matching models where classifier-free guidance is
    applied only inside a configurable timestep interval.
    """
    @torch.no_grad()
    def sample(
        self,
        model,
        noise,
        cond,
        neg_cond,
        steps: int = 50,
        rescale_t: float = 1.0,
        cfg_strength: float = 3.0,
        cfg_interval: Tuple[float, float] = (0.0, 1.0),
        verbose: bool = True,
        cancel_event=None,
        **kwargs
    ):
        """
        Generate samples with the Euler method and interval-limited
        classifier-free guidance.

        Args:
            model: The model to sample from.
            noise: The initial noise tensor.
            cond: conditional information.
            neg_cond: negative conditional information.
            steps: The number of steps to sample.
            rescale_t: The rescale factor for t.
            cfg_strength: The strength of classifier-free guidance.
            cfg_interval: The (low, high) timestep range where guidance applies.
            verbose: If True, show a progress bar.
            cancel_event: Optional event checked between steps to abort early.
            **kwargs: Additional arguments for model inference.

        Returns:
            a dict containing the following
            - 'samples': the model samples.
            - 'pred_x_t': a list of prediction of x_t.
            - 'pred_x_0': a list of prediction of x_0.
        """
        # Guidance options are consumed by GuidanceIntervalSamplerMixin inside
        # _inference_model; cancel_event is handled by the base Euler loop.
        guidance_kwargs = dict(
            neg_cond=neg_cond,
            cfg_strength=cfg_strength,
            cfg_interval=cfg_interval,
            cancel_event=cancel_event,
        )
        return super().sample(model, noise, cond, steps, rescale_t, verbose,
                              **guidance_kwargs, **kwargs)
trellis/pipelines/samplers/guidance_interval_mixin.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import *
2
+
3
+
4
class GuidanceIntervalSamplerMixin:
    """
    Mixin that applies classifier-free guidance only while the timestep lies
    inside a configured interval; outside it, the plain conditional
    prediction is returned.
    """

    def _inference_model(self, model, x_t, t, cond, neg_cond, cfg_strength, cfg_interval, **kwargs):
        """Guided prediction inside ``cfg_interval``, unguided elsewhere."""
        low, high = cfg_interval
        if not (low <= t <= high):
            # Outside the guidance window: a single conditional forward pass.
            return super()._inference_model(model, x_t, t, cond, **kwargs)
        pred = super()._inference_model(model, x_t, t, cond, **kwargs)
        neg_pred = super()._inference_model(model, x_t, t, neg_cond, **kwargs)
        return (1 + cfg_strength) * pred - cfg_strength * neg_pred
trellis/pipelines/trellis_image_to_3d.py ADDED
@@ -0,0 +1,426 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import *
2
+ import torch
3
+ import torch.nn as nn
4
+ import torch.nn.functional as F
5
+ import numpy as np
6
+ from torchvision import transforms
7
+ from PIL import Image
8
+ from transformers import AutoModelForImageSegmentation
9
+ from .base import Pipeline
10
+ from . import samplers
11
+ from ..modules import sparse as sp
12
+
13
+ import logging
14
+ from api_spz.core.exceptions import CancelledException
15
+ logger = logging.getLogger("trellis") #was already setup earlier, during main.
16
+
17
class TrellisImageTo3DPipeline(Pipeline):
    """
    Pipeline for inferring Trellis image-to-3D models.

    Args:
        models (dict[str, nn.Module]): The models to use in the pipeline.
        sparse_structure_sampler (samplers.Sampler): The sampler for the sparse structure.
        slat_sampler (samplers.Sampler): The sampler for the structured latent.
        slat_normalization (dict): The normalization parameters for the structured latent.
        image_cond_model (str): The name of the image conditioning model.
    """

    # Default location of the RMBG-2.0 background-removal weights.  This is a
    # developer-machine path; override it with the TRELLIS_RMBG_MODEL_PATH
    # environment variable on any other host (e.g. the Docker/Space runtime).
    DEFAULT_RMBG_MODEL_PATH = "D:/production2/trellis/code/model_cache/rmbg2"

    def __init__(
        self,
        models: dict[str, nn.Module] = None,
        sparse_structure_sampler: samplers.Sampler = None,
        slat_sampler: samplers.Sampler = None,
        slat_normalization: dict = None,
        image_cond_model: str = None,
    ):
        if models is None:
            # Bare instance used by from_pretrained(), which fills in the
            # attributes afterwards by adopting the loaded pipeline's __dict__.
            return
        super().__init__(models)
        self.sparse_structure_sampler = sparse_structure_sampler
        self.slat_sampler = slat_sampler
        self.sparse_structure_sampler_params = {}
        self.slat_sampler_params = {}
        self.slat_normalization = slat_normalization
        self.rmbg_model = None

    @staticmethod
    def from_pretrained(path: str) -> "TrellisImageTo3DPipeline":
        """
        Load a pretrained model.

        Args:
            path (str): The path to the model. Can be either local path or a Hugging Face repository.
        """
        pipeline = super(TrellisImageTo3DPipeline, TrellisImageTo3DPipeline).from_pretrained(path)
        new_pipeline = TrellisImageTo3DPipeline()
        # Adopt the generic pipeline's state (models, device, _pretrained_args, ...).
        new_pipeline.__dict__ = pipeline.__dict__
        args = pipeline._pretrained_args

        new_pipeline.sparse_structure_sampler = getattr(samplers, args['sparse_structure_sampler']['name'])(**args['sparse_structure_sampler']['args'])
        new_pipeline.sparse_structure_sampler_params = args['sparse_structure_sampler']['params']

        new_pipeline.slat_sampler = getattr(samplers, args['slat_sampler']['name'])(**args['slat_sampler']['args'])
        new_pipeline.slat_sampler_params = args['slat_sampler']['params']

        new_pipeline.slat_normalization = args['slat_normalization']

        new_pipeline._init_image_cond_model(args['image_cond_model'])
        new_pipeline._init_rmbg_model()

        return new_pipeline

    def _init_image_cond_model(self, name: str):
        """
        Initialize the DINOv2 image-conditioning model from a locally cached
        checkpoint, in half precision and eval mode.

        Args:
            name (str): torch.hub entrypoint name (e.g. 'dinov2_vitl14_reg').
        """
        print('✅ Init image conditioning model')
        model_path = "./model_cache/dinov2/dinov2_vitl14_reg4_pretrain.pth"
        # Load the local checkpoint first; torch.hub only builds the architecture.
        state_dict = torch.load(model_path, map_location='cpu')
        # pretrained=False: the hub weights would be overwritten by the local
        # checkpoint anyway, so downloading them would be pure waste.
        dinov2_model = torch.hub.load('facebookresearch/dinov2', name,
                                      pretrained=False,
                                      skip_validation=True)
        dinov2_model.load_state_dict(state_dict)

        self.models['image_cond_model'] = dinov2_model
        # Only normalization here; resizing/scaling happens in encode_image().
        transform = transforms.Compose([
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])
        self.image_cond_model_transform = transform
        self.models['image_cond_model'].half()
        self.models['image_cond_model'].eval()

    def _init_rmbg_model(self):
        """
        Initialize the RMBG-2.0 background-removal model on CUDA together with
        the tensor transform used by preprocess_image().
        """
        import os  # local import: keeps the module's top-level imports unchanged
        # NOTE: trust_remote_code executes model code from the checkpoint
        # directory — only point this at a trusted local copy.
        model_path = os.environ.get("TRELLIS_RMBG_MODEL_PATH", self.DEFAULT_RMBG_MODEL_PATH)
        self.rmbg_model = AutoModelForImageSegmentation.from_pretrained(
            model_path,
            local_files_only=True,
            trust_remote_code=True
        )
        # Trade a little fp32 matmul precision for speed (TF32 on Ampere+).
        torch.set_float32_matmul_precision('medium')
        self.rmbg_model.to('cuda')
        self.rmbg_model.eval()
        self.rmbg_transform = transforms.Compose([
            transforms.Lambda(lambda img: img.convert('RGB')),
            transforms.Resize((1024, 1024)),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])

    def preprocess_image(self, input: Image.Image) -> Image.Image:
        """
        Preprocess the input image: remove the background (unless a useful
        alpha channel is already present), crop to the foreground, resize to
        518x518 and premultiply RGB by alpha.

        Note: when background removal runs, `input` is mutated in place via
        putalpha().

        Raises:
            ValueError: if no foreground pixels remain after background removal.
        """
        # An all-255 alpha channel carries no segmentation info, so treat it
        # as absent and run background removal.
        has_alpha = False
        if input.mode == 'RGBA':
            alpha = np.array(input)[:, :, 3]
            if not np.all(alpha == 255):
                has_alpha = True

        if not has_alpha:
            input_rgb = input.convert('RGB')
            input_images = self.rmbg_transform(input_rgb).unsqueeze(0).to('cuda')

            with torch.no_grad():
                preds = self.rmbg_model(input_images)[-1].sigmoid().cpu()
            pred = preds[0].squeeze()
            pred_pil = transforms.ToPILImage()(pred)
            mask = pred_pil.resize(input.size)
            input.putalpha(mask)

        output = input

        # Crop and resize based on alpha channel after background removal
        output_np = np.array(output)
        alpha = output_np[:, :, 3]
        bbox = np.argwhere(alpha > 0.8 * 255)
        if bbox.size == 0:
            # np.min/np.max on an empty array would raise an opaque numpy
            # error; fail with an actionable message instead.
            raise ValueError("No foreground detected in the input image after background removal.")
        bbox = np.min(bbox[:, 1]), np.min(bbox[:, 0]), np.max(bbox[:, 1]), np.max(bbox[:, 0])
        center = (bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2
        size = max(bbox[2] - bbox[0], bbox[3] - bbox[1])
        size = int(size * 1.2)  # 20% margin around the tight foreground box
        bbox = center[0] - size // 2, center[1] - size // 2, center[0] + size // 2, center[1] + size // 2
        output = output.crop(bbox)  # type: ignore
        output = output.resize((518, 518), Image.Resampling.LANCZOS)

        # Blend RGB and alpha channels and normalize the result
        output = np.array(output).astype(np.float32) / 255
        output = output[:, :, :3] * output[:, :, 3:4]
        output = Image.fromarray((output * 255).astype(np.uint8))

        return output

    @torch.no_grad()
    def encode_image(self, image: Union[torch.Tensor, list[Image.Image]]) -> torch.Tensor:
        """
        Encode the image.

        Args:
            image (Union[torch.Tensor, list[Image.Image]]): The image to encode

        Returns:
            torch.Tensor: The encoded features.
        """
        if isinstance(image, torch.Tensor):
            assert image.ndim == 4, "Image tensor should be batched (B, C, H, W)"
        elif isinstance(image, list):
            assert all(isinstance(i, Image.Image) for i in image), "Image list should be list of PIL images"
            image = [i.resize((518, 518), Image.LANCZOS) for i in image]
            image = [np.array(i.convert('RGB')).astype(np.float32) / 255 for i in image]
            # Match the conditioning model's parameter dtype so this works with
            # float16 or float32 checkpoints alike.
            desired_dtype = self.models['image_cond_model'].patch_embed.proj.weight.dtype
            image = [torch.from_numpy(i).permute(2, 0, 1).to(desired_dtype) for i in image]
            image = torch.stack(image).to(self.device)
        else:
            raise ValueError(f"Unsupported type of image: {type(image)}")

        image = self.image_cond_model_transform(image).to(self.device)
        features = self.models['image_cond_model'](image, is_training=True)['x_prenorm']
        patchtokens = F.layer_norm(features, features.shape[-1:])
        return patchtokens

    def get_cond(self, image: Union[torch.Tensor, list[Image.Image]]) -> dict:
        """
        Get the conditioning information for the model.

        Args:
            image (Union[torch.Tensor, list[Image.Image]]): The image prompts.

        Returns:
            dict: The conditioning information ('cond' and zeroed 'neg_cond').
        """
        cond = self.encode_image(image)
        neg_cond = torch.zeros_like(cond)
        return {
            'cond': cond,
            'neg_cond': neg_cond,
        }

    def sample_sparse_structure(
        self,
        cond: dict,
        num_samples: int = 1,
        sampler_params: dict = {},
        cancel_event=None,
    ) -> torch.Tensor:
        """
        Sample sparse structures with the given conditioning.

        Args:
            cond (dict): The conditioning information.
            num_samples (int): The number of samples to generate.
            sampler_params (dict): Additional parameters for the sampler.
            cancel_event: Optional event to abort sampling cooperatively.

        Returns:
            torch.Tensor: Int coordinates [K, 4] of occupied voxels (batch, x, y, z).
        """
        # Sample occupancy latent
        flow_model = self.models['sparse_structure_flow_model']
        reso = flow_model.resolution
        # Match the flow model's parameter dtype so this works with float16 or float32.
        desired_dtype = next(flow_model.parameters()).dtype
        noise = torch.randn(num_samples, flow_model.in_channels, reso, reso, reso, dtype=desired_dtype).to(self.device)
        sampler_params = {**self.sparse_structure_sampler_params, **sampler_params}
        z_s = self.sparse_structure_sampler.sample(
            flow_model,
            noise,
            **cond,
            **sampler_params,
            verbose=True,
            cancel_event=cancel_event,
        ).samples

        # Decode occupancy latent: keep voxels with positive logits, drop the
        # channel dim, keep (batch, x, y, z).
        decoder = self.models['sparse_structure_decoder']
        coords = torch.argwhere(decoder(z_s) > 0)[:, [0, 2, 3, 4]].int()

        return coords

    @torch.no_grad()
    def decode_slat_parallel(
        self,
        slat: sp.SparseTensor,
        formats: List[str] = ['mesh', 'gaussian'],
        cancel_event=None,
    ) -> dict:
        """
        Decode the structured latent in parallel using ThreadPoolExecutor.

        Args:
            slat (sp.SparseTensor): The structured latent.
            formats (List[str]): The formats to decode the structured latent to.
            cancel_event: Optional event to check for user cancellation.

        Returns:
            dict: The decoded structured latent (failed formats are omitted).
        """
        from concurrent.futures import ThreadPoolExecutor

        ret = {}

        # Check if we need to do any decoding
        if not formats:
            return ret

        # Check for cancellation before starting
        if cancel_event and cancel_event.is_set():
            raise CancelledException("User Cancelled")

        # Define decoder functions
        def decode_mesh():
            with torch.no_grad():
                return self.models['slat_decoder_mesh'](slat)

        def decode_gaussian():
            with torch.no_grad():
                return self.models['slat_decoder_gs'](slat)

        # Prepare tasks based on requested formats
        tasks = []
        if 'mesh' in formats:
            tasks.append(('mesh', decode_mesh))
        if 'gaussian' in formats:
            tasks.append(('gaussian', decode_gaussian))

        # If only one format is requested, process it directly
        if len(tasks) == 1:
            print("Decoding single format...")
            format_name, decoder_fn = tasks[0]
            ret[format_name] = decoder_fn()
            return ret

        # Process multiple formats in parallel
        try:
            with ThreadPoolExecutor(max_workers=len(tasks)) as executor:
                # Submit all tasks
                futures = {executor.submit(decoder_fn): format_name for format_name, decoder_fn in tasks}

                # Collect results in submission order (each .result() blocks
                # until that particular decode finishes)
                for future in futures:
                    format_name = futures[future]
                    try:
                        ret[format_name] = future.result()
                    except Exception as e:
                        logger.error(f"Error decoding {format_name}: {str(e)}")
                        # Continue with other formats if one fails
        finally:
            # Ensure GPU memory is properly synchronized
            torch.cuda.synchronize()

        return ret

    def sample_slat(
        self,
        cond: dict,
        coords: torch.Tensor,
        sampler_params: dict = {},
    ) -> sp.SparseTensor:
        """
        Sample structured latent with the given conditioning.

        Args:
            cond (dict): The conditioning information.
            coords (torch.Tensor): The coordinates of the sparse structure.
            sampler_params (dict): Additional parameters for the sampler.
        """
        # Sample structured latent
        flow_model = self.models['slat_flow_model']
        # Match the flow model's parameter dtype so this works with float16 or float32.
        desired_dtype = next(flow_model.parameters()).dtype
        noise = sp.SparseTensor(
            feats=torch.randn(coords.shape[0], flow_model.in_channels, dtype=desired_dtype).to(self.device),
            coords=coords,
        )
        sampler_params = {**self.slat_sampler_params, **sampler_params}
        slat = self.slat_sampler.sample(
            flow_model,
            noise,
            **cond,
            **sampler_params,
            verbose=False
        ).samples

        # De-normalize the latent back into the decoders' expected range.
        std = torch.tensor(self.slat_normalization['std'])[None].to(slat.device)
        mean = torch.tensor(self.slat_normalization['mean'])[None].to(slat.device)
        slat = slat * std + mean

        return slat

    @torch.no_grad()
    def run(
        self,
        image: Image.Image,
        num_samples: int = 1,
        seed: int = 42,
        sparse_structure_sampler_params: dict = {},
        slat_sampler_params: dict = {},
        formats: List[str] = ['mesh', 'gaussian'],
        preprocess_image: bool = True,
        parallel_decode: bool = False,
        cancel_event=None,
    ) -> dict:
        """
        Run the pipeline with models staying on GPU.

        Args:
            image: Input image
            num_samples: Number of samples to generate
            seed: Random seed for reproducibility
            sparse_structure_sampler_params: Parameters for sparse structure sampling
            slat_sampler_params: Parameters for SLAT sampling
            formats: Output formats to generate
            preprocess_image: Whether to preprocess the input image
            parallel_decode: Whether to decode formats in parallel
            cancel_event: Optional event to check for user cancellation
        """
        try:
            # Phase 1: Image conditioning
            if preprocess_image:
                image = self.preprocess_image(image)
            cond = self.get_cond([image])

            # Seed after conditioning so the generation noise is reproducible.
            torch.manual_seed(seed)

            # Phase 2: Structure generation
            coords = self.sample_sparse_structure(cond, num_samples, sparse_structure_sampler_params, cancel_event)

            if cancel_event and cancel_event.is_set():
                raise CancelledException("User Cancelled")

            # Phase 3: SLAT generation
            slat = self.sample_slat(cond, coords, slat_sampler_params)

            # Phase 4: Decode SLAT (parallel only pays off with >1 format)
            if parallel_decode and len(formats) > 1:
                return self.decode_slat_parallel(slat, formats, cancel_event)
            else:
                # Sequential decoding
                ret = {}
                if 'mesh' in formats:
                    ret['mesh'] = self.models['slat_decoder_mesh'](slat)
                if 'gaussian' in formats:
                    ret['gaussian'] = self.models['slat_decoder_gs'](slat)
                return ret

        finally:
            # Just clean cache without moving models
            torch.cuda.empty_cache()

    def _move_models(self, names: List[str], device: str, empty_cache: bool):
        """helps to transport several models from gpu to cpu, or the other way around"""
        for name in names:
            # Query a parameter's device: works for models (e.g. DinoVision)
            # that do not expose a 'device' attribute themselves.
            current_device = next(self.models[name].parameters()).device
            target_device = torch.device(device)
            # Only move if current device is different from target device
            if current_device != target_device:
                self.models[name].to(device)
        if empty_cache:
            torch.cuda.empty_cache()
trellis/pipelines/trellis_image_to_3d_cpu.py ADDED
@@ -0,0 +1,726 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from typing import *
3
+ from contextlib import contextmanager
4
+ import torch
5
+ import torch.nn as nn
6
+ import torch.nn.functional as F
7
+ import numpy as np
8
+ from torchvision import transforms
9
+ from PIL import Image
10
+ from transformers import AutoModelForImageSegmentation
11
+ from .base import Pipeline
12
+ from . import samplers
13
+ from ..modules import sparse as sp
14
+
15
+ import logging
16
+ import threading
17
+ from api_spz.core.exceptions import CancelledException
18
+ logger = logging.getLogger("trellis") #was already setup earlier, during main.
19
+
20
+ class TrellisImageTo3DPipeline(Pipeline):
21
+ """
22
+ Pipeline for inferring Trellis image-to-3D models.
23
+
24
+ Args:
25
+ models (dict[str, nn.Module]): The models to use in the pipeline.
26
+ sparse_structure_sampler (samplers.Sampler): The sampler for the sparse structure.
27
+ slat_sampler (samplers.Sampler): The sampler for the structured latent.
28
+ slat_normalization (dict): The normalization parameters for the structured latent.
29
+ image_cond_model (str): The name of the image conditioning model.
30
+ """
31
+ def __init__(
32
+ self,
33
+ models: dict[str, nn.Module] = None,
34
+ sparse_structure_sampler: samplers.Sampler = None,
35
+ slat_sampler: samplers.Sampler = None,
36
+ slat_normalization: dict = None,
37
+ image_cond_model: str = None,
38
+ ):
39
+ if models is None:
40
+ self.models = {}
41
+ self.sparse_structure_sampler = None
42
+ self.slat_sampler = None
43
+ self.sparse_structure_sampler_params = {}
44
+ self.slat_sampler_params = {}
45
+ self.slat_normalization = None
46
+ self.rmbg_model = None
47
+ self.image_cond_model_transform = None
48
+ # Fall through to initialize locks and new attributes
49
+ else:
50
+ super().__init__(models)
51
+ self.sparse_structure_sampler = sparse_structure_sampler
52
+ self.slat_sampler = slat_sampler
53
+ self.sparse_structure_sampler_params = {} # Typically set by from_pretrained
54
+ self.slat_sampler_params = {} # Typically set by from_pretrained
55
+ self.slat_normalization = slat_normalization
56
+
57
+ self.rmbg_model_lock = threading.Lock()
58
+ self.run_lock = threading.Lock()
59
+
60
+ self.main_3d_model_names = [
61
+ 'image_cond_model',
62
+ 'sparse_structure_flow_model',
63
+ 'sparse_structure_decoder',
64
+ 'slat_flow_model',
65
+ 'slat_decoder_mesh',
66
+ 'slat_decoder_gs'
67
+ ]
68
+ self.active_run_users = 0
69
+ self.main_models_on_gpu = False
70
+ # self.device is set in super().__init__(models) or should be 'cpu' if models is None initially
71
+ if models is None:
72
+ self._device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
73
+
74
+ @staticmethod
75
+ def from_pretrained(path: str, formats: List[str] = ['mesh', 'gaussian']) -> "TrellisImageTo3DPipeline":
76
+ pipeline_from_super = super(TrellisImageTo3DPipeline, TrellisImageTo3DPipeline).from_pretrained(path)
77
+
78
+ new_pipeline = TrellisImageTo3DPipeline()
79
+
80
+ if pipeline_from_super is not None:
81
+ new_pipeline.__dict__.update(pipeline_from_super.__dict__)
82
+ # Ensure _pretrained_args is available for subsequent initializations
83
+ if hasattr(pipeline_from_super, '_pretrained_args'):
84
+ args = pipeline_from_super._pretrained_args
85
+ else:
86
+ logger.warning("_pretrained_args not found on pipeline_from_super. Some initializations might fail.")
87
+ args = {}
88
+ else:
89
+ logger.error("Superclass from_pretrained returned None. Pipeline may be incomplete.")
90
+ args = {}
91
+
92
+ # CRITICAL: Explicitly (re-)initialize locks and active usage trackers
93
+ new_pipeline.rmbg_model_lock = threading.Lock()
94
+ new_pipeline.run_lock = threading.Lock()
95
+ new_pipeline.active_run_users = 0
96
+ new_pipeline.main_models_on_gpu = False # Assume models start on CPU after loading
97
+
98
+ # Ensure main_3d_model_names is initialized
99
+ if not hasattr(new_pipeline, 'main_3d_model_names'):
100
+ new_pipeline.main_3d_model_names = [
101
+ 'image_cond_model', 'sparse_structure_flow_model', 'sparse_structure_decoder',
102
+ 'slat_flow_model', 'slat_decoder_mesh', 'slat_decoder_gs'
103
+ ]
104
+
105
+ # Ensure other essential attributes are present
106
+ if not hasattr(new_pipeline, 'models'): new_pipeline.models = {}
107
+ if not hasattr(new_pipeline, 'sparse_structure_sampler_params'): new_pipeline.sparse_structure_sampler_params = {}
108
+ if not hasattr(new_pipeline, 'slat_sampler_params'): new_pipeline.slat_sampler_params = {}
109
+ new_pipeline._device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
110
+
111
+
112
+ # Filter models based on formats (ensure self.models exists and is a dict)
113
+ if not isinstance(new_pipeline.models, dict): new_pipeline.models = {}
114
+ required_model_keys = {
115
+ 'image_cond_model', 'sparse_structure_flow_model',
116
+ 'sparse_structure_decoder', 'slat_flow_model'
117
+ }
118
+ if 'mesh' in formats: required_model_keys.add('slat_decoder_mesh')
119
+ if 'gaussian' in formats: required_model_keys.add('slat_decoder_gs')
120
+
121
+ current_models = new_pipeline.models
122
+ new_pipeline.models = {k: v for k, v in current_models.items() if k in required_model_keys}
123
+
124
+ # Initialize rest of pipeline, checking if args and specific keys exist
125
+ if 'sparse_structure_sampler' in args and 'name' in args['sparse_structure_sampler']:
126
+ new_pipeline.sparse_structure_sampler = getattr(samplers, args['sparse_structure_sampler']['name'])(**args['sparse_structure_sampler'].get('args', {}))
127
+ new_pipeline.sparse_structure_sampler_params = args['sparse_structure_sampler'].get('params', {})
128
+
129
+ if 'slat_sampler' in args and 'name' in args['slat_sampler']:
130
+ new_pipeline.slat_sampler = getattr(samplers, args['slat_sampler']['name'])(**args['slat_sampler'].get('args', {}))
131
+ new_pipeline.slat_sampler_params = args['slat_sampler'].get('params', {})
132
+
133
+ if 'slat_normalization' in args:
134
+ new_pipeline.slat_normalization = args['slat_normalization']
135
+
136
+ if 'image_cond_model' in args: # This key refers to the model name like 'dinov2_vitl14_reg'
137
+ new_pipeline._init_image_cond_model(args['image_cond_model'])
138
+ # Ensure image_cond_model is also moved to the target device and dtype
139
+ # if not handled by a subsequent global model processing loop.
140
+ # Based on logs, it seems to be handled, but explicit handling can be safer.
141
+ if new_pipeline.models.get('image_cond_model') and hasattr(new_pipeline, 'device') and hasattr(new_pipeline, 'dtype'):
142
+ model_to_move = new_pipeline.models['image_cond_model']
143
+ if next(model_to_move.parameters()).device != new_pipeline.device or model_to_move.dtype != new_pipeline.dtype:
144
+ logger.info(f"Explicitly moving image_cond_model to {new_pipeline.device}, {new_pipeline.dtype} in TrellisImageTo3DPipeline.from_pretrained")
145
+ model_to_move.to(new_pipeline.device, dtype=new_pipeline.dtype).eval()
146
+
147
+ new_pipeline._init_rmbg_model() # rmbg_model is always initialized and handles its own device placement
148
+
149
+ # After all models are loaded and initial device placement has occurred,
150
+ # set main_models_on_gpu based on the pipeline's target device.
151
+ new_pipeline.main_models_on_gpu = False
152
+
153
+ return new_pipeline
154
+
155
    @torch.no_grad()
    def run(
        self,
        image: Image.Image,
        num_samples: int = 1,
        seed: int = 42,
        sparse_structure_sampler_params: dict = {},
        slat_sampler_params: dict = {},
        formats: List[str] = ['mesh', 'gaussian'],
        preprocess_image: bool = True,  # Renamed to avoid conflict with method name
        cancel_event=None,
    ) -> Optional[dict]:
        """Run the full image-to-3D pipeline for a single image.

        Stages: optional preprocessing (background removal + crop), image
        conditioning, sparse-structure sampling, SLAT sampling, and decoding
        into the requested output formats.

        Args:
            image: Input PIL image.
            num_samples: Number of sparse-structure samples to draw.
            seed: Torch RNG seed set just before sampling.
            sparse_structure_sampler_params: Per-call sampler overrides.
            slat_sampler_params: Per-call SLAT sampler overrides.
            formats: Decoders to run; any of 'mesh', 'gaussian'.
            preprocess_image: If True, run ``self.preprocess_image`` first.
            cancel_event: Optional event checked between sampling stages.

        Returns:
            Dict with 'mesh' and/or 'gaussian' entries, or None when
            preprocessing fails.

        Raises:
            CancelledException: If ``cancel_event`` is set mid-run.

        NOTE(review): mutable defaults ({} / list) are only read here, so they
        are benign, but consider ``None`` sentinels. Also, ``run_lock`` is held
        for the entire run, which fully serializes concurrent calls — so
        ``active_run_users`` can apparently never exceed 1; confirm whether
        finer-grained locking was intended.
        """
        ret = {}

        if preprocess_image:
            processed_image = self.preprocess_image(image)
            if processed_image is None:
                logger.error("Image preprocessing returned None. Aborting run.")
                return None
            image_to_process = processed_image
        else:
            image_to_process = image

        # Only manage models that are actually loaded in this pipeline.
        models_to_manage_on_gpu = [
            name for name in self.main_3d_model_names
            if name in self.models and self.models[name] is not None
        ]

        with self.run_lock:
            self.active_run_users += 1
            try:
                # Lazily shuttle the heavy models onto the GPU on first use.
                if not self.main_models_on_gpu and torch.cuda.is_available():
                    logger.info(f"Models on CPU or pipeline waking up. Moving {len(models_to_manage_on_gpu)} main models to GPU. Active users: {self.active_run_users}.")
                    self._move_models(models_to_manage_on_gpu, 'cuda', empty_cache=False)
                    self.main_models_on_gpu = True
                    torch.cuda.synchronize()
                elif torch.cuda.is_available():
                    logger.debug(f"Main models already on GPU. Active users: {self.active_run_users}.")

                current_operational_device = torch.device("cuda" if self.main_models_on_gpu and torch.cuda.is_available() else "cpu")

                # fp16 autocast only when actually running on CUDA.
                with torch.amp.autocast(device_type=current_operational_device.type, dtype=torch.float16, enabled=(current_operational_device.type == 'cuda')):
                    cond = self.get_cond([image_to_process])

                    torch.manual_seed(seed)
                    coords = self.sample_sparse_structure(cond, num_samples, sparse_structure_sampler_params, cancel_event)

                    if cancel_event and cancel_event.is_set():
                        raise CancelledException("User Cancelled")

                    slat = self.sample_slat(cond, coords, slat_sampler_params)

                    # Decode only the formats whose decoders are loaded.
                    if 'slat_decoder_mesh' in self.models and self.models['slat_decoder_mesh'] is not None and 'mesh' in formats:
                        ret['mesh'] = self.models['slat_decoder_mesh'](slat)
                    if 'slat_decoder_gs' in self.models and self.models['slat_decoder_gs'] is not None and 'gaussian' in formats:
                        ret['gaussian'] = self.models['slat_decoder_gs'](slat)

            except CancelledException:
                logger.info("Pipeline run cancelled by user.")
                # ret might be empty or partially filled.
                raise
            except Exception as e:
                logger.error(f"Exception during pipeline run's core logic: {e}", exc_info=True)
                # ret might be empty or partially filled.
                raise
            finally:
                # Last user out moves the models back to CPU to free VRAM.
                self.active_run_users -= 1
                if self.active_run_users == 0 and self.main_models_on_gpu:
                    logger.info(f"Last active user. Moving {len(models_to_manage_on_gpu)} main models to CPU.")
                    self._move_models(models_to_manage_on_gpu, 'cpu', empty_cache=True)
                    self.main_models_on_gpu = False

        if torch.cuda.is_available():
            torch.cuda.synchronize()
        return ret
+ def _init_image_cond_model(self, name: str):
232
+ print('✅ Init image conditioning model')
233
+ # Load standard pretrained DinoV2 weights from hub for portability.
234
+ dinov2_model = torch.hub.load('facebookresearch/dinov2', name,
235
+ pretrained=True,
236
+ skip_validation=True,
237
+ trust_repo=True)
238
+ dinov2_model.eval()
239
+
240
+ self.models['image_cond_model'] = dinov2_model
241
+ transform = transforms.Compose([
242
+ transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
243
+ ])
244
+ self.image_cond_model_transform = transform
245
+
246
+ def _init_rmbg_model(self):
247
+ """Initialize RMBG model in float32 precision for stability"""
248
+ print('✅ Init RMBG model')
249
+ model_path = os.getenv("TRELLIS_REMBG_MODEL", "briaai/RMBG-2.0")
250
+ self.rmbg_model = AutoModelForImageSegmentation.from_pretrained(
251
+ model_path,
252
+ local_files_only=False,
253
+ trust_remote_code=True
254
+ )
255
+ torch.set_float32_matmul_precision('medium')
256
+ self.rmbg_model.to(torch.float32)
257
+ self.rmbg_model.eval()
258
+ # self.rmbg_model_lock is already initialized in __init__
259
+
260
+ self.rmbg_transform = transforms.Compose([
261
+ transforms.Lambda(lambda img: img.convert('RGB')),
262
+ transforms.Resize((1024, 1024)),
263
+ transforms.ToTensor(),
264
+ transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
265
+ ])
266
+
267
+ def preprocess_images(self, input_pil_images: List[Image.Image]) -> List[Optional[Image.Image]]:
268
+ if not input_pil_images:
269
+ return []
270
+
271
+ output_images = []
272
+ images_needing_rmbg = []
273
+ indices_needing_rmbg = []
274
+
275
+ for idx, input_pil_image in enumerate(input_pil_images):
276
+ has_alpha = False
277
+ if input_pil_image.mode == 'RGBA':
278
+ alpha = np.array(input_pil_image)[:, :, 3]
279
+ if not np.all(alpha == 255):
280
+ has_alpha = True
281
+
282
+ output_pil_image = input_pil_image.copy()
283
+
284
+ if not has_alpha:
285
+ images_needing_rmbg.append(output_pil_image)
286
+ indices_needing_rmbg.append(idx)
287
+
288
+ output_images.append(output_pil_image)
289
+
290
+ if images_needing_rmbg:
291
+ if not hasattr(self, 'rmbg_model') or self.rmbg_model is None or self.rmbg_model_lock is None:
292
+ logger.warning("RMBG model or lock not initialized, skipping background removal.")
293
+ else:
294
+ with self.rmbg_model_lock:
295
+ rmbg_model_instance = self.rmbg_model
296
+ rmbg_processing_device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
297
+ rmbg_processing_dtype = torch.float16 if rmbg_processing_device.type == 'cuda' else torch.float32
298
+
299
+ model_moved = False
300
+ if next(rmbg_model_instance.parameters()).device != rmbg_processing_device:
301
+ rmbg_model_instance.to(rmbg_processing_device)
302
+ logger.info("Moved rmbg_model to CUDA for batch preprocessing.")
303
+ model_moved = True
304
+
305
+ if next(rmbg_model_instance.parameters()).dtype != rmbg_processing_dtype:
306
+ rmbg_model_instance.half()
307
+ logger.info("Converted rmbg_model to float16 for batch preprocessing.")
308
+
309
+ rmbg_model_instance.eval()
310
+
311
+ try:
312
+ for idx, img_idx in enumerate(indices_needing_rmbg):
313
+ try:
314
+ img_to_process = output_images[img_idx]
315
+ input_rgb = img_to_process.convert('RGB')
316
+ input_tensor_transformed = self.rmbg_transform(input_rgb).unsqueeze(0)
317
+ input_tensor_for_model = input_tensor_transformed.to(rmbg_processing_device, dtype=rmbg_processing_dtype)
318
+
319
+ with torch.amp.autocast(device_type=rmbg_processing_device.type, dtype=rmbg_processing_dtype, enabled=(rmbg_processing_device.type == 'cuda')):
320
+ with torch.no_grad():
321
+ preds = rmbg_model_instance(input_tensor_for_model)[-1].sigmoid()
322
+
323
+ pred_cpu = preds[0].squeeze().cpu()
324
+ pred_pil = transforms.ToPILImage()(pred_cpu)
325
+ mask = pred_pil.resize(img_to_process.size)
326
+ img_to_process.putalpha(mask)
327
+ output_images[img_idx] = img_to_process
328
+
329
+ except Exception as e:
330
+ logger.error(f"Error in preprocess_images background removal for image {img_idx}: {e}", exc_info=True)
331
+
332
+ finally:
333
+ if model_moved:
334
+ rmbg_model_instance.to(torch.device('cpu'), dtype=torch.float32)
335
+ logger.info("Moved rmbg_model back to CPU after batch preprocessing.")
336
+
337
+ processed_outputs = []
338
+ for output_pil_image in output_images:
339
+ output_np = np.array(output_pil_image)
340
+ alpha = output_np[:, :, 3]
341
+ bbox_indices = np.argwhere(alpha > 0.8 * 255)
342
+
343
+ if len(bbox_indices) == 0:
344
+ bbox = (0, 0, output_np.shape[1], output_np.shape[0])
345
+ center = (bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2
346
+ size = max(bbox[2] - bbox[0], bbox[3] - bbox[1])
347
+ else:
348
+ bbox = (
349
+ np.min(bbox_indices[:, 1]),
350
+ np.min(bbox_indices[:, 0]),
351
+ np.max(bbox_indices[:, 1]),
352
+ np.max(bbox_indices[:, 0])
353
+ )
354
+ center = (bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2
355
+ size = max(bbox[2] - bbox[0], bbox[3] - bbox[1])
356
+
357
+ size = int(size * 1.2)
358
+ bbox = (
359
+ int(center[0] - size // 2),
360
+ int(center[1] - size // 2),
361
+ int(center[0] + size // 2),
362
+ int(center[1] + size // 2)
363
+ )
364
+
365
+ output = output_pil_image.crop(bbox)
366
+ output = output.resize((518, 518), Image.Resampling.LANCZOS)
367
+
368
+ output = np.array(output).astype(np.float32) / 255
369
+ output = output[:, :, :3] * output[:, :, 3:4]
370
+ output = Image.fromarray((output * 255).astype(np.uint8))
371
+
372
+ processed_outputs.append(output)
373
+
374
+ return processed_outputs
375
+
376
+ def preprocess_image(self, input_pil_image: Image.Image) -> Optional[Image.Image]:
377
+ has_alpha = False
378
+ if input_pil_image.mode == 'RGBA':
379
+ alpha = np.array(input_pil_image)[:, :, 3]
380
+ if not np.all(alpha == 255):
381
+ has_alpha = True
382
+
383
+ output_pil_image = input_pil_image.copy()
384
+
385
+ if not has_alpha:
386
+ if not hasattr(self, 'rmbg_model') or self.rmbg_model is None or self.rmbg_model_lock is None:
387
+ logger.warning("RMBG model or lock not initialized, skipping background removal.")
388
+ else:
389
+ with self.rmbg_model_lock:
390
+ rmbg_model_instance = self.rmbg_model
391
+ rmbg_processing_device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
392
+ rmbg_processing_dtype = torch.float16 if rmbg_processing_device.type == 'cuda' else torch.float32
393
+
394
+ if next(rmbg_model_instance.parameters()).device != rmbg_processing_device:
395
+ rmbg_model_instance.to(rmbg_processing_device)
396
+ logger.info("Moved rmbg_model to CUDA for preprocessing.")
397
+
398
+ if next(rmbg_model_instance.parameters()).dtype != rmbg_processing_dtype:
399
+ rmbg_model_instance.half()
400
+ logger.info("Converted rmbg_model to float16 for preprocessing.")
401
+
402
+ rmbg_model_instance.eval()
403
+
404
+ try:
405
+ input_rgb = output_pil_image.convert('RGB')
406
+ input_tensor_transformed = self.rmbg_transform(input_rgb).unsqueeze(0)
407
+ input_tensor_for_model = input_tensor_transformed.to(rmbg_processing_device, dtype=rmbg_processing_dtype)
408
+
409
+ with torch.amp.autocast(device_type=rmbg_processing_device.type, dtype=rmbg_processing_dtype, enabled=(rmbg_processing_device.type == 'cuda')):
410
+ with torch.no_grad():
411
+ preds = rmbg_model_instance(input_tensor_for_model)[-1].sigmoid()
412
+
413
+ pred_cpu = preds[0].squeeze().cpu()
414
+ pred_pil = transforms.ToPILImage()(pred_cpu)
415
+ mask = pred_pil.resize(output_pil_image.size)
416
+ output_pil_image.putalpha(mask)
417
+
418
+ except Exception as e:
419
+ logger.error(f"Error in preprocess_image background removal: {e}", exc_info=True)
420
+ finally:
421
+ rmbg_model_instance.to(torch.device('cpu'), dtype=torch.float32)
422
+
423
+ output_np = np.array(output_pil_image)
424
+ alpha = output_np[:, :, 3]
425
+ bbox_indices = np.argwhere(alpha > 0.8 * 255)
426
+
427
+ if len(bbox_indices) == 0:
428
+ # If no pixels above threshold found, use the entire image
429
+ bbox = (0, 0, output_np.shape[1], output_np.shape[0])
430
+ center = (bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2
431
+ size = max(bbox[2] - bbox[0], bbox[3] - bbox[1])
432
+ else:
433
+ # Calculate bbox from valid indices
434
+ bbox = (
435
+ np.min(bbox_indices[:, 1]), # x_min
436
+ np.min(bbox_indices[:, 0]), # y_min
437
+ np.max(bbox_indices[:, 1]), # x_max
438
+ np.max(bbox_indices[:, 0]) # y_max
439
+ )
440
+ center = (bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2
441
+ size = max(bbox[2] - bbox[0], bbox[3] - bbox[1])
442
+
443
+ # Add padding
444
+ size = int(size * 1.2)
445
+ bbox = (
446
+ int(center[0] - size // 2),
447
+ int(center[1] - size // 2),
448
+ int(center[0] + size // 2),
449
+ int(center[1] + size // 2)
450
+ )
451
+
452
+ output = output_pil_image.crop(bbox) # type: ignore
453
+ output = output.resize((518, 518), Image.Resampling.LANCZOS)
454
+
455
+ # Blend RGB and alpha channels and normalize the result
456
+ output = np.array(output).astype(np.float32) / 255
457
+ output = output[:, :, :3] * output[:, :, 3:4]
458
+ output = Image.fromarray((output * 255).astype(np.uint8))
459
+
460
+ return output
461
+
462
+
463
+ @torch.no_grad()
464
+ def encode_image(self, image: Union[torch.Tensor, list[Image.Image]]) -> torch.Tensor:
465
+ """Encode the image with autocast for fp16"""
466
+ if isinstance(image, torch.Tensor):
467
+ assert image.ndim == 4, "Image tensor should be batched (B, C, H, W)"
468
+ elif isinstance(image, list):
469
+ assert all(isinstance(i, Image.Image) for i in image), "Image list should be list of PIL images"
470
+ image = [i.resize((518, 518), Image.LANCZOS) for i in image]
471
+ image = [np.array(i.convert('RGB')).astype(np.float32) / 255 for i in image]
472
+ image = [torch.from_numpy(i).permute(2, 0, 1) for i in image]
473
+ image = torch.stack(image)
474
+ else:
475
+ raise ValueError(f"Unsupported type of image: {type(image)}")
476
+
477
+ model_device = next(self.models['image_cond_model'].parameters()).device
478
+ image_dtype = torch.float16 if model_device.type == 'cuda' else torch.float32
479
+ image = self.image_cond_model_transform(image).to(model_device, dtype=image_dtype)
480
+
481
+ with torch.amp.autocast(model_device.type, dtype=image_dtype, enabled=(model_device.type == 'cuda')):
482
+ features = self.models['image_cond_model'](image, is_training=True)['x_prenorm']
483
+ patchtokens = F.layer_norm(features, features.shape[-1:])
484
+
485
+ return patchtokens
486
+
487
+ def get_cond(self, image: Union[torch.Tensor, list[Image.Image]]) -> dict:
488
+ cond = self.encode_image(image)
489
+ neg_cond = torch.zeros_like(cond)
490
+ return {
491
+ 'cond': cond,
492
+ 'neg_cond': neg_cond,
493
+ }
494
+
495
+ def sample_sparse_structure(
496
+ self,
497
+ conditioning_dict: dict,
498
+ num_samples: int = 1,
499
+ sampler_params: dict = {},
500
+ cancel_event=None,
501
+ ) -> torch.Tensor:
502
+ """Sample sparse structures with autocast for fp16"""
503
+ with torch.no_grad():
504
+ # Autocast is enabled based on current_operational_device in the run method
505
+ # We need to ensure models and data are on the correct device here.
506
+ flow_model = self.models['sparse_structure_flow_model']
507
+ current_device = next(flow_model.parameters()).device
508
+
509
+ reso = flow_model.resolution
510
+ noise_dtype = torch.float16 if current_device.type == 'cuda' else torch.float32
511
+ noise = torch.randn(num_samples, flow_model.in_channels, reso, reso, reso, dtype=noise_dtype).to(current_device)
512
+
513
+ # Make a copy of sampler_params to avoid modifying the original dict from config
514
+ # This is good practice if sampler_params might be modified (e.g., by pop)
515
+ # and is reused elsewhere or expected to be constant.
516
+ current_sampler_params = sampler_params.copy()
517
+
518
+ # If 'steps' or other parameters from sampler_params are also expected positionally
519
+ # by the sampler, they would need to be extracted here and passed positionally.
520
+ # For example:
521
+ # steps = current_sampler_params.pop('steps', default_steps_value_if_any)
522
+ # Then pass 'steps' positionally.
523
+
524
+ z_s = self.sparse_structure_sampler.sample(
525
+ flow_model,
526
+ noise,
527
+ conditioning_dict['cond'], # Pass 'cond' positionally
528
+ conditioning_dict['neg_cond'], # Pass 'neg_cond' positionally
529
+ # If other arguments like 'steps' are positional, they come next.
530
+ # Otherwise, they are passed via **current_sampler_params.
531
+ **current_sampler_params, # Remaining sampler parameters as keywords
532
+ verbose=True, # Explicit keyword argument
533
+ cancel_event=cancel_event # Explicit keyword argument
534
+ ).samples
535
+
536
+ decoder = self.models['sparse_structure_decoder']
537
+ # Ensure decoder is on the same device as z_s if z_s is on GPU
538
+ if next(decoder.parameters()).device != z_s.device:
539
+ decoder.to(z_s.device)
540
+ coords = torch.argwhere(decoder(z_s)>0)[:, [0, 2, 3, 4]].int()
541
+ return coords
542
+
543
+ @torch.no_grad()
544
+ def sample_slat(
545
+ self,
546
+ cond: dict,
547
+ coords: torch.Tensor,
548
+ sampler_params: dict = {},
549
+ ) -> sp.SparseTensor:
550
+ """Sample structured latent with autocast for fp16"""
551
+ flow_model = self.models['slat_flow_model']
552
+ flow_model_device = next(flow_model.parameters()).device
553
+ desired_dtype = next(flow_model.parameters()).dtype
554
+ noise = sp.SparseTensor(
555
+ feats=torch.randn(coords.shape[0], flow_model.in_channels, dtype=desired_dtype).to(flow_model_device),
556
+ coords=coords,
557
+ )
558
+ sampler_params = {**self.slat_sampler_params, **sampler_params}
559
+
560
+ with torch.amp.autocast(device_type=flow_model_device.type, dtype=desired_dtype, enabled=(flow_model_device.type == 'cuda')):
561
+ slat = self.slat_sampler.sample(
562
+ flow_model,
563
+ noise,
564
+ **cond,
565
+ **sampler_params,
566
+ verbose=False
567
+ ).samples
568
+
569
+ std = torch.tensor(self.slat_normalization['std'])[None].to(slat.device)
570
+ mean = torch.tensor(self.slat_normalization['mean'])[None].to(slat.device)
571
+ slat = slat * std + mean
572
+
573
+ return slat
574
+
575
+ @contextmanager
576
+ def inject_sampler_multi_image(
577
+ self,
578
+ sampler_name: str,
579
+ num_images: int,
580
+ num_steps: int,
581
+ mode: Literal['stochastic', 'multidiffusion'] = 'stochastic',
582
+ ):
583
+ sampler = getattr(self, sampler_name)
584
+ setattr(sampler, f'_old_inference_model', sampler._inference_model)
585
+
586
+ if mode == 'stochastic':
587
+ if num_images > num_steps:
588
+ logger.warning(f"Number of conditioning images is greater than number of steps for {sampler_name}. "
589
+ "This may lead to performance degradation.")
590
+
591
+ cond_indices = (np.arange(num_steps) % num_images).tolist()
592
+ def _new_inference_model(self, model, x_t, t, cond, **kwargs):
593
+ cond_idx = cond_indices.pop(0)
594
+ cond_i = cond[cond_idx:cond_idx+1]
595
+ return self._old_inference_model(model, x_t, t, cond=cond_i, **kwargs)
596
+
597
+ elif mode =='multidiffusion':
598
+ from .samplers import FlowEulerSampler
599
+ def _new_inference_model(self, model, x_t, t, cond, neg_cond, cfg_strength, cfg_interval, **kwargs):
600
+ if cfg_interval[0] <= t <= cfg_interval[1]:
601
+ preds = []
602
+ for i in range(len(cond)):
603
+ preds.append(FlowEulerSampler._inference_model(self, model, x_t, t, cond[i:i+1], **kwargs))
604
+ pred = sum(preds) / len(preds)
605
+ neg_pred = FlowEulerSampler._inference_model(self, model, x_t, t, neg_cond, **kwargs)
606
+ return (1 + cfg_strength) * pred - cfg_strength * neg_pred
607
+ else:
608
+ preds = []
609
+ for i in range(len(cond)):
610
+ preds.append(FlowEulerSampler._inference_model(self, model, x_t, t, cond[i:i+1], **kwargs))
611
+ pred = sum(preds) / len(preds)
612
+ return pred
613
+
614
+ else:
615
+ raise ValueError(f"Unsupported mode: {mode}")
616
+
617
+ sampler._inference_model = _new_inference_model.__get__(sampler, type(sampler))
618
+
619
+ yield
620
+
621
+ sampler._inference_model = sampler._old_inference_model
622
+ delattr(sampler, f'_old_inference_model')
623
+
624
    @torch.no_grad()
    def run_multi_image(
        self,
        images: List[Image.Image],
        num_samples: int = 1,
        seed: int = 42,
        sparse_structure_sampler_params: dict = {},
        slat_sampler_params: dict = {},
        formats: List[str] = ['mesh', 'gaussian'],
        preprocess_image: bool = True,
        mode: Literal['stochastic', 'multidiffusion'] = 'stochastic',
        cancel_event=None,
    ) -> Optional[dict]:
        """Run the pipeline conditioned on multiple views of the same object.

        Same staging as ``run``, but the samplers are temporarily patched via
        ``inject_sampler_multi_image`` so each denoising step can draw on the
        multiple conditioning images.

        Args:
            images: Input PIL images (multiple views).
            num_samples: Number of sparse-structure samples to draw.
            seed: Torch RNG seed set just before sampling.
            sparse_structure_sampler_params: Per-call sampler overrides.
            slat_sampler_params: Per-call SLAT sampler overrides.
            formats: Decoders to run; any of 'mesh', 'gaussian'.
            preprocess_image: If True, run ``self.preprocess_images`` first.
            mode: Multi-image conditioning strategy ('stochastic' or
                'multidiffusion').
            cancel_event: Optional event checked between sampling stages.

        Returns:
            Dict with 'mesh' and/or 'gaussian' entries, or None when any
            image fails preprocessing.

        Raises:
            CancelledException: If ``cancel_event`` is set mid-run.

        NOTE(review): run_lock serializes all runs for the full duration, so
        active_run_users should never exceed 1 here — confirm intent.
        """
        ret = {}

        if preprocess_image:
            processed_images = self.preprocess_images(images)
            if any(img is None for img in processed_images):
                logger.error("One or more images failed preprocessing. Aborting run_multi_image.")
                return None
            images_to_process = processed_images
        else:
            images_to_process = images

        # Only manage models that are actually loaded in this pipeline.
        models_to_manage_on_gpu = [
            name for name in self.main_3d_model_names
            if name in self.models and self.models[name] is not None
        ]

        with self.run_lock:
            self.active_run_users += 1
            try:
                if not self.main_models_on_gpu and torch.cuda.is_available():
                    logger.info(f"Models on CPU or pipeline waking up. Moving {len(models_to_manage_on_gpu)} main models to GPU. Active users: {self.active_run_users}.")
                    self._move_models(models_to_manage_on_gpu, 'cuda', empty_cache=False)
                    self.main_models_on_gpu = True
                    torch.cuda.synchronize()
                elif torch.cuda.is_available():
                    logger.debug(f"Main models already on GPU. Active users: {self.active_run_users}.")

                current_operational_device = torch.device("cuda" if self.main_models_on_gpu and torch.cuda.is_available() else "cpu")

                with torch.amp.autocast(device_type=current_operational_device.type, dtype=torch.float16, enabled=(current_operational_device.type == 'cuda')):
                    cond = self.get_cond(images_to_process)
                    # One shared negative conditioning row is enough for CFG.
                    cond['neg_cond'] = cond['neg_cond'][:1]

                    torch.manual_seed(seed)

                    # The injected schedule length must match the sampler's
                    # actual step count, hence the defaults+overrides merge.
                    ss_steps = {**self.sparse_structure_sampler_params, **sparse_structure_sampler_params}.get('steps')
                    with self.inject_sampler_multi_image('sparse_structure_sampler', len(images_to_process), ss_steps, mode=mode):
                        coords = self.sample_sparse_structure(cond, num_samples, sparse_structure_sampler_params, cancel_event)

                    if cancel_event and cancel_event.is_set():
                        raise CancelledException("User Cancelled")

                    slat_steps = {**self.slat_sampler_params, **slat_sampler_params}.get('steps')
                    with self.inject_sampler_multi_image('slat_sampler', len(images_to_process), slat_steps, mode=mode):
                        slat = self.sample_slat(cond, coords, slat_sampler_params)

                    # Decode only the formats whose decoders are loaded.
                    if 'slat_decoder_mesh' in self.models and self.models['slat_decoder_mesh'] is not None and 'mesh' in formats:
                        ret['mesh'] = self.models['slat_decoder_mesh'](slat)
                    if 'slat_decoder_gs' in self.models and self.models['slat_decoder_gs'] is not None and 'gaussian' in formats:
                        ret['gaussian'] = self.models['slat_decoder_gs'](slat)

            except CancelledException:
                logger.info("Pipeline run_multi_image cancelled by user.")
                raise
            except Exception as e:
                logger.error(f"Exception during pipeline run_multi_image's core logic: {e}", exc_info=True)
                raise
            finally:
                # Last user out moves the models back to CPU to free VRAM.
                self.active_run_users -= 1
                if self.active_run_users == 0 and self.main_models_on_gpu:
                    logger.info(f"Last active user. Moving {len(models_to_manage_on_gpu)} main models to CPU.")
                    self._move_models(models_to_manage_on_gpu, 'cpu', empty_cache=True)
                    self.main_models_on_gpu = False

        if torch.cuda.is_available():
            torch.cuda.synchronize()
        return ret
+ def _move_models(self, names: List[str], device: str, empty_cache: bool = False):
706
+ target_torch_device = torch.device(device)
707
+
708
+ for name in names:
709
+ if name not in self.models or self.models[name] is None:
710
+ continue
711
+
712
+ model = self.models[name]
713
+ current_device = next(model.parameters()).device
714
+ if current_device != target_torch_device:
715
+ logger.info(f"🎯 {name}: {current_device} -> {target_torch_device}")
716
+ model.to(target_torch_device)
717
+ self._device = target_torch_device
718
+
719
+ if empty_cache and torch.cuda.is_available():
720
+ logger.info("Emptying CUDA cache")
721
+ torch.cuda.empty_cache()
722
+
723
+ def _move_all_models_to_cpu(self):
724
+ """Moves all models to CPU and frees CUDA memory. Helps to start from a clean state"""
725
+ self._move_models([name for name in self.models], 'cpu', empty_cache=True)
726
+ torch.cuda.empty_cache()