JasonYinnnn committed on
Commit
4b10ae8
·
1 Parent(s): 34898d2

use gsplat

Browse files
app.py CHANGED
@@ -192,6 +192,12 @@ def run_depth_estimation(
192
  ) -> Image.Image:
193
  rgb_image = image_prompts["image"].convert("RGB")
194
 
 
 
 
 
 
 
195
  from threeDFixer.datasets.utils import (
196
  normalize_vertices,
197
  project2ply
@@ -325,12 +331,6 @@ def run_generation(
325
  t_rescale: float = 3.0,
326
  work_space: dict = None,
327
  ):
328
-
329
- try:
330
- import diff_gaussian_rasterization
331
- except ModuleNotFoundError:
332
- install_mipsplatting()
333
-
334
  if work_space is None:
335
  raise gr.Error("Please run step 1 and step 2 first.")
336
  required_keys = ["dir", "depth_mask", "depth", "K", "c2w", "trans", "scale"]
 
192
  ) -> Image.Image:
193
  rgb_image = image_prompts["image"].convert("RGB")
194
 
195
+ import torch
196
+ print(torch.__version__)
197
+ print(torch.version.cuda)
198
+ print(torch.__file__)
199
+ print(torch.cuda.is_available())
200
+
201
  from threeDFixer.datasets.utils import (
202
  normalize_vertices,
203
  project2ply
 
331
  t_rescale: float = 3.0,
332
  work_space: dict = None,
333
  ):
 
 
 
 
 
 
334
  if work_space is None:
335
  raise gr.Error("Please run step 1 and step 2 first.")
336
  required_keys = ["dir", "depth_mask", "depth", "K", "c2w", "trans", "scale"]
requirements.txt CHANGED
@@ -40,3 +40,4 @@ pydantic==2.10.6
40
  kaolin==0.18.0
41
  flash-attn==2.8.3+pt2.8.0cu129
42
  nvdiffrast==0.4.0+253ac4fpt2.8.0cu129
 
 
40
  kaolin==0.18.0
41
  flash-attn==2.8.3+pt2.8.0cu129
42
  nvdiffrast==0.4.0+253ac4fpt2.8.0cu129
43
+ git+https://github.com/nerfstudio-project/gsplat.git
threeDFixer/renderers/gaussian_render.py CHANGED
@@ -47,97 +47,71 @@ def intrinsics_to_projection(
47
  return ret
48
 
49
 
50
- def render(viewpoint_camera, pc : Gaussian, pipe, bg_color : torch.Tensor, scaling_modifier = 1.0, override_color = None):
51
- """
52
- Render the scene.
53
-
54
- Background tensor (bg_color) must be on GPU!
55
- """
56
  # lazy import
57
- if 'GaussianRasterizer' not in globals():
58
- from diff_gaussian_rasterization import GaussianRasterizer, GaussianRasterizationSettings
59
-
60
- # Create zero tensor. We will use it to make pytorch return gradients of the 2D (screen-space) means
61
- screenspace_points = torch.zeros_like(pc.get_xyz, dtype=pc.get_xyz.dtype, requires_grad=True, device="cuda") + 0
62
- try:
63
- screenspace_points.retain_grad()
64
- except:
65
- pass
66
- # Set up rasterization configuration
67
  tanfovx = math.tan(viewpoint_camera.FoVx * 0.5)
68
  tanfovy = math.tan(viewpoint_camera.FoVy * 0.5)
69
-
70
- kernel_size = pipe.kernel_size
71
- subpixel_offset = torch.zeros((int(viewpoint_camera.image_height), int(viewpoint_camera.image_width), 2), dtype=torch.float32, device="cuda")
72
-
73
- raster_settings = GaussianRasterizationSettings(
74
- image_height=int(viewpoint_camera.image_height),
75
- image_width=int(viewpoint_camera.image_width),
76
- tanfovx=tanfovx,
77
- tanfovy=tanfovy,
78
- kernel_size=kernel_size,
79
- subpixel_offset=subpixel_offset,
80
- bg=bg_color,
81
- scale_modifier=scaling_modifier,
82
- viewmatrix=viewpoint_camera.world_view_transform,
83
- projmatrix=viewpoint_camera.full_proj_transform,
84
- sh_degree=pc.active_sh_degree,
85
- campos=viewpoint_camera.camera_center,
86
- prefiltered=False,
87
- debug=pipe.debug
88
  )
89
-
90
- rasterizer = GaussianRasterizer(raster_settings=raster_settings)
91
 
92
  means3D = pc.get_xyz
93
- means2D = screenspace_points
94
  opacity = pc.get_opacity
 
 
95
 
96
- # If precomputed 3d covariance is provided, use it. If not, then it will be computed from
97
- # scaling / rotation by the rasterizer.
98
- scales = None
99
- rotations = None
100
- cov3D_precomp = None
101
- if pipe.compute_cov3D_python:
102
- cov3D_precomp = pc.get_covariance(scaling_modifier)
103
- else:
104
- scales = pc.get_scaling
105
- rotations = pc.get_rotation
106
-
107
- # If precomputed colors are provided, use them. Otherwise, if it is desired to precompute colors
108
- # from SHs in Python, do it. If not, then SH -> RGB conversion will be done by rasterizer.
109
- shs = None
110
- colors_precomp = None
111
- if override_color is None:
112
- if pipe.convert_SHs_python:
113
- shs_view = pc.get_features.transpose(1, 2).view(-1, 3, (pc.max_sh_degree+1)**2)
114
- dir_pp = (pc.get_xyz - viewpoint_camera.camera_center.repeat(pc.get_features.shape[0], 1))
115
- dir_pp_normalized = dir_pp/dir_pp.norm(dim=1, keepdim=True)
116
- sh2rgb = eval_sh(pc.active_sh_degree, shs_view, dir_pp_normalized)
117
- colors_precomp = torch.clamp_min(sh2rgb + 0.5, 0.0)
118
- else:
119
- shs = pc.get_features
120
  else:
121
- colors_precomp = override_color
122
-
123
- # Rasterize visible Gaussians to image, obtain their radii (on screen).
124
- rendered_image, radii = rasterizer(
125
- means3D = means3D,
126
- means2D = means2D,
127
- shs = shs,
128
- colors_precomp = colors_precomp,
129
- opacities = opacity,
130
- scales = scales,
131
- rotations = rotations,
132
- cov3D_precomp = cov3D_precomp
 
 
 
 
 
 
 
133
  )
134
 
135
- # Those Gaussians that were frustum culled or had a radius of 0 were not visible.
136
- # They will be excluded from value updates used in the splitting criteria.
137
- return edict({"render": rendered_image,
138
- "viewspace_points": screenspace_points,
139
- "visibility_filter" : radii > 0,
140
- "radii": radii})
 
 
 
 
 
 
 
 
141
 
142
 
143
  class GaussianRenderer:
 
47
  return ret
48
 
49
 
50
+ def render(viewpoint_camera, pc, pipe, bg_color: torch.Tensor, scaling_modifier=1.0, override_color=None):
 
 
 
 
 
51
  # lazy import
52
+ if "rasterization" not in globals():
53
+ from gsplat import rasterization
54
+
 
 
 
 
 
 
 
55
  tanfovx = math.tan(viewpoint_camera.FoVx * 0.5)
56
  tanfovy = math.tan(viewpoint_camera.FoVy * 0.5)
57
+
58
+ focal_length_x = viewpoint_camera.image_width / (2 * tanfovx)
59
+ focal_length_y = viewpoint_camera.image_height / (2 * tanfovy)
60
+
61
+ K = torch.tensor(
62
+ [
63
+ [focal_length_x, 0, viewpoint_camera.image_width / 2.0],
64
+ [0, focal_length_y, viewpoint_camera.image_height / 2.0],
65
+ [0, 0, 1],
66
+ ],
67
+ device=pc.get_xyz.device,
68
+ dtype=torch.float32,
 
 
 
 
 
 
 
69
  )
 
 
70
 
71
  means3D = pc.get_xyz
 
72
  opacity = pc.get_opacity
73
+ scales = pc.get_scaling * scaling_modifier
74
+ rotations = pc.get_rotation
75
 
76
+ if override_color is not None:
77
+ colors = override_color # [N, 3]
78
+ sh_degree = None
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
79
  else:
80
+ colors = pc.get_features # [N, K, 3]
81
+ sh_degree = pc.active_sh_degree
82
+
83
+ viewmat = viewpoint_camera.world_view_transform.transpose(0, 1)
84
+
85
+ render_colors, render_alphas, info = rasterization(
86
+ means=means3D, # [N, 3]
87
+ quats=rotations, # [N, 4]
88
+ scales=scales, # [N, 3]
89
+ opacities=opacity.squeeze(-1), # [N]
90
+ colors=colors,
91
+ viewmats=viewmat[None], # [1, 4, 4]
92
+ Ks=K[None], # [1, 3, 3]
93
+ backgrounds=bg_color[None],
94
+ width=int(viewpoint_camera.image_width),
95
+ height=int(viewpoint_camera.image_height),
96
+ packed=False,
97
+ sh_degree=sh_degree,
98
+ rasterize_mode='antialiased'
99
  )
100
 
101
+ rendered_image = render_colors[0].permute(2, 0, 1)
102
+ radii = info["radii"].squeeze(0)
103
+
104
+ try:
105
+ info["means2d"].retain_grad()
106
+ except Exception:
107
+ pass
108
+
109
+ return edict({
110
+ "render": rendered_image,
111
+ "viewspace_points": info["means2d"],
112
+ "visibility_filter": radii > 0,
113
+ "radii": radii,
114
+ })
115
 
116
 
117
  class GaussianRenderer: