Commit 1b4cf24 by xrg
Parent(s): 4bdba25

update config

Files changed:
- .vscode/launch.json +1 -1
- app.py +34 -4
- core/models.py +1 -1
- example/{blue_cat.png → color_cat.png} +0 -0
.vscode/launch.json
CHANGED
@@ -8,7 +8,7 @@
             "program": "./app.py",
             "console": "integratedTerminal",
             "env": {
-                "CUDA_VISIBLE_DEVICES": "
+                "CUDA_VISIBLE_DEVICES": "0"
             },
             // "args": [
             //     "tiny_trf_trans_nerf",//"tiny_trf_trans_nerf" tiny_trf_trans_nerf_123plus
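The env change pins the debug session to physical GPU 0. As a quick sanity check (a sketch, not part of the commit), the restriction is visible from inside a PyTorch process as long as the variable is set before CUDA initializes:

import os

# Mirror the launch.json env: expose only physical GPU 0 to this process.
# This must happen before CUDA is initialized (i.e. before the first CUDA call).
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

import torch

if torch.cuda.is_available():
    print(torch.cuda.device_count())      # 1, even on a multi-GPU host
    print(torch.cuda.get_device_name(0))  # the single visible device is cuda:0
else:
    print("no visible CUDA device")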
app.py
CHANGED
@@ -34,7 +34,7 @@ GRADIO_OBJ_SHADING_PATH = 'gradio_output_shading.obj'
 
 #opt = tyro.cli(AllConfigs)
 
-ckpt_path = hf_hub_download(repo_id="rgxie/LDM", filename="
+ckpt_path = hf_hub_download(repo_id="rgxie/LDM", filename="LDM6v01.ckpt")
 
 opt = Options(
     input_size=512,
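For context, hf_hub_download fetches a single file from the Hugging Face Hub into the local cache and returns its filesystem path, so the app does not need the checkpoint shipped in the repo. A minimal sketch of the pattern; loading the state dict onto CPU first is an assumption about usage, not something this diff shows:

from huggingface_hub import hf_hub_download
import torch

# Download one file from the Hub (cached locally after the first call)
# and get back its path on disk.
ckpt_path = hf_hub_download(repo_id="rgxie/LDM", filename="LDM6v01.ckpt")

# Typical follow-up (assumption, not from this diff): load onto CPU first,
# then move the model to the single visible GPU.
state = torch.load(ckpt_path, map_location="cpu")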
@@ -145,7 +145,6 @@ def generate_mv(condition_input_image, prompt, prompt_neg='', input_elevation=0,
     kiui.seed_everything(input_seed)
 
     os.makedirs(os.path.join(opt.workspace, "gradio"), exist_ok=True)
-    output_video_path = os.path.join(opt.workspace,"gradio", GRADIO_VIDEO_PATH)
 
     # text-conditioned
     if condition_input_image is None:
@@ -201,6 +200,8 @@ def generate_3d(input_image, condition_input_image, mv_moedl_option=None, input_
     output_obj_rgb_path = os.path.join(opt.workspace,"gradio", GRADIO_OBJ_PATH)
     output_obj_albedo_path = os.path.join(opt.workspace,"gradio", GRADIO_OBJ_ALBEDO_PATH)
     output_obj_shading_path = os.path.join(opt.workspace,"gradio", GRADIO_OBJ_SHADING_PATH)
+
+    output_video_path = os.path.join(opt.workspace,"gradio", GRADIO_VIDEO_PATH)
     # generate gaussians
     # [4, 256, 256, 3], float32
     input_image = torch.from_numpy(input_image).permute(0, 3, 1, 2).float().to(device) # [4, 3, 256, 256]
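This hunk also shows the usual layout conversion: the stacked views arrive channels-last ([4, 256, 256, 3]) and are permuted to the channels-first layout PyTorch convolutions expect. A tiny illustration with dummy data standing in for the real multi-view images:

import numpy as np
import torch

# Four 256x256 RGB views stacked channels-last, as produced upstream.
views = np.zeros((4, 256, 256, 3), dtype=np.float32)  # dummy stand-in

x = torch.from_numpy(views).permute(0, 3, 1, 2).float()
print(x.shape)  # torch.Size([4, 3, 256, 256])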
@@ -274,8 +275,35 @@ def generate_3d(input_image, condition_input_image, mv_moedl_option=None, input_
     save_obj(vertices, faces, vertex_colors[1], output_obj_albedo_path)
     save_obj(vertices, faces, vertex_colors[2], output_obj_shading_path)
 
+    # images=[]
+    # azimuth = np.arange(0, 360, 6, dtype=np.int32)
+    # for azi in tqdm.tqdm(azimuth):
+
+    #     cam_pose = torch.from_numpy(orbit_camera(elevation, azi, radius=opt.cam_radius, opengl=True))
+
+    #     if opt.volume_mode == 'TRF_Mesh':
+    #         cam_view = torch.inverse(cam_pose)
+    #         cam_view=cam_view.unsqueeze(0).unsqueeze(0).to(device)
+    #         data['w2c'] = cam_view
+    #         with torch.autocast(device_type='cuda', dtype=torch.float32):
+    #             render_images=model.render_frame(data)
+    #     else:
+    #         rays_o, rays_d = get_rays(cam_pose, opt.infer_render_size, opt.infer_render_size, opt.fovy) # [h, w, 3]
+    #         rays_o=rays_o.unsqueeze(0).unsqueeze(0).to(device)# B,V,H,W,3
+    #         rays_d=rays_d.unsqueeze(0).unsqueeze(0).to(device)
+    #         data['all_rays_o']=rays_o
+    #         data['all_rays_d']=rays_d
+    #         with torch.autocast(device_type='cuda', dtype=torch.float16):
+    #             render_images=model.render_frame(data)
+    #     image=render_images['images_pred']
+
+    #     images.append((image.squeeze(1).permute(0,2,3,1).contiguous().float().cpu().numpy() * 255).astype(np.uint8))
+
+    # images = np.concatenate(images, axis=0)
+    # imageio.mimwrite(output_video_path, images, fps=30)
+
 
-    return output_obj_rgb_path, output_obj_albedo_path, output_obj_shading_path
+    return output_obj_rgb_path, output_obj_albedo_path, output_obj_shading_path #, output_video_path
 
 
 # gradio UI
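The block added here (left commented out) would orbit a camera around the object, render one frame per azimuth, and write the stack to output_video_path. A runnable sketch of just the video-writing tail, with synthetic frames standing in for the renderer output (requires imageio's ffmpeg plugin for mp4):

import imageio
import numpy as np

# Synthetic stand-ins for rendered frames: 60 RGB images, uint8, 256x256.
frames = [np.full((256, 256, 3), i * 4, dtype=np.uint8) for i in range(60)]

images = np.concatenate([f[None] for f in frames], axis=0)  # [60, 256, 256, 3]
imageio.mimwrite("gradio_output.mp4", images, fps=30)       # same call as the commented code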
@@ -371,6 +399,8 @@ with block:
         with gr.Row():
             # multi-view results
             mv_image_grid = gr.Image(interactive=False, show_label=False)
+        # with gr.Row():
+        #     output_video_path = gr.Video(label="video")
         with gr.Row():
             output_obj_rgb_path = gr.Model3D(
                 label="RGB Model (OBJ Format)",
@@ -393,7 +423,7 @@ with block:
             outputs=[mv_image_grid, processed_image, input_image],).success(
             fn=generate_3d,
             inputs=[input_image, condition_input_image, mv_moedl_option, input_seed],
-            outputs=[output_obj_rgb_path, output_obj_albedo_path, output_obj_shading_path] ,
+            outputs=[output_obj_rgb_path, output_obj_albedo_path, output_obj_shading_path] , #output_video_path
             )
 
 
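The .click(...).success(...) chain means generate_3d only runs after generate_mv finishes without raising. A stripped-down sketch of the same Gradio pattern, with hypothetical step functions in place of the app's real ones:

import gradio as gr

def step_one(x):
    return x.upper()

def step_two(x):
    return f"done: {x}"

with gr.Blocks() as demo:
    inp = gr.Textbox()
    mid = gr.Textbox()
    out = gr.Textbox()
    btn = gr.Button("Run")
    # step_two fires only if step_one completed without an error.
    btn.click(fn=step_one, inputs=[inp], outputs=[mid]).success(
        fn=step_two, inputs=[mid], outputs=[out],
    )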
core/models.py
CHANGED
@@ -108,7 +108,7 @@ class LTRFM_NeRF(nn.Module):
     def extract_mesh(self,
                      planes: torch.Tensor,
                      mesh_resolution: int = 256,
-                     mesh_threshold: int = 0.
+                     mesh_threshold: int = 0.005,
                      use_texture_map: bool = False,
                      texture_resolution: int = 1024,):
 
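mesh_threshold is the iso-level at which the scalar field is contoured into a surface (note the annotation reads int although the new default 0.005 is a float). A generic illustration of how such a threshold feeds marching cubes, using skimage as a stand-in since the repo's own extraction code is not shown in this diff:

import numpy as np
from skimage import measure

# Dummy scalar field on a 64^3 grid: a sphere of radius 0.5 around the origin.
grid = np.linspace(-1, 1, 64)
x, y, z = np.meshgrid(grid, grid, grid, indexing="ij")
field = 0.5 - np.sqrt(x**2 + y**2 + z**2)  # positive inside the sphere

# The threshold selects which iso-surface becomes the mesh.
verts, faces, normals, values = measure.marching_cubes(field, level=0.005)
print(verts.shape, faces.shape)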
example/{blue_cat.png → color_cat.png}
RENAMED
File without changes