Delete mb-dog-walk-3d-reconstruction-vggt-wo-bipl.ipynb
Browse files
mb-dog-walk-3d-reconstruction-vggt-wo-bipl.ipynb
DELETED
|
@@ -1 +0,0 @@
|
|
| 1 |
-
# ============================================================================
# 3D Reconstruction with VGGT (wo/biplet)
# Recovered from a collapsed notebook dump; cells re-emitted as linear Python.
# ============================================================================

# --- movie to frames --------------------------------------------------------
import os
import sys
import gc
import shutil
import struct
import subprocess
from pathlib import Path

import numpy as np
import torch
import torch.nn.functional as F

# tqdm / PIL ship with Kaggle images; guarded so the module also loads in a
# minimal environment (both names are only dereferenced at call time).
try:
    from tqdm import tqdm
    from PIL import Image
except ImportError:  # pragma: no cover - optional in stripped-down envs
    tqdm = None
    Image = None

# --- Configuration ---
movie_path = '/kaggle/input/datasets/stpeteishii/my-movie/path22.MP4'  # dog walk
MAX_FRAMES = 30  # maximum number of frames to save


def extract_frames(video_path: str, out_dir: str = "frames",
                   max_frames: int = MAX_FRAMES,
                   frames_per_second: float = 1.0) -> int:
    """Extract up to ``max_frames`` half-resolution frames from a video.

    Frames are sampled roughly ``frames_per_second`` times per video second
    and written as ``out_dir/frame_NNNNNN.png``.

    Args:
        video_path: path to the input movie file.
        out_dir: output directory, recreated from scratch on every call.
        max_frames: hard cap on the number of frames written.
        frames_per_second: target sampling rate (the original hard-coded
            one frame per second via ``int(fps / 1)``).

    Returns:
        Number of frames actually written (0 if the video cannot be opened).
    """
    import cv2  # local import: heavy dependency, only needed when extracting

    # Clean and recreate the frames folder (portable replacement for the
    # original `!rm -rf frames` shell escape).
    shutil.rmtree(out_dir, ignore_errors=True)
    os.makedirs(out_dir, exist_ok=True)

    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        print("Error: Could not open video file.")
        return 0

    fps = cap.get(cv2.CAP_PROP_FPS)
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    print(f"FPS: {fps}, Total Video Frames: {total_frames}")

    # BUG FIX: original used `int(fps / 1)` (a no-op division) and did not
    # guard against containers that report fps == 0.
    interval = max(1, int(round(fps / frames_per_second))) if fps > 0 else 1
    frame_num = 0
    saved_count = 0

    print(f"Extraction Interval: Every {interval} frames")
    print(f"Limit: Saving up to {max_frames} frames")

    while True:
        ret, frame = cap.read()
        if not ret:
            break

        # Process frame based on the interval
        if frame_num % interval == 0:
            height, width = frame.shape[:2]

            # Halve the resolution to keep downstream VGGT memory low.
            resized_frame = cv2.resize(frame, (width // 2, height // 2),
                                       interpolation=cv2.INTER_AREA)

            cv2.imwrite(os.path.join(out_dir, f"frame_{saved_count:06d}.png"),
                        resized_frame)
            saved_count += 1

            if saved_count % 10 == 0:
                print(f"Saved: {saved_count} frames")

            if saved_count >= max_frames:
                print(f"Reached limit of {max_frames} frames. Stopping.")
                break

        frame_num += 1

    cap.release()
    print(f"Done! Total {saved_count} frames extracted.")
    return saved_count


class Config:
    """Pipeline-wide settings.

    NOTE: the class name is rebound to a single shared *instance* right
    below, preserving the original notebook's ``Config=Config()`` behaviour.
    """

    # -- VGGT ----------------------------------------------------------------
    VGGT_MODEL = "facebook/VGGT-1B"  # HuggingFace model ID (auto-downloaded)
    # bfloat16 on Ampere+ (A100, ...), float16 on older (T4, V100).
    # None -> auto-detected in load_vggt_model().
    DTYPE = None

    # -- Image preprocessing -------------------------------------------------
    SQUARE_SIZE = 518  # crop size compatible with VGGT (it resizes internally)

    # -- Memory guard --------------------------------------------------------
    # T4 (16GB): safe limit ~40 images; A100 (40GB): safe limit ~150 images
    MAX_IMAGES = 30

    # -- Confidence threshold (%) for 3D point filtering ---------------------
    CONF_THRESHOLD_PCT = 50.0  # drop lowest N% of confidence scores

    # -- Device --------------------------------------------------------------
    DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")


Config = Config()


if __name__ == "__main__":  # also true inside Jupyter -> cell still runs
    extract_frames(movie_path, "frames", MAX_FRAMES)
# ============================================================================
# Memory Management Utilities
# ============================================================================

def clear_memory():
    """Aggressively clear GPU and CPU memory (safe no-op without CUDA)."""
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        torch.cuda.synchronize()


def get_memory_info():
    """Print current GPU (when available) and CPU memory usage."""
    if torch.cuda.is_available():
        allocated = torch.cuda.memory_allocated() / 1024**3
        reserved = torch.cuda.memory_reserved() / 1024**3
        print(f"GPU Memory - Allocated: {allocated:.2f}GB, Reserved: {reserved:.2f}GB")
    import psutil  # third-party; preinstalled on Kaggle
    cpu_mem = psutil.virtual_memory().percent
    print(f"CPU Memory Usage: {cpu_mem:.1f}%")


def run_cmd(cmd, check=True, capture=False):
    """Run a shell command with error reporting.

    Args:
        cmd: argv list (no shell interpolation).
        check: when True, print a diagnostic on non-zero exit status.
            Matching the original, this never raises.
        capture: capture stdout/stderr onto the returned object.

    Returns:
        subprocess.CompletedProcess
    """
    print(f"Running: {' '.join(cmd)}")
    result = subprocess.run(cmd, capture_output=capture, text=True, check=False)
    if check and result.returncode != 0:
        # (garbled mojibake glyph in the original message replaced)
        print(f"[FAIL] Command failed (code {result.returncode})")
        if capture:
            print(f"STDOUT: {result.stdout}")
            print(f"STDERR: {result.stderr}")
    return result


# ============================================================================
# Environment Setup - Base packages
# ============================================================================

def setup_base_environment():
    """Install base Python dependencies.

    BUG FIX: the original ran the identical package list through pip twice
    and installed pycolmap twice on top of that; one invocation suffices.
    """
    print("\n=== Setting up Base Environment ===")
    print("\nChecking PyTorch (using pre-installed Kaggle version)...")
    print("\nInstalling core utilities + pycolmap...")
    run_cmd([sys.executable, "-m", "pip", "install", "-q",
             "opencv-python", "pillow", "imageio", "imageio-ffmpeg",
             "plyfile", "tqdm", "scipy", "psutil", "trimesh", "pycolmap"])
    print("[OK] Base environment setup complete!")


def setup_vggt():
    """Clone VGGT, install its requirements and make it importable.

    Raises:
        Exception: re-raised when the post-install import check fails.
    """
    print("\n=== Setting up VGGT ===")
    os.chdir('/kaggle/working')
    if os.path.exists('vggt'):
        print("[OK] VGGT directory already exists - skipping clone")
    else:
        print("Cloning VGGT repository...")
        os.system('git clone https://github.com/tztechno/vggt.git')
    os.chdir('/kaggle/working/vggt')
    print("Installing VGGT requirements...")
    os.system('pip install -q -r requirements.txt')

    # Add VGGT to Python path
    if '/kaggle/working/vggt' not in sys.path:
        sys.path.insert(0, '/kaggle/working/vggt')

    # Verify installation
    print("\nVerifying VGGT installation...")
    try:
        from vggt.models.vggt import VGGT
        from vggt.utils.load_fn import load_and_preprocess_images
        from vggt.utils.pose_enc import pose_encoding_to_extri_intri
        from vggt.utils.geometry import unproject_depth_map_to_point_map
        print("  [OK] VGGT import: OK")
    except Exception as e:
        print(f"  [FAIL] VGGT import failed: {e}")
        raise

    print("[OK] VGGT setup complete!")
    print("  Model weights (~1.2 GB) will be downloaded on first use.")


if __name__ == "__main__":  # also true inside Jupyter -> cells still execute
    setup_base_environment()
    clear_memory()

    setup_vggt()
    clear_memory()

    # Ensure VGGT is on the path for the rest of the notebook
    if '/kaggle/working/vggt' not in sys.path:
        sys.path.insert(0, '/kaggle/working/vggt')
notebook\nif '/kaggle/working/vggt' not in sys.path:\n sys.path.insert(0, '/kaggle/working/vggt')\n","metadata":{},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"# VGGT Reconstruction\n\n> VGGT processes **all images at once** as a feed-forward pass β no pair selection or feature matching needed.\n\nThe model outputs:\n- `extrinsic` (S, 3, 4) β camera-from-world matrices (OpenCV convention)\n- `intrinsic` (S, 3, 3) β camera intrinsic matrices\n- `world_points` (S, H, W, 3) β 3D point maps\n- `world_points_conf` (S, H, W) β confidence scores\n- `depth` (S, H, W, 1) β depth maps\n- `depth_conf` (S, H, W) β depth confidence","metadata":{}},{"cell_type":"code","source":"# ============================================================================\n# VGGT Model Loading\n# ============================================================================\n\ndef load_vggt_model():\n \"\"\"Load VGGT model (weights auto-downloaded from HuggingFace on first run).\"\"\"\n from vggt.models.vggt import VGGT\n\n device = Config.DEVICE\n\n print(f\"\\n=== Loading VGGT model ===\")\n print(f\"Device: {device}\")\n print(f\"βΉοΈ Downloading weights on first run (~1.2 GB)...\")\n\n model = VGGT.from_pretrained(Config.VGGT_MODEL).to(device)\n model.eval()\n\n # Determine dtype\n if Config.DTYPE is not None:\n dtype = Config.DTYPE\n elif torch.cuda.is_available():\n cap = torch.cuda.get_device_capability()[0]\n dtype = torch.bfloat16 if cap >= 8 else torch.float16\n else:\n dtype = torch.float32\n\n print(f\"β VGGT model loaded | dtype={dtype}\")\n get_memory_info()\n\n return model, dtype\n","metadata":{},"outputs":[],"execution_count":null},{"cell_type":"code","source":"# ============================================================================\n# VGGT Inference\n# ============================================================================\n\ndef run_vggt(model, dtype, image_paths, max_images=None):\n \"\"\"\n Run VGGT on a list of image paths.\n VGGT 
processes all images jointly β no pair selection needed.\n\n Args:\n model : loaded VGGT model\n dtype : torch dtype (bfloat16 / float16 / float32)\n image_paths : list of paths to preprocessed square images\n max_images : cap number of images to avoid OOM\n\n Returns:\n predictions : dict of tensors, all on CPU\n images : (S, 3, H, W) tensor of preprocessed images, on CPU\n image_paths : list of paths actually used (may be subsampled)\n \"\"\"\n from vggt.utils.load_fn import load_and_preprocess_images\n from vggt.utils.pose_enc import pose_encoding_to_extri_intri\n from vggt.utils.geometry import unproject_depth_map_to_point_map\n\n device = Config.DEVICE\n\n # ββ Limit image count βββββββββββββββββββββββββββββββββββββββββββββββββ\n if max_images and len(image_paths) > max_images:\n print(f\"β οΈ Limiting from {len(image_paths)} β {max_images} images to avoid OOM\")\n step = len(image_paths) / max_images\n image_paths = [image_paths[int(i * step)] for i in range(max_images)]\n\n print(f\"\\n=== Running VGGT on {len(image_paths)} images ===\")\n print(\"Loading and preprocessing images...\")\n get_memory_info()\n\n images = load_and_preprocess_images(image_paths).to(device)\n print(f\" Preprocessed image tensor: {images.shape} (S, C, H, W)\")\n\n # ββ Forward pass ββββββββββββββββββββββββββββββββββββββββββββββββββββββ\n print(\"Running VGGT inference (this may take a moment for large batches)...\")\n with torch.no_grad():\n with torch.cuda.amp.autocast(dtype=dtype):\n predictions = model(images)\n\n # ββ Decode pose encoding β extrinsic / intrinsic ββββββββββββββββββββββ\n print(\"Converting pose encoding to camera matrices...\")\n with torch.no_grad():\n extrinsic, intrinsic = pose_encoding_to_extri_intri(\n predictions[\"pose_enc\"], images.shape[-2:]\n )\n\n # pose_encoding_to_extri_intri may return (1, S, 4, 4) / (1, S, 3, 3)\n # Strip any leading batch dimensions so shapes are (S, 4, 4) / (S, 3, 3)\n def strip_batch_dims(t, target_ndim):\n \"\"\"Remove 
leading size-1 dimensions until ndim == target_ndim.\"\"\"\n while t.ndim > target_ndim:\n if t.shape[0] == 1:\n t = t.squeeze(0)\n else:\n raise ValueError(\n f\"Cannot strip batch dim: shape {t.shape}, target ndim {target_ndim}\"\n )\n return t\n\n extrinsic = strip_batch_dims(extrinsic, target_ndim=3) # (S, 4, 4) or (S, 3, 4)\n intrinsic = strip_batch_dims(intrinsic, target_ndim=3) # (S, 3, 3)\n print(f\" extrinsic shape: {extrinsic.shape}\")\n print(f\" intrinsic shape: {intrinsic.shape}\")\n\n predictions[\"extrinsic\"] = extrinsic\n predictions[\"intrinsic\"] = intrinsic\n\n # ββ Unproject depth β world-space 3D point map ββββββββββββββββββββββββ\n print(\"Unprojecting depth maps to 3D point map...\")\n with torch.no_grad():\n depth = predictions[\"depth\"]\n\n # Strip any leading batch dimensions first: (1, S, ...) β (S, ...)\n depth = strip_batch_dims(depth, target_ndim=4)\n\n # Normalize channel layout to (S, H, W, 1) as geometry.py expects\n if depth.ndim == 3:\n # (S, H, W) β (S, H, W, 1)\n depth = depth.unsqueeze(-1)\n elif depth.ndim == 4:\n if depth.shape[-1] != 1:\n # (S, 1, H, W) β (S, H, W, 1)\n if depth.shape[1] == 1:\n depth = depth.permute(0, 2, 3, 1).contiguous()\n else:\n raise ValueError(\n f\"Unexpected depth shape after stripping batch dim: {depth.shape}\"\n )\n # else: already (S, H, W, 1)\n\n print(f\" Depth shape (normalised): {depth.shape}\") # β (S, H, W, 1)\n assert depth.ndim == 4 and depth.shape[-1] == 1, \\\n f\"depth must be (S, H, W, 1) at this point, got {depth.shape}\"\n\n pts3d_from_depth = unproject_depth_map_to_point_map(\n depth, extrinsic, intrinsic\n )\n\n predictions[\"world_points_from_depth\"] = pts3d_from_depth # (S, H, W, 3)\n\n # ββ Move everything to CPU to free device memory βββββββββββββββββββββββ\n # ββ Strip batch dims + move everything to CPU βββββββββββββββββββββββββ\n print(\"Moving predictions to CPU...\")\n\n def strip_batch_dims(t, target_ndim):\n while t.ndim > target_ndim and t.shape[0] == 1:\n t = 
# ============================================================================
# Process3: VGGT Predictions -> COLMAP Format
#
#   VGGT output                          | COLMAP usage
#   -------------------------------------+---------------------------------
#   extrinsic (S, 3, 4)                  | R, t per camera (OpenCV = COLMAP)
#   intrinsic (S, 3, 3)                  | fx, fy, cx, cy per camera
#   world_points_from_depth (S, H, W, 3) | 3D point positions
#   world_points_conf (S, H, W)          | confidence filtering
# ============================================================================

import numpy as np
import struct
from pathlib import Path
from scipy.spatial.transform import Rotation


def write_next_bytes(fid, data, format_str):
    """Pack `data` little-endian with struct `format_str` and write to `fid`."""
    if isinstance(data, (list, tuple, np.ndarray)):
        fid.write(struct.pack("<" + format_str, *data))
    else:
        fid.write(struct.pack("<" + format_str, data))


def matrix_to_quaternion_translation(matrix_3x4: np.ndarray):
    """3x4 [R | t] -> COLMAP quaternion [w, x, y, z] + translation t."""
    R = matrix_3x4[:3, :3]
    t = matrix_3x4[:3, 3]
    quat = Rotation.from_matrix(R).as_quat()  # scipy order: [x, y, z, w]
    qvec = np.array([quat[3], quat[0], quat[1], quat[2]])  # COLMAP: [w, x, y, z]
    return qvec, t


def write_cameras_binary(cameras, path):
    """Write COLMAP cameras.bin; every camera is PINHOLE (model id 1)."""
    with open(path, "wb") as fid:
        write_next_bytes(fid, len(cameras), "Q")
        for cam_id, cam in cameras.items():
            write_next_bytes(fid, cam_id, "I")
            write_next_bytes(fid, 1, "I")  # PINHOLE
            write_next_bytes(fid, cam['width'], "Q")
            write_next_bytes(fid, cam['height'], "Q")
            for p in cam['params']:
                write_next_bytes(fid, float(p), "d")


def write_images_binary(images_data, path):
    """Write COLMAP images.bin.

    BUG FIX: the original wrote the image name one *character* at a time
    with struct format "c" (exactly one byte) - a non-ASCII character
    encodes to multiple UTF-8 bytes and raised struct.error.  The whole
    name is now encoded once and written with its NUL terminator
    (byte-identical output for ASCII names).
    """
    with open(path, "wb") as fid:
        write_next_bytes(fid, len(images_data), "Q")
        for img_id, img in images_data.items():
            write_next_bytes(fid, img_id, "I")
            write_next_bytes(fid, img['qvec'], "dddd")
            write_next_bytes(fid, img['tvec'], "ddd")
            write_next_bytes(fid, img['camera_id'], "I")
            fid.write(img['name'].encode("utf-8") + b"\x00")
            write_next_bytes(fid, len(img['xys']), "Q")
            for xy, pid in zip(img['xys'], img['point3D_ids']):
                write_next_bytes(fid, xy, "dd")
                write_next_bytes(fid, pid, "Q")


def write_points3d_binary(points3D, path):
    """Write COLMAP points3D.bin; point ids are the list indices."""
    with open(path, "wb") as fid:
        write_next_bytes(fid, len(points3D), "Q")
        for pid, pt in enumerate(points3D):
            write_next_bytes(fid, pid, "Q")
            write_next_bytes(fid, pt['xyz'], "ddd")
            write_next_bytes(fid, pt['rgb'], "BBB")
            write_next_bytes(fid, pt['error'], "d")
            write_next_bytes(fid, len(pt['image_ids']), "Q")
            for iid, p2d_idx in zip(pt['image_ids'], pt['point2D_idxs']):
                write_next_bytes(fid, int(iid), "I")
                write_next_bytes(fid, int(p2d_idx), "I")


# ============================================================================
# VGGT -> COLMAP conversion (replaces extract_scene_data / convert_mast3r_*)
# ============================================================================

def extract_vggt_scene_data(predictions, image_paths, conf_threshold_pct=20.0,
                            verbose=True):
    """Convert VGGT predictions to COLMAP cameras / images / points3D structures.

    Args:
        predictions : dict returned by run_vggt()
        image_paths : list of source image paths (for colors & names)
        conf_threshold_pct : drop the lowest N% confidence points
        verbose : print progress

    Returns:
        cameras : {camera_id: {...}}
        images_data: {image_id: {...}}
        points3D : [{xyz, rgb, error, image_ids, point2D_idxs}]
    """
    import numpy as np
    import torch

    cameras = {}
    images_data = {}
    points3D = []

    # -- Grab tensors --------------------------------------------------------
    extrinsic = predictions["extrinsic"]  # (S, 3, 4)
    intrinsic = predictions["intrinsic"]  # (S, 3, 3)

    # Prefer depth-unprojected points (more accurate)
    if "world_points_from_depth" in predictions:
        pts3d = predictions["world_points_from_depth"]   # (S, H, W, 3)
        conf = predictions.get("depth_conf", None)       # (S, H, W) or (S, H, W, 1)
        if verbose:
            print("  Using depth-unprojected 3D points")
    else:
        pts3d = predictions["world_points"]              # (S, H, W, 3)
        conf = predictions.get("world_points_conf", None)
        if verbose:
            print("  Using world_points 3D points")

    if isinstance(extrinsic, torch.Tensor):
        extrinsic = extrinsic.detach().cpu().numpy()
    if isinstance(intrinsic, torch.Tensor):
        intrinsic = intrinsic.detach().cpu().numpy()
    if isinstance(pts3d, torch.Tensor):
        pts3d = pts3d.detach().cpu().numpy()
    if conf is not None and isinstance(conf, torch.Tensor):
        conf = conf.detach().cpu().numpy()

    # Squeeze trailing dim if depth_conf has shape (S, H, W, 1)
    if conf is not None and conf.ndim == 4:
        conf = conf.squeeze(-1)

    S, H, W = pts3d.shape[:3]
    if verbose:
        print(f"  Views: {S}  Point map: {H}x{W}")

    # VGGT internally resizes; store the preprocessed size for COLMAP.
    img_h, img_w = H, W

    # -- Cameras & images ----------------------------------------------------
    for idx in range(S):
        K = intrinsic[idx]    # (3, 3)
        ext = extrinsic[idx]  # (3, 4)

        cameras[idx] = {
            'model': 'PINHOLE',
            'width': img_w,
            'height': img_h,
            'params': [float(K[0, 0]), float(K[1, 1]),
                       float(K[0, 2]), float(K[1, 2])],  # fx, fy, cx, cy
        }

        # VGGT extrinsic is camera-from-world [R | t] - same convention as
        # COLMAP, so no inversion is needed.
        qvec, tvec = matrix_to_quaternion_translation(ext)

        img_name = (Path(image_paths[idx]).name if idx < len(image_paths)
                    else f"image_{idx:04d}.jpg")

        images_data[idx + 1] = {
            'qvec': qvec,
            'tvec': tvec,
            'camera_id': idx,
            'name': img_name,
            'xys': np.empty((0, 2)),
            'point3D_ids': np.empty((0,), dtype=np.int64),
        }

    # -- 3D points ------------------------------------------------------------
    if verbose:
        print("  Extracting 3D points with colors...")

    # Global confidence mask: keep points above the N-th percentile.
    if conf is not None:
        finite_vals = conf.reshape(-1)
        finite_vals = finite_vals[np.isfinite(finite_vals)]
        if finite_vals.size:
            thr = np.percentile(finite_vals, conf_threshold_pct)
            conf_mask = (conf >= thr).reshape(S, H, W)
        else:
            # ROBUSTNESS: all-NaN confidence used to crash np.percentile.
            conf_mask = np.ones((S, H, W), dtype=bool)
    else:
        conf_mask = np.ones((S, H, W), dtype=bool)

    for view_idx in range(S):
        # Load the source image for per-point colors.
        src_path = image_paths[view_idx] if view_idx < len(image_paths) else None
        if src_path and os.path.exists(src_path):
            img = np.array(Image.open(src_path).convert('RGB').resize((W, H), Image.LANCZOS))
        else:
            img = np.full((H, W, 3), 128, dtype=np.uint8)

        mask_view = conf_mask[view_idx]          # (H, W)
        pts_flat = pts3d[view_idx][mask_view]    # (N, 3)
        col_flat = img[mask_view]                # (N, 3)

        # Vectorised finiteness test instead of per-point np.all(np.isfinite).
        finite = np.isfinite(pts_flat).all(axis=1)
        for pt, col in zip(pts_flat[finite], col_flat[finite]):
            points3D.append({
                'xyz': pt.astype(np.float64),
                'rgb': col.astype(np.uint8),
                'error': 0.0,
                'image_ids': np.array([], dtype=np.int32),
                'point2D_idxs': np.array([], dtype=np.int32),
            })

    if verbose:
        print(f"  Extracted {len(points3D):,} 3D points")
        print(f"  Built {len(cameras)} cameras, {len(images_data)} image entries")

    return cameras, images_data, points3D
def save_images_to_colmap_dir(image_paths, images_dir, verbose=True):
    """Copy preprocessed images into the COLMAP images/ directory.

    Args:
        image_paths: source image file paths.
        images_dir: destination directory (created if missing).
        verbose: print a summary line.
    """
    import shutil
    images_dir = Path(images_dir)
    images_dir.mkdir(parents=True, exist_ok=True)

    for src in image_paths:
        shutil.copy2(src, images_dir / Path(src).name)

    if verbose:
        print(f"  Copied {len(image_paths)} images to {images_dir}")


def convert_vggt_to_colmap(predictions, image_paths, output_dir,
                           conf_threshold_pct=20.0, verbose=True):
    """Convert VGGT predictions to a COLMAP sparse reconstruction on disk.

    Directory structure created:
        output_dir/
        |-- images/              (copied source images)
        `-- sparse/0/
            |-- cameras.bin
            |-- images.bin
            `-- points3D.bin

    Args:
        predictions : dict from run_vggt()
        image_paths : list of image file paths used for inference
        output_dir : root output directory
        conf_threshold_pct : drop lowest N% confidence points
        verbose : print progress

    Returns:
        Path to the sparse/0 directory.
    """
    output_dir = Path(output_dir)
    sparse_dir = output_dir / "sparse" / "0"
    images_dir = output_dir / "images"
    sparse_dir.mkdir(parents=True, exist_ok=True)
    images_dir.mkdir(parents=True, exist_ok=True)

    if verbose:
        print("\n" + "=" * 70)
        print("Converting VGGT predictions -> COLMAP format")
        print("=" * 70)
        print(f"Output: {output_dir}")

    cameras, images_data, points3D = extract_vggt_scene_data(
        predictions, image_paths, conf_threshold_pct, verbose
    )

    save_images_to_colmap_dir(image_paths, images_dir, verbose)

    if verbose:
        print("\nWriting COLMAP binary files...")

    write_cameras_binary(cameras, sparse_dir / "cameras.bin")
    if verbose:
        print(f"  [OK] cameras.bin ({len(cameras)} cameras)")

    write_images_binary(images_data, sparse_dir / "images.bin")
    if verbose:
        print(f"  [OK] images.bin ({len(images_data)} images)")

    write_points3d_binary(points3D, sparse_dir / "points3D.bin")
    if verbose:
        print(f"  [OK] points3D.bin ({len(points3D):,} points)")
        print("\n" + "=" * 70)
        print("COLMAP conversion complete!")
        print("=" * 70)

    return sparse_dir


# ============================================================================
# bin to ply
# ============================================================================

def convert_colmap_bin_to_ply(sparse_dir, output_ply_path):
    """Export the COLMAP point cloud to a coloured PLY via pycolmap.

    Args:
        sparse_dir : path to the sparse/0/ directory
        output_ply_path : destination PLY file path

    Returns:
        Number of points written (0 when the reconstruction is empty).
    """
    import pycolmap
    from plyfile import PlyData, PlyElement

    print(f"\n=== Converting COLMAP bin -> PLY ===")
    rec = pycolmap.Reconstruction(str(sparse_dir))
    print(f"Loaded reconstruction:")
    print(f"  {len(rec.cameras)} cameras")
    print(f"  {len(rec.images)} images")
    print(f"  {len(rec.points3D)} points")

    if len(rec.points3D) == 0:
        print("No 3D points in reconstruction!")
        return 0

    points = np.array([p.xyz for p in rec.points3D.values()])
    colors = np.array([p.color for p in rec.points3D.values()])

    print(f"\nPoint cloud statistics:")
    print(f"  Total points: {len(points):,}")
    print(f"  X range: [{points[:, 0].min():.3f}, {points[:, 0].max():.3f}]")
    print(f"  Y range: [{points[:, 1].min():.3f}, {points[:, 1].max():.3f}]")
    print(f"  Z range: [{points[:, 2].min():.3f}, {points[:, 2].max():.3f}]")

    # PERF FIX: fill the structured array column-wise instead of building a
    # Python tuple per point (the original looped over every point).
    vertices = np.empty(len(points),
                        dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4'),
                               ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')])
    vertices['x'], vertices['y'], vertices['z'] = points.T.astype('f4')
    vertices['red'], vertices['green'], vertices['blue'] = colors.T.astype('u1')

    PlyData([PlyElement.describe(vertices, 'vertex')]).write(output_ply_path)
    print(f"Saved PLY -> {output_ply_path}")
    return len(points)
# ============================================================================
# Main Pipeline
# ============================================================================
# Guarded by __name__ == "__main__", which is also true inside a Jupyter
# kernel, so the cells still execute when run as a notebook while the module
# stays importable without side effects.

if __name__ == "__main__":
    # -- Configuration: edit these paths and settings ------------------------
    image_dir = "/kaggle/working/frames"
    output_dir = "/kaggle/working/output"

    square_size = 518                           # VGGT works well with 518
    max_images = Config.MAX_IMAGES              # hard cap for GPU memory safety
    conf_threshold = Config.CONF_THRESHOLD_PCT  # drop lowest N% confidence

    os.makedirs(output_dir, exist_ok=True)
    processed_image_dir = image_dir  # biplet normalisation disabled in this variant
    print(max_images)

    # -- Step 0: Biplet-Square Normalization (disabled) ----------------------
    print("=" * 70)
    print("Step 0: Biplet-Square Normalization")
    print("=" * 70)
    # normalize_image_sizes_biplet(...) is intentionally not called in the
    # wo/biplet variant - raw extracted frames are used as-is.

    # Collect processed image paths (sorted for reproducibility).
    # BUG FIX: the original suffix tuple contained 'JPG' (no dot) *after* a
    # .lower() call - it could never match a real extension and would match
    # any name merely ending in "jpg"; the lowercased dotted suffixes below
    # already cover .jpg/.JPG/.jpeg/.png.
    image_paths = sorted(
        os.path.join(processed_image_dir, f)
        for f in os.listdir(processed_image_dir)
        if f.lower().endswith(('.jpg', '.jpeg', '.png'))
    )

    print(f"\nFound {len(image_paths)} processed images")
    if len(image_paths) > max_images:
        print(f"Will downsample to {max_images} images (GPU memory guard)")

    # -- Step 1: VGGT Inference ----------------------------------------------
    print("\n" + "=" * 70)
    print("Step 1: VGGT Inference")
    print("=" * 70)

    model, dtype = load_vggt_model()
    clear_memory()

    predictions, images_tensor, used_paths = run_vggt(
        model, dtype, image_paths, max_images=max_images
    )

    # Free the model from the GPU - no longer needed.
    del model
    clear_memory()
    print(f"Inference complete - {len(used_paths)} views processed")

    # -- Step 2 (Process3): VGGT -> COLMAP -----------------------------------
    print("\n" + "=" * 70)
    print("Step 2: VGGT -> COLMAP Conversion")
    print("=" * 70)

    colmap_dir = os.path.join(output_dir, "colmap")
    sparse_dir = convert_vggt_to_colmap(
        predictions=predictions,
        image_paths=used_paths,
        output_dir=colmap_dir,
        conf_threshold_pct=conf_threshold,
        verbose=True,
    )
    clear_memory()

    # -- Step 3: COLMAP bin -> PLY -------------------------------------------
    print("\n" + "=" * 70)
    print("Step 3: bin -> PLY")
    print("=" * 70)

    ply_path = os.path.join(colmap_dir, "point_cloud.ply")
    num_points = convert_colmap_bin_to_ply(sparse_dir, ply_path)

    print(f"\nPipeline complete! {num_points:,} points in PLY.")
    print(f"  PLY file: {ply_path}")
|
|
|
|
|
|