Upload mb-dino-mast3r-gs-kg-34.ipynb
Browse files
mb-dino-mast3r-gs-kg-34.ipynb
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
{"metadata":{"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"name":"python"},"colab":{"provenance":[],"gpuType":"T4"},"accelerator":"GPU","kaggle":{"accelerator":"nvidiaTeslaT4","dataSources":[{"sourceId":14554378,"sourceType":"datasetVersion","datasetId":1429416}],"dockerImageVersionId":31236,"isInternetEnabled":true,"language":"python","sourceType":"notebook","isGpuEnabled":true}},"nbformat_minor":4,"nbformat":4,"cells":[{"cell_type":"markdown","source":"# **dino-mast3r-gs-kg** \n\n","metadata":{"id":"qDQLX3PArmh8"}},{"cell_type":"markdown","source":"#### v26 was successful\n#### def extract_colmap_data(scene, image_paths, max_points=1000000):","metadata":{}},{"cell_type":"markdown","source":"https://www.kaggle.com/code/stpeteishii/british-museum-dino-lightglue-colmap-gs-12","metadata":{}},{"cell_type":"markdown","source":"\n## Key Features of This Pipeline\n\n* **Utilization of MASt3R**: Employs the new **MASt3R** model, which achieved outstanding results at **IMC2025** (Image Matching Challenge 2025).\n* **Process Integration**: MASt3R handles the tasks previously managed by the **ALIKED, LightGlue, and COLMAP** workflow mentioned in the previous notes.\n* **Pipeline Workflow**: The updated data flow follows this sequence:\n**Normalize → DINO → MASt3R → Gaussian Splatting**.\n\n","metadata":{}},{"cell_type":"markdown","source":"\n","metadata":{}},{"cell_type":"code","source":"!pip install pycolmap","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"# MASt3R-based Gaussian Splatting Pipeline\n# Preserves: DINO pair selection + Biplet-Square Normalization\n# Replaces: ALIKED/LightGlue/COLMAP with MASt3R\n\nimport os\nimport sys\nimport gc\nimport h5py\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom tqdm import tqdm\nfrom pathlib import Path\nimport subprocess\nfrom PIL import Image, ImageFilter\nimport pycolmap\nimport struct\n\n# Transformers for DINO\nfrom 
# ============================================================================
# Configuration
# ============================================================================
class Config:
    """Pipeline-wide settings: feature extraction, pairing, paths, device."""

    # Feature extraction
    N_KEYPOINTS = 8192
    IMAGE_SIZE = 1024

    # Pair selection - CRITICAL for memory
    GLOBAL_TOPK = 20  # Reduced from 50 - each image pairs with top 20
    MIN_MATCHES = 10
    RATIO_THR = 1.2

    # Paths
    DINO_MODEL = "facebook/dinov2-base"

    # MASt3R - Reduced size for memory
    MAST3R_MODEL = "/kaggle/working/mast3r/checkpoints/MASt3R_ViTLarge_BaseDecoder_512_catmlpdpt_metric.pth"
    MAST3R_IMAGE_SIZE = 224  # Small size to save memory

    # Device
    DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# ============================================================================
# Memory Management Utilities
# ============================================================================

def clear_memory():
    """Aggressively clear GPU and CPU memory"""
    gc.collect()
    if not torch.cuda.is_available():
        return
    torch.cuda.empty_cache()
    torch.cuda.synchronize()

def get_memory_info():
    """Print a snapshot of current GPU and CPU memory usage."""
    if torch.cuda.is_available():
        allocated = torch.cuda.memory_allocated() / 1024**3
        reserved = torch.cuda.memory_reserved() / 1024**3
        print(f"GPU Memory - Allocated: {allocated:.2f}GB, Reserved: {reserved:.2f}GB")

    import psutil
    cpu_mem = psutil.virtual_memory().percent
    print(f"CPU Memory Usage: {cpu_mem:.1f}%")

# ============================================================================
# Environment Setup
# ============================================================================

def run_cmd(cmd, check=True, capture=False):
    """Run a subprocess command; report (but do not raise on) failure."""
    print(f"Running: {' '.join(cmd)}")
    result = subprocess.run(cmd, capture_output=capture, text=True, check=False)
    if check and result.returncode != 0:
        print(f"❌ Command failed with code {result.returncode}")
        if capture:
            print(f"STDOUT: {result.stdout}")
            print(f"STDERR: {result.stderr}")
    return result


def setup_base_environment():
    """Install the base Python dependencies used by the pipeline."""
    print("\n=== Setting up Base Environment ===")

    pip = [sys.executable, "-m", "pip"]

    # NumPy fix for Python 3.12
    print("\n📦 Fixing NumPy...")
    run_cmd(pip + ["uninstall", "-y", "numpy"])
    run_cmd(pip + ["install", "numpy==1.26.4"])

    # PyTorch
    print("\n📦 Installing PyTorch...")
    run_cmd(pip + ["install", "torch", "torchvision", "torchaudio"])

    # Core utilities
    print("\n📦 Installing core utilities...")
    core_packages = [
        "opencv-python",
        "pillow",
        "imageio",
        "imageio-ffmpeg",
        "plyfile",
        "tqdm",
        "tensorboard",
        "scipy",   # for rotation conversions and image resizing
        "psutil",  # for memory monitoring
    ]
    run_cmd(pip + ["install"] + core_packages)

    # Transformers for DINO
    print("\n📦 Installing transformers...")
    run_cmd(pip + ["install", "transformers==4.40.0"])

    # pycolmap for COLMAP format
    print("\n📦 Installing pycolmap...")
    run_cmd(pip + ["install", "pycolmap"])

    print("✓ Base environment setup complete!")
def setup_mast3r():
    """Clone, install, and verify the MASt3R/DUSt3R stack."""
    print("\n=== Setting up MASt3R ===")

    os.chdir('/kaggle/working')

    # Remove existing installation
    if os.path.exists('mast3r'):
        print("Removing existing MASt3R installation...")
        os.system('rm -rf mast3r')

    # Clone repository
    print("Cloning MASt3R repository...")
    os.system('git clone --recursive https://github.com/naver/mast3r')
    os.chdir('/kaggle/working/mast3r')

    # Check dust3r directory
    print("Checking dust3r structure...")
    os.system('ls -la dust3r/')

    # Install dust3r, croco, and the MASt3R requirements
    install_steps = [
        ("Installing dust3r...", 'cd dust3r && python -m pip install -e .'),
        ("Installing croco...", 'cd dust3r/croco && python -m pip install -e .'),
        ("Installing MASt3R requirements...", 'pip install -r requirements.txt'),
    ]
    for message, command in install_steps:
        print(message)
        os.system(command)

    # Download model weights
    print("Downloading model weights...")
    os.system('mkdir -p checkpoints')
    os.system('wget -P checkpoints/ https://download.europe.naverlabs.com/ComputerVision/MASt3R/MASt3R_ViTLarge_BaseDecoder_512_catmlpdpt_metric.pth')

    # Install additional dependencies
    print("Installing additional dependencies...")
    os.system('pip install trimesh matplotlib roma')

    # Make the freshly cloned packages importable
    sys.path.insert(0, '/kaggle/working/mast3r')
    sys.path.insert(0, '/kaggle/working/mast3r/dust3r')

    # Verification
    print("\n🔍 Verifying MASt3R installation...")
    try:
        from mast3r.model import AsymmetricMASt3R
        print(" ✓ MASt3R import: OK")
    except Exception as e:
        print(f" ❌ MASt3R import failed: {e}")
        raise

    print("✓ MASt3R setup complete!")

# ============================================================================
# Step 0: Biplet-Square Normalization (PRESERVED FROM ORIGINAL)
# ============================================================================

def normalize_image_sizes_biplet(input_dir, output_dir=None, size=1024):
    """
    Generates two square crops (Left & Right or Top & Bottom)
    from each image in a directory.
    """
    if output_dir is None:
        output_dir = input_dir

    os.makedirs(output_dir, exist_ok=True)

    print(f"Generating 2 cropped squares (Left/Right or Top/Bottom) for each image...")
    print()

    converted_count = 0
    size_stats = {}

    valid_extensions = ('.jpg', '.jpeg', '.png')
    for img_file in sorted(os.listdir(input_dir)):
        if not img_file.lower().endswith(valid_extensions):
            continue

        source_path = os.path.join(input_dir, img_file)

        try:
            img = Image.open(source_path)
            original_size = img.size

            # Track the distribution of input sizes for the summary line
            size_key = f"{original_size[0]}x{original_size[1]}"
            size_stats[size_key] = size_stats.get(size_key, 0) + 1

            # Generate and save the two square crops
            stem, ext = os.path.splitext(img_file)
            for mode, square_img in generate_two_crops(img, size).items():
                square_img.save(os.path.join(output_dir, f"{stem}_{mode}{ext}"), quality=95)

            converted_count += 1
            print(f" ✓ {img_file}: {original_size} → 2 square images generated")

        except Exception as e:
            print(f" ✗ Error processing {img_file}: {e}")

    print(f"\nProcessing complete: {converted_count} source images processed")
    print(f"Original size distribution: {size_stats}")
    return converted_count


def generate_two_crops(img, size):
    """
    Crops the image into a square and returns 2 variations
    (Left/Right for landscape, Top/Bottom for portrait).
    """
    width, height = img.size
    crop_size = min(width, height)

    if width > height:
        # Landscape → slide the square window horizontally
        boxes = {
            'left': (0, 0, crop_size, crop_size),
            'right': (width - crop_size, 0, width, crop_size),
        }
    else:
        # Portrait or Square → slide vertically
        # (NOTE(review): for exact squares both crops are identical)
        boxes = {
            'top': (0, 0, crop_size, crop_size),
            'bottom': (0, height - crop_size, crop_size, height),
        }

    return {
        mode: img.crop(box).resize((size, size), Image.Resampling.LANCZOS)
        for mode, box in boxes.items()
    }
# ============================================================================
# Step 1: DINO-based Pair Selection (PRESERVED FROM ORIGINAL)
# ============================================================================

def load_torch_image(fname, device):
    """Load an image file as a (1, 3, H, W) float tensor on `device`."""
    import torchvision.transforms as T

    img = Image.open(fname).convert('RGB')
    transform = T.Compose([
        T.ToTensor(),
    ])
    return transform(img).unsqueeze(0).to(device)

def extract_dino_global(image_paths, model_path, device):
    """
    Extract one L2-normalized DINO global descriptor per image.

    The descriptor is the max-pool over patch tokens (index 0, the CLS
    token, is excluded) of the last hidden state. Images are processed in
    small batches and the model is freed afterwards to keep memory low.

    Returns:
        (N, D) float tensor on CPU.
    """
    print("\n=== Extracting DINO Global Features ===")
    print("Initial memory state:")
    get_memory_info()

    processor = AutoImageProcessor.from_pretrained(model_path)
    model = AutoModel.from_pretrained(model_path).eval().to(device)

    global_descs = []
    batch_size = 4  # Small batch to save memory

    for i in tqdm(range(0, len(image_paths), batch_size)):
        batch_paths = image_paths[i:i+batch_size]
        batch_imgs = [load_torch_image(p, device) for p in batch_paths]

        batch_tensor = torch.cat(batch_imgs, dim=0)

        with torch.no_grad():
            inputs = processor(images=batch_tensor, return_tensors="pt", do_rescale=False).to(device)
            outputs = model(**inputs)
            # Max-pool over patch tokens (skip CLS), then L2-normalize
            desc = F.normalize(outputs.last_hidden_state[:, 1:].max(dim=1)[0], dim=1, p=2)
            global_descs.append(desc.cpu())

        # Clear batch memory
        del batch_tensor, inputs, outputs, desc
        clear_memory()

    global_descs = torch.cat(global_descs, dim=0)

    del model, processor
    clear_memory()

    print("After DINO extraction:")
    get_memory_info()

    return global_descs

def build_topk_pairs(global_feats, k, device):
    """
    Build candidate pairs (i, j), i < j, from each image's top-k most
    similar global descriptors.

    Returns:
        Sorted, de-duplicated list of index pairs.
    """
    g = global_feats.to(device)
    sim = g @ g.T
    sim.fill_diagonal_(-1)  # never pair an image with itself

    N = sim.size(0)
    k = min(k, N - 1)

    topk_indices = torch.topk(sim, k, dim=1).indices.cpu()

    pairs = []
    for i in range(N):
        for j in topk_indices[i]:
            j = j.item()
            if i < j:
                pairs.append((i, j))

    # FIX: sort after de-duplication — list(set(...)) gave a
    # nondeterministic order, which made the seeded sampling downstream
    # in select_diverse_pairs non-reproducible across runs.
    pairs = sorted(set(pairs))

    return pairs

def select_diverse_pairs(pairs, max_pairs, num_images):
    """
    Select diverse pairs to ensure good image coverage
    Strategy: Select pairs that maximize image coverage
    """
    import random
    random.seed(42)

    if len(pairs) <= max_pairs:
        return pairs

    print(f"Selecting {max_pairs} diverse pairs from {len(pairs)} candidates...")

    # Count how many times each image appears in pairs
    image_counts = {i: 0 for i in range(num_images)}
    for i, j in pairs:
        image_counts[i] += 1
        image_counts[j] += 1

    # Sort pairs by: prefer pairs with less-connected images
    def pair_score(pair):
        i, j = pair
        # Lower score = images appear in fewer pairs = more diverse
        return image_counts[i] + image_counts[j]

    pairs_scored = [(pair, pair_score(pair)) for pair in pairs]
    pairs_scored.sort(key=lambda x: x[1])

    # Select pairs greedily to maximize coverage
    selected = []
    selected_set = set()  # FIX: O(1) membership (was an O(n) list scan per pair)
    selected_images = set()

    # Phase 1: Select pairs that add new images (greedy coverage)
    for pair, score in pairs_scored:
        if len(selected) >= max_pairs:
            break
        i, j = pair
        # Prefer pairs that include new images
        if i not in selected_images or j not in selected_images:
            selected.append(pair)
            selected_set.add(pair)
            selected_images.add(i)
            selected_images.add(j)

    # Phase 2: Fill remaining slots with the leftover candidates
    if len(selected) < max_pairs:
        remaining = [p for p, s in pairs_scored if p not in selected_set]
        random.shuffle(remaining)
        selected.extend(remaining[:max_pairs - len(selected)])

    print(f"Selected pairs cover {len(selected_images)} / {num_images} images ({100*len(selected_images)/num_images:.1f}%)")

    return selected

def get_image_pairs_dino(image_paths, max_pairs=None):
    """DINO-based pair selection with intelligent limiting"""
    device = Config.DEVICE

    # DINO global features
    global_feats = extract_dino_global(image_paths, Config.DINO_MODEL, device)
    pairs = build_topk_pairs(global_feats, Config.GLOBAL_TOPK, device)

    print(f"Initial pairs from DINO: {len(pairs)}")

    # Apply intelligent pair selection if limit specified
    if max_pairs and len(pairs) > max_pairs:
        pairs = select_diverse_pairs(pairs, max_pairs, len(image_paths))

    return pairs
# ============================================================================
# Step 2: MASt3R Reconstruction (REPLACES ALIKED/LIGHTGLUE/COLMAP)
# ============================================================================

def load_mast3r_model(device='cuda'):
    """Load the pretrained MASt3R checkpoint (Config.MAST3R_MODEL) onto
    `device` and switch it to eval mode."""
    # Imported lazily: the mast3r package only exists after setup_mast3r()
    # has cloned the repo and extended sys.path.
    from mast3r.model import AsymmetricMASt3R

    model = AsymmetricMASt3R.from_pretrained(Config.MAST3R_MODEL).to(device)
    model.eval()

    print(f"✓ MASt3R model loaded on {device}")
    return model

def load_images_for_mast3r(image_paths, size=224):
    """Load images using DUSt3R's format with reduced size.

    Returns whatever dust3r's load_images produces (one entry per path);
    the reduced `size` keeps GPU memory low during inference.
    """
    print(f"\n=== Loading images for MASt3R (size={size}) ===")

    from dust3r.utils.image import load_images

    # Load images using DUSt3R's loader with reduced size
    images = load_images(image_paths, size=size, verbose=True)

    return images

def run_mast3r_pairs(model, image_paths, pairs, device='cuda', batch_size=1, max_pairs=None):
    """Run MASt3R on selected pairs with memory management.

    Args:
        model: loaded AsymmetricMASt3R model.
        image_paths: all image paths; entries of `pairs` index into this list.
        pairs: list of (i, j) index pairs to match.
        device: torch device string.
        batch_size: inference batch size (1 keeps memory low).
        max_pairs: optional cap; pairs are subsampled with an even stride
            when exceeded.

    Returns:
        (scene, images): the globally aligned scene object and the loaded
        images, in the same order as `image_paths`.
    """
    print("\n=== Running MASt3R Reconstruction ===")
    print("Initial memory state:")
    get_memory_info()

    from dust3r.inference import inference
    from dust3r.cloud_opt import global_aligner, GlobalAlignerMode

    # Limit number of pairs if specified
    if max_pairs and len(pairs) > max_pairs:
        print(f"Limiting pairs from {len(pairs)} to {max_pairs}")
        # Even-stride subsampling keeps coverage spread across the pair list
        step = max(1, len(pairs) // max_pairs)
        pairs = pairs[::step][:max_pairs]

    print(f"Processing {len(pairs)} pairs...")

    # Load images in smaller size
    print(f"Loading {len(image_paths)} images at {Config.MAST3R_IMAGE_SIZE}x{Config.MAST3R_IMAGE_SIZE}...")
    images = load_images_for_mast3r(image_paths, size=Config.MAST3R_IMAGE_SIZE)

    print(f"Loaded {len(images)} images")
    print("After loading images:")
    get_memory_info()

    # Create all image pairs at once
    print(f"Creating {len(pairs)} image pairs...")
    mast3r_pairs = []
    for idx1, idx2 in tqdm(pairs, desc="Preparing pairs"):
        mast3r_pairs.append((images[idx1], images[idx2]))

    print(f"Running MASt3R inference on {len(mast3r_pairs)} pairs...")

    # Run inference (this returns the dict format we need)
    output = inference(mast3r_pairs, model, device, batch_size=batch_size, verbose=True)

    # Clear pairs from memory
    del mast3r_pairs
    clear_memory()

    print("✓ MASt3R inference complete")
    print("After inference:")
    get_memory_info()

    # Global alignment
    print("Running global alignment...")
    scene = global_aligner(
        output,
        device=device,
        mode=GlobalAlignerMode.PointCloudOptimizer
    )

    # Clear output after creating scene
    del output
    clear_memory()

    print("Computing global alignment...")
    # NOTE(review): niter was reduced from 300 to 150 as a runtime/memory
    # trade-off — confirm reconstruction quality is still acceptable.
    loss = scene.compute_global_alignment(
        init="mst",
        niter=150,  # Reduced from 300
        schedule='cosine',
        lr=0.01
    )

    print(f"✓ Global alignment complete (final loss: {loss:.6f})")
    print("Final memory state:")
    get_memory_info()

    return scene, images
#v26
def extract_colmap_data(scene, image_paths, max_points=1000000):
    """
    Extract COLMAP-compatible camera parameters and 3D points from a MASt3R scene.

    Args:
        scene: MASt3R scene object (must provide get_pts3d, get_im_poses,
            get_focals, get_principal_points)
        image_paths: List of image paths
        max_points: Maximum number of 3D points to extract (default: 1M)

    Returns:
        (pts3d, colors, cameras, poses): Nx3 points, Nx3 colors in [0, 1],
        per-image PINHOLE camera dicts, and world-to-camera 4x4 poses.
    """
    print("\n=== Extracting COLMAP-compatible data ===")

    # --- Point cloud -------------------------------------------------------
    pts_all = scene.get_pts3d()
    print(f"pts_all type: {type(pts_all)}")

    if isinstance(pts_all, list):
        print(f"pts_all is a list with {len(pts_all)} elements")
        if len(pts_all) > 0:
            print(f"First element type: {type(pts_all[0])}")
            if hasattr(pts_all[0], 'shape'):
                print(f"First element shape: {pts_all[0].shape}")

        pts_all = torch.stack([p if isinstance(p, torch.Tensor) else torch.tensor(p)
                               for p in pts_all])
        print(f"pts_all shape after conversion: {pts_all.shape}")

    if len(pts_all.shape) == 4:
        print(f"Found batched point cloud: {pts_all.shape}")
        B, H, W, _ = pts_all.shape
        pts3d = pts_all.reshape(-1, 3).detach().cpu().numpy()

        # Per-pixel colors sampled from the (resized) source images
        colors = []
        for img_path in image_paths:
            img = Image.open(img_path).resize((W, H))
            colors.append(np.array(img))
        colors = np.stack(colors).reshape(-1, 3) / 255.0
    else:
        pts3d = pts_all.detach().cpu().numpy() if isinstance(pts_all, torch.Tensor) else pts_all
        colors = np.ones((len(pts3d), 3)) * 0.5  # fallback: uniform gray

    print(f"✓ Extracted {len(pts3d)} 3D points from {len(image_paths)} images")

    # **DOWNSAMPLE POINTS TO REDUCE MEMORY USAGE**
    if len(pts3d) > max_points:
        print(f"\n⚠ Downsampling from {len(pts3d)} to {max_points} points to reduce memory usage...")

        # Remove invalid points first
        valid_mask = ~(np.isnan(pts3d).any(axis=1) | np.isinf(pts3d).any(axis=1))
        pts3d_valid = pts3d[valid_mask]
        colors_valid = colors[valid_mask]

        # Random sampling
        indices = np.random.choice(len(pts3d_valid), size=max_points, replace=False)
        pts3d = pts3d_valid[indices]
        colors = colors_valid[indices]

        print(f"✓ Downsampled to {len(pts3d)} points")

    # --- Camera poses ------------------------------------------------------
    print("Extracting camera parameters...")

    # IMPORTANT: MASt3R returns camera-to-world poses, but COLMAP expects
    # world-to-camera, so each pose must be inverted.
    poses_c2w = scene.get_im_poses().detach().cpu().numpy()
    print(f"Retrieved camera-to-world poses: shape {poses_c2w.shape}")

    poses = np.array([np.linalg.inv(pose_c2w) for pose_c2w in poses_c2w])
    print(f"Converted to world-to-camera poses for COLMAP")

    # --- Intrinsics --------------------------------------------------------
    focals = scene.get_focals().detach().cpu().numpy()
    pp = scene.get_principal_points().detach().cpu().numpy()
    print(f"Focals shape: {focals.shape}")
    print(f"Principal points shape: {pp.shape}")

    # FIX: was a hardcoded 224.0 that could silently drift out of sync
    # with the configured MASt3R processing size.
    mast3r_size = float(Config.MAST3R_IMAGE_SIZE)

    cameras = []
    for i, img_path in enumerate(image_paths):
        img = Image.open(img_path)
        W, H = img.size

        # FIX: per-axis scales (the original scaled fy/cy by the width-based
        # factor, which is only correct for square images; identical result
        # for this pipeline's square crops where W == H).
        scale_x = W / mast3r_size
        scale_y = H / mast3r_size

        # focals is [N, 1] for an isotropic camera (fx == fy), else [N, 2]
        if focals.shape[1] == 1:
            fx = float(focals[i, 0]) * scale_x
            fy = float(focals[i, 0]) * scale_y
        else:
            fx = float(focals[i, 0]) * scale_x
            fy = float(focals[i, 1]) * scale_y

        # Scale the principal point as well
        cx = float(pp[i, 0]) * scale_x
        cy = float(pp[i, 1]) * scale_y

        camera = {
            'camera_id': i + 1,
            'model': 'PINHOLE',
            'width': W,
            'height': H,
            'params': [fx, fy, cx, cy]
        }
        cameras.append(camera)

        if i == 0:
            print(f"\nExample camera 0:")
            print(f" Image size: {W}x{H}")
            # BUG FIX: the original printed `focal_mast3r`, which is unbound
            # (NameError) when focals has two columns.
            print(f" MASt3R focal: {float(focals[i, 0]):.2f}, pp: ({pp[i,0]:.2f}, {pp[i,1]:.2f})")
            print(f" Scaled fx={fx:.2f}, fy={fy:.2f}, cx={cx:.2f}, cy={cy:.2f}")
            print(f" Pose (first row): {poses[i][0]}")

    print(f"\n✓ Extracted {len(cameras)} cameras and {len(poses)} poses")

    return pts3d, colors, cameras, poses
def save_colmap_reconstruction(pts3d, colors, cameras, poses, image_paths, output_dir):
    """
    Save a reconstruction in COLMAP binary format (cameras.bin, images.bin,
    points3D.bin) under <output_dir>/sparse/0 by writing the files directly.

    Returns:
        Path to the sparse model directory.
    """
    print("\n=== Saving COLMAP reconstruction ===")

    sparse_dir = Path(output_dir) / 'sparse' / '0'
    sparse_dir.mkdir(parents=True, exist_ok=True)

    print(f" Writing COLMAP files directly to {sparse_dir}...")

    # Write cameras.bin
    write_cameras_binary(cameras, sparse_dir / 'cameras.bin')
    print(f" ✓ Wrote {len(cameras)} cameras")

    # Write images.bin
    write_images_binary(image_paths, cameras, poses, sparse_dir / 'images.bin')
    print(f" ✓ Wrote {len(image_paths)} images")

    # Write points3D.bin
    num_points = write_points3d_binary(pts3d, colors, sparse_dir / 'points3D.bin')
    print(f" ✓ Wrote {num_points} 3D points")

    print(f"\n✓ COLMAP reconstruction saved to {sparse_dir}")
    print(f" Cameras: {len(cameras)}")
    print(f" Images: {len(image_paths)}")
    print(f" Points: {num_points}")

    return sparse_dir


def write_cameras_binary(cameras, output_file):
    """Write cameras.bin in COLMAP binary format (PINHOLE model only)."""
    with open(output_file, 'wb') as f:
        # Write number of cameras
        f.write(struct.pack('Q', len(cameras)))

        for i, cam in enumerate(cameras):
            camera_id = cam.get('camera_id', i + 1)

            # Model ID: 1 = PINHOLE
            model_id = 1
            width = cam['width']
            height = cam['height']
            params = cam['params']  # [fx, fy, cx, cy]

            f.write(struct.pack('i', camera_id))
            f.write(struct.pack('i', model_id))
            f.write(struct.pack('Q', width))
            f.write(struct.pack('Q', height))

            # Write 4 parameters for PINHOLE model
            for param in params[:4]:
                f.write(struct.pack('d', param))


def write_images_binary(image_paths, cameras, poses, output_file):
    """
    Write images.bin in COLMAP binary format.

    Each record holds the world-to-camera rotation as a quaternion
    [w, x, y, z], the translation, the camera id, the image name, and an
    empty 2D-point list (no 2D-3D correspondences are available).
    """
    with open(output_file, 'wb') as f:
        # Write number of images
        f.write(struct.pack('Q', len(image_paths)))

        for i, (img_path, pose) in enumerate(zip(image_paths, poses)):
            image_id = i + 1
            camera_id = cameras[i].get('camera_id', i + 1)
            image_name = os.path.basename(img_path)

            # Split the 4x4 world-to-camera pose
            R = pose[:3, :3]
            t = pose[:3, 3]

            # Convert rotation matrix to quaternion [w, x, y, z]
            qvec = rotmat2qvec(R)
            tvec = t

            f.write(struct.pack('i', image_id))

            # Write quaternion (4 doubles)
            for q in qvec:
                f.write(struct.pack('d', float(q)))

            # Write translation vector (3 doubles)
            for tv in tvec:
                f.write(struct.pack('d', float(tv)))

            # Write camera ID
            f.write(struct.pack('i', camera_id))

            # Write image name (null-terminated string)
            f.write(image_name.encode('utf-8') + b'\x00')

            # Write number of 2D points (0 for now, as we don't have
            # 2D-3D correspondences)
            f.write(struct.pack('Q', 0))


def write_points3d_binary(pts3d, colors, output_file):
    """
    Write points3D.bin in COLMAP binary format.

    NaN/Inf points are skipped. The reprojection error is written as 0 and
    the track is left empty (no correspondences available).

    Returns:
        Number of valid points written.
    """
    # FIX: vectorized validity filter — the original looped in Python over
    # up to millions of points to collect valid indices.
    pts_arr = np.asarray(pts3d, dtype=np.float64)
    valid_indices = np.nonzero(np.isfinite(pts_arr).all(axis=1))[0]

    with open(output_file, 'wb') as f:
        # Write number of points
        f.write(struct.pack('Q', len(valid_indices)))

        for idx, point_id in enumerate(valid_indices):
            pt = pts3d[point_id]
            color = colors[point_id]

            # Write point3D ID
            f.write(struct.pack('Q', point_id))

            # Write XYZ coordinates (3 doubles)
            for coord in pt:
                f.write(struct.pack('d', float(coord)))

            # Write RGB color (3 unsigned chars)
            col_int = (color * 255).astype(np.uint8)
            for c in col_int:
                f.write(struct.pack('B', int(c)))

            # Write error (1 double) - set to 0
            f.write(struct.pack('d', 0.0))

            # Write track length (0: no track information)
            f.write(struct.pack('Q', 0))

            # Progress indicator
            if (idx + 1) % 1000000 == 0:
                print(f" Wrote {idx + 1} / {len(valid_indices)} points...")

    return len(valid_indices)


def rotmat2qvec(R):
    """
    Convert rotation matrix to quaternion in COLMAP format [w, x, y, z]

    Args:
        R: 3x3 rotation matrix

    Returns:
        qvec: normalized quaternion [w, x, y, z]
    """
    # Ensure R is a numpy array
    R = np.asarray(R, dtype=np.float64)

    # Branch on the largest diagonal term for numerical stability
    trace = np.trace(R)

    if trace > 0:
        s = 0.5 / np.sqrt(trace + 1.0)
        w = 0.25 / s
        x = (R[2, 1] - R[1, 2]) * s
        y = (R[0, 2] - R[2, 0]) * s
        z = (R[1, 0] - R[0, 1]) * s
    elif R[0, 0] > R[1, 1] and R[0, 0] > R[2, 2]:
        s = 2.0 * np.sqrt(1.0 + R[0, 0] - R[1, 1] - R[2, 2])
        w = (R[2, 1] - R[1, 2]) / s
        x = 0.25 * s
        y = (R[0, 1] + R[1, 0]) / s
        z = (R[0, 2] + R[2, 0]) / s
    elif R[1, 1] > R[2, 2]:
        s = 2.0 * np.sqrt(1.0 + R[1, 1] - R[0, 0] - R[2, 2])
        w = (R[0, 2] - R[2, 0]) / s
        x = (R[0, 1] + R[1, 0]) / s
        y = 0.25 * s
        z = (R[1, 2] + R[2, 1]) / s
    else:
        s = 2.0 * np.sqrt(1.0 + R[2, 2] - R[0, 0] - R[1, 1])
        w = (R[1, 0] - R[0, 1]) / s
        x = (R[0, 2] + R[2, 0]) / s
        y = (R[1, 2] + R[2, 1]) / s
        z = 0.25 * s

    qvec = np.array([w, x, y, z], dtype=np.float64)

    # Normalize
    qvec = qvec / np.linalg.norm(qvec)

    return qvec
# ============================================================================
# Step 3: Gaussian Splatting Training
# ============================================================================

def setup_gaussian_splatting():
    """Clone and build the Gaussian Splatting repository and its submodules."""
    print("\n=== Setting up Gaussian Splatting ===")

    os.chdir('/kaggle/working')

    WORK_DIR = "gaussian-splatting"

    if os.path.exists(WORK_DIR):
        print("✓ Repository already exists")
    else:
        print("Cloning Gaussian Splatting repository...")
        run_cmd([
            "git", "clone", "--recursive",
            "https://github.com/graphdeco-inria/gaussian-splatting.git",
            WORK_DIR
        ])

    os.chdir(WORK_DIR)

    # Install requirements
    print("Installing Gaussian Splatting requirements...")
    run_cmd([sys.executable, "-m", "pip", "install", "-r", "requirements.txt"])

    # Build the two CUDA submodules
    print("\n📦 Building Gaussian Splatting submodules...")

    submodules = {
        "diff-gaussian-rasterization":
            "https://github.com/graphdeco-inria/diff-gaussian-rasterization.git",
        "simple-knn":
            "https://github.com/camenduru/simple-knn.git"
    }

    for name, repo_url in submodules.items():
        print(f"\n📦 Installing {name}...")
        local_path = os.path.join("submodules", name)
        if not os.path.exists(local_path):
            run_cmd(["git", "clone", repo_url, local_path])
        run_cmd([sys.executable, "-m", "pip", "install", local_path])

    print("✓ Gaussian Splatting setup complete!")
def train_gaussian_splatting(colmap_dir, image_dir, output_dir, iterations=2000):
    """
    Train a Gaussian Splatting model on a COLMAP-format reconstruction.

    Args:
        colmap_dir: directory containing the sparse COLMAP model.
        image_dir: directory with the training images.
        output_dir: where the trained model is written.
        iterations: number of training iterations.

    Raises:
        RuntimeError: if training fails or the expected .ply is missing.
    """
    print("\n" + "="*70)
    print("Step 6: Training Gaussian Splatting")
    print("="*70)

    print("\n=== Training Gaussian Splatting ===")

    # Reduce memory usage with smaller resolution
    cmd = [
        'python', 'train.py',
        '-s', colmap_dir,
        '--images', image_dir,
        '-m', output_dir,
        '--iterations', str(iterations),
        '--test_iterations', '1000', str(iterations),
        '--save_iterations', '1000', str(iterations),
        '--resolution', '2',  # Reduce resolution to 1/2
        '--densify_grad_threshold', '0.001',  # Higher threshold = fewer Gaussians
        '--densification_interval', '200',  # Less frequent densification
        '--opacity_reset_interval', '5000',  # Less frequent reset
    ]

    print(f"Command: {' '.join(cmd)}\n")

    result = subprocess.run(
        cmd,
        cwd='/kaggle/working/gaussian-splatting',
        capture_output=True,
        text=True
    )

    print(result.stdout)
    if result.stderr:
        print("STDERR:", result.stderr)

    if result.returncode != 0:
        raise RuntimeError("Gaussian Splatting training failed")

    # Check output
    if not os.path.exists(os.path.join(output_dir, f'point_cloud/iteration_{iterations}/point_cloud.ply')):
        raise RuntimeError(f"Expected output not found at iteration {iterations}")

    print(f"\n✓ Gaussian Splatting training completed successfully")
    print(f" Output: {output_dir}")

    return output_dir

# ============================================================================
# Main Pipeline
# ============================================================================
def main_pipeline(image_dir, output_dir, square_size=224, iterations=2000,
                  max_images=None, max_pairs=10000, max_points=1000000):
    """
    Main pipeline for DINO matching -> MASt3R -> Gaussian Splatting

    Args:
        image_dir: Directory containing input images
        output_dir: Directory for output files
        square_size: Size to resize images for processing
        iterations: Number of training iterations
        max_images: Maximum number of images to process (None = all)
        max_pairs: Maximum number of image pairs for matching
        max_points: Maximum number of 3D points to extract (default: 1M)
    """
    # BUG FIX: was imported inside the per-image copy loop below, leaving
    # `shutil` unbound at the rmtree call if the image list was empty.
    import shutil

    os.makedirs(output_dir, exist_ok=True)

    setup_base_environment()
    clear_memory()

    setup_mast3r()
    clear_memory()

    setup_gaussian_splatting()
    clear_memory()

    # Step 1: Normalize images to biplet-square format
    print("\n" + "="*70)
    print("Step 1: Biplet-Square Normalization")
    print("="*70)

    processed_image_dir = os.path.join(output_dir, "processed_images")

    # Get original images first
    original_image_paths = sorted([
        os.path.join(image_dir, f)
        for f in os.listdir(image_dir)
        if f.lower().endswith(('.jpg', '.jpeg', '.png'))
    ])

    # FIX: fail fast with a clear message instead of crashing downstream
    if not original_image_paths:
        raise ValueError(f"No images found in {image_dir}")

    # Limit original images if specified
    if max_images and len(original_image_paths) > max_images:
        print(f"\n⚠️ Limiting to {max_images} original images")
        original_image_paths = original_image_paths[:max_images]

    print(f"Processing {len(original_image_paths)} original images → ~{len(original_image_paths)*2} after biplet-square")

    # Only process the selected images
    temp_dir = os.path.join(output_dir, "temp_originals")
    os.makedirs(temp_dir, exist_ok=True)

    # Copy selected images to temp directory
    for img_path in original_image_paths:
        shutil.copy(img_path, temp_dir)

    # Process the temp directory
    normalize_image_sizes_biplet(
        input_dir=temp_dir,
        output_dir=processed_image_dir,
        size=square_size
    )

    # Clean up temp directory
    shutil.rmtree(temp_dir)

    # Get processed image paths
    image_paths = sorted([
        os.path.join(processed_image_dir, f)
        for f in os.listdir(processed_image_dir)
        if f.lower().endswith(('.jpg', '.jpeg', '.png'))
    ])

    print(f"\n📸 Processing {len(image_paths)} images (after biplet-square)")
    print(f"⚠️ Will use maximum {max_pairs} pairs to save memory")

    # Step 2: DINO-based pair selection
    print("\n" + "="*70)
    print("Step 2: DINO Pair Selection")
    print("="*70)

    pairs = get_image_pairs_dino(image_paths, max_pairs=max_pairs)
    clear_memory()

    print(f"✓ Using {len(pairs)} pairs for reconstruction")

    # Step 3: MASt3R reconstruction
    print("\n" + "="*70)
    print("Step 3: MASt3R Reconstruction")
    print("="*70)

    device = Config.DEVICE
    model = load_mast3r_model(device)

    scene, mast3r_images = run_mast3r_pairs(
        model, image_paths, pairs, device,
        max_pairs=None  # Already limited in get_image_pairs_dino
    )

    # Clear model from memory
    del model
    clear_memory()

    # Step 4: Extract COLMAP-compatible data
    print("\n" + "="*70)
    print("Step 4: Converting to COLMAP Format")
    print("="*70)

    # Extract COLMAP-compatible data with point limit
    pts3d, colors, cameras, poses = extract_colmap_data(
        scene, image_paths, max_points=max_points
    )

    # Clear scene from memory
    del scene, mast3r_images
    clear_memory()

    # Step 5: Save COLMAP reconstruction
    colmap_dir = os.path.join(output_dir, 'colmap')
    sparse_dir = save_colmap_reconstruction(
        pts3d, colors, cameras, poses, image_paths, colmap_dir
    )

    # Clear reconstruction data
    del pts3d, colors, cameras, poses
    clear_memory()

    # Step 6: Train Gaussian Splatting
    print("\n" + "="*70)
    print("Step 6: Training Gaussian Splatting")
    print("="*70)

    gs_output = train_gaussian_splatting(
        colmap_dir=colmap_dir,
        image_dir=processed_image_dir,
        output_dir=output_dir,
        iterations=iterations
    )

    print("\n" + "="*70)
    print("✅ Full Pipeline Successfully Completed!")
    print("="*70)
    print(f"\nGaussian Splatting model saved at: {gs_output}")

    return gs_output
# Entry point: run the full pipeline on the Kaggle "fountain80" dataset.
if __name__ == "__main__":
    # Input photos and output workspace (Kaggle paths)
    IMAGE_DIR = "/kaggle/input/two-dogs/fountain80/fountain80"
    OUTPUT_DIR = "/kaggle/working/output"

    gs_output = main_pipeline(
        image_dir=IMAGE_DIR,
        output_dir=OUTPUT_DIR,
        square_size=1024,   # biplet crops are resized to 1024x1024
        iterations=4000,    # Gaussian Splatting training iterations
        max_images=None,    # use every input image
        max_pairs=1000,     # cap DINO-selected pairs to bound memory
        max_points=100000   # cap extracted 3D points
    )

    print(f"\n{'='*70}")
    print("Pipeline completed successfully!")
    print(f"{'='*70}")
    print(f"Gaussian Splatting output: {gs_output}")
|