Upload biplet-dino-mast3r-ps1-ps2-cp01.ipynb
Browse files
biplet-dino-mast3r-ps1-ps2-cp01.ipynb
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
{"metadata":{"kernelspec":{"name":"python3","display_name":"Python 3","language":"python"},"language_info":{"name":"python","version":"3.12.12","mimetype":"text/x-python","codemirror_mode":{"name":"ipython","version":3},"pygments_lexer":"ipython3","nbconvert_exporter":"python","file_extension":".py"},"kaggle":{"accelerator":"nvidiaTeslaT4","dataSources":[{"sourceId":14598123,"sourceType":"datasetVersion","datasetId":1429416}],"dockerImageVersionId":31259,"isInternetEnabled":true,"language":"python","sourceType":"notebook","isGpuEnabled":true},"colab":{"provenance":[],"gpuType":"T4"},"accelerator":"GPU"},"nbformat_minor":4,"nbformat":4,"cells":[{"cell_type":"code","source":"","metadata":{"_uuid":"8f2839f25d086af736a60e9eeb907d3b93b6e0e5","_cell_guid":"b1076dfc-b9ad-4769-8c92-a6c4dae69d19","trusted":true,"id":"yhVNR6GETKyA"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"# =====================================================================\n# biplet_dino_mast3r_ps2_gs_colab_01.ipynb\n# ASMK を DINO に置き換えたバージョン\n# =====================================================================\n\n# =====================================================================\n# CELL 1: Install Dependencies\n# =====================================================================\n!pip install roma einops timm huggingface_hub\n!pip install opencv-python pillow tqdm pyaml cython plyfile\n!pip install pycolmap trimesh\n!pip install transformers==4.40.0 # DINOに必要\n!pip uninstall -y numpy scipy\n!pip install numpy==1.26.4 scipy==1.11.4\nbreak","metadata":{"trusted":true,"id":"6C3QGJD8TKyC","outputId":"b362f97d-fbc1-474f-f2cb-b84b565acdb9"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"","metadata":{"id":"TPcj5qcmedBw","trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"# restart & run after\n# =====================================================================\n# CELL 2: Mount Drive and Verify\n# 
=====================================================================\n\n\nimport numpy as np\nprint(f\"✓ np: {np.__version__} - {np.__file__}\")\n!pip show numpy | grep Version\n\ntry:\n import roma\n print(\"✓ roma is installed\")\nexcept ModuleNotFoundError:\n print(\"⚠️ roma not found, installing...\")\n !pip install roma\n import roma\n print(\"✓ roma installed\")\n\n# =====================================================================\n# CELL 3: Clone Repositories\n# =====================================================================\nimport os\nimport sys\n\n# MASt3Rをクローン\nif not os.path.exists('/kaggle/working/mast3r'):\n print(\"Cloning MASt3R repository...\")\n !git clone --recursive https://github.com/naver/mast3r.git /kaggle/working/mast3r\n print(\"✓ MASt3R cloned\")\nelse:\n print(\"✓ MASt3R already exists\")\n\n# DUSt3Rをクローン(MASt3R内に必要)\nif not os.path.exists('/kaggle/working/mast3r/dust3r'):\n print(\"Cloning DUSt3R repository...\")\n !git clone --recursive https://github.com/naver/dust3r.git /kaggle/working/mast3r/dust3r\n print(\"✓ DUSt3R cloned\")\nelse:\n print(\"✓ DUSt3R already exists\")\n\n# パスを追加\nsys.path.insert(0, '/kaggle/working/mast3r')\nsys.path.insert(0, '/kaggle/working/mast3r/dust3r')\n\n# 確認\ntry:\n from dust3r.model import AsymmetricCroCo3DStereo\n print(\"✓ dust3r.model imported successfully\")\nexcept ImportError as e:\n print(f\"✗ Import error: {e}\")\n\n# croco(MASt3Rの依存関係)もクローン\nif not os.path.exists('/kaggle/working/mast3r/croco'):\n print(\"Cloning CroCo repository...\")\n !git clone --recursive https://github.com/naver/croco.git /kaggle/working/mast3r/croco\n print(\"✓ CroCo cloned\")\n\n\nprint(f\"✓ np: {np.__version__} - {np.__file__}\")\n!pip show numpy | grep 
Version","metadata":{"trusted":true,"id":"OWJEB1oQTKyD","outputId":"fa123527-2b15-4fa5-8d3c-c830ccc43365","execution":{"iopub.status.busy":"2026-01-25T02:40:18.272939Z","iopub.execute_input":"2026-01-25T02:40:18.273196Z","iopub.status.idle":"2026-01-25T02:40:34.881128Z","shell.execute_reply.started":"2026-01-25T02:40:18.273166Z","shell.execute_reply":"2026-01-25T02:40:34.880398Z"}},"outputs":[{"name":"stdout","text":"✓ np: 2.0.2 - /usr/local/lib/python3.12/dist-packages/numpy/__init__.py\nVersion: 2.0.2\nVersion 3.1, 31 March 2009\n Version 3, 29 June 2007\n 5. Conveying Modified Source Versions.\n 14. Revised Versions of this License.\n⚠️ roma not found, installing...\nCollecting roma\n Downloading roma-1.5.4-py3-none-any.whl.metadata (5.5 kB)\nDownloading roma-1.5.4-py3-none-any.whl (25 kB)\nInstalling collected packages: roma\nSuccessfully installed roma-1.5.4\n✓ roma installed\nCloning MASt3R repository...\nCloning into '/kaggle/working/mast3r'...\nremote: Enumerating objects: 269, done.\u001b[K\nremote: Counting objects: 100% (170/170), done.\u001b[K\nremote: Compressing objects: 100% (61/61), done.\u001b[K\nremote: Total 269 (delta 115), reused 109 (delta 109), pack-reused 99 (from 1)\u001b[K\nReceiving objects: 100% (269/269), 3.59 MiB | 20.43 MiB/s, done.\nResolving deltas: 100% (151/151), done.\nSubmodule 'dust3r' (https://github.com/naver/dust3r) registered for path 'dust3r'\nCloning into '/kaggle/working/mast3r/dust3r'...\nremote: Enumerating objects: 611, done. \nremote: Total 611 (delta 0), reused 0 (delta 0), pack-reused 611 (from 1) \nReceiving objects: 100% (611/611), 756.60 KiB | 6.88 MiB/s, done.\nResolving deltas: 100% (355/355), done.\nSubmodule path 'dust3r': checked out '3cc8c88c413bb9e34c41db0e0eef99c2ee010b12'\nSubmodule 'croco' (https://github.com/naver/croco) registered for path 'dust3r/croco'\nCloning into '/kaggle/working/mast3r/dust3r/croco'...\nremote: Enumerating objects: 198, done. \nremote: Counting objects: 100% (89/89), done. 
\nremote: Compressing objects: 100% (55/55), done. \nremote: Total 198 (delta 56), reused 34 (delta 34), pack-reused 109 (from 1) \nReceiving objects: 100% (198/198), 403.77 KiB | 4.92 MiB/s, done.\nResolving deltas: 100% (95/95), done.\nSubmodule path 'dust3r/croco': checked out 'd7de0705845239092414480bd829228723bf20de'\n✓ MASt3R cloned\n✓ DUSt3R already exists\nWarning, cannot find cuda-compiled version of RoPE2D, using a slow pytorch version instead\n✓ dust3r.model imported successfully\nCloning CroCo repository...\nCloning into '/kaggle/working/mast3r/croco'...\nremote: Enumerating objects: 198, done.\u001b[K\nremote: Counting objects: 100% (89/89), done.\u001b[K\nremote: Compressing objects: 100% (55/55), done.\u001b[K\nremote: Total 198 (delta 56), reused 34 (delta 34), pack-reused 109 (from 1)\u001b[K\nReceiving objects: 100% (198/198), 403.77 KiB | 5.24 MiB/s, done.\nResolving deltas: 100% (95/95), done.\n✓ CroCo cloned\n✓ np: 2.0.2 - /usr/local/lib/python3.12/dist-packages/numpy/__init__.py\nVersion: 2.0.2\nVersion 3.1, 31 March 2009\n Version 3, 29 June 2007\n 5. Conveying Modified Source Versions.\n 14. 
Revised Versions of this License.\n","output_type":"stream"}],"execution_count":1},{"cell_type":"code","source":"# =====================================================================\n# CELL 5: Import Core Libraries and Configure Memory\n# =====================================================================\nimport os\nimport sys\nimport gc\nimport torch\nimport numpy as np\nfrom pathlib import Path\nfrom tqdm import tqdm\nimport torch.nn.functional as F\nimport shutil\nfrom PIL import Image\nfrom transformers import AutoImageProcessor, AutoModel\n\n# MEMORY MANAGEMENT\nos.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'expandable_segments:True'\n\ndef clear_memory():\n \"\"\"メモリクリア関数\"\"\"\n gc.collect()\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n torch.cuda.synchronize()\n\ndef get_memory_info():\n \"\"\"Get current memory usage\"\"\"\n if torch.cuda.is_available():\n allocated = torch.cuda.memory_allocated() / 1024**3\n reserved = torch.cuda.memory_reserved() / 1024**3\n print(f\"GPU Memory - Allocated: {allocated:.2f}GB, Reserved: {reserved:.2f}GB\")\n\n import psutil\n cpu_mem = psutil.virtual_memory().percent\n print(f\"CPU Memory Usage: {cpu_mem:.1f}%\")\n\n# CONFIGURATION\nclass Config:\n DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n MAST3R_WEIGHTS = \"naver/MASt3R_ViTLarge_BaseDecoder_512_catmlpdpt_metric\"\n DUST3R_WEIGHTS = \"naver/DUSt3R_ViTLarge_BaseDecoder_512_dpt\"\n\n # DINO設定\n DINO_MODEL = \"facebook/dinov2-base\"\n GLOBAL_TOPK = 20 # 各画像がペアを組む上位K個\n\n IMAGE_SIZE = 224","metadata":{"trusted":true,"id":"OWJEB1oQTKyD","outputId":"fa123527-2b15-4fa5-8d3c-c830ccc43365","execution":{"iopub.status.busy":"2026-01-25T02:40:34.883482Z","iopub.execute_input":"2026-01-25T02:40:34.883970Z","iopub.status.idle":"2026-01-25T02:41:01.073021Z","shell.execute_reply.started":"2026-01-25T02:40:34.883941Z","shell.execute_reply":"2026-01-25T02:41:01.072305Z"}},"outputs":[{"name":"stderr","text":"2026-01-25 02:40:43.336620: E 
external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\nE0000 00:00:1769308843.577053 55 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\nE0000 00:00:1769308843.646489 55 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\nW0000 00:00:1769308844.208861 55 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\nW0000 00:00:1769308844.208903 55 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\nW0000 00:00:1769308844.208906 55 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\nW0000 00:00:1769308844.208908 55 computation_placer.cc:177] computation placer already registered. 
Please check linkage and avoid linking the same target more than once.\n","output_type":"stream"}],"execution_count":2},{"cell_type":"code","source":"# =====================================================================\n# CELL 6: Image Preprocessing Functions (Biplet)\n# =====================================================================\ndef normalize_image_sizes_biplet(input_dir, output_dir=None, size=1024):\n \"\"\"\n Generates two square crops (Left & Right or Top & Bottom)\n from each image in a directory.\n \"\"\"\n if output_dir is None:\n output_dir = input_dir + \"_biplet\"\n\n os.makedirs(output_dir, exist_ok=True)\n\n print(f\"\\n=== Generating Biplet Crops ({size}x{size}) ===\")\n\n converted_count = 0\n size_stats = {}\n\n for img_file in tqdm(sorted(os.listdir(input_dir)), desc=\"Creating biplets\"):\n if not img_file.lower().endswith(('.jpg', '.jpeg', '.png')):\n continue\n\n input_path = os.path.join(input_dir, img_file)\n\n try:\n img = Image.open(input_path)\n original_size = img.size\n\n size_key = f\"{original_size[0]}x{original_size[1]}\"\n size_stats[size_key] = size_stats.get(size_key, 0) + 1\n\n # Generate 2 crops\n crops = generate_two_crops(img, size)\n\n base_name, ext = os.path.splitext(img_file)\n for mode, cropped_img in crops.items():\n output_path = os.path.join(output_dir, f\"{base_name}_{mode}{ext}\")\n cropped_img.save(output_path, quality=95)\n\n converted_count += 1\n\n except Exception as e:\n print(f\" ✗ Error processing {img_file}: {e}\")\n\n print(f\"\\n✓ Biplet generation complete:\")\n print(f\" Source images: {converted_count}\")\n print(f\" Biplet crops generated: {converted_count * 2}\")\n print(f\" Original size distribution: {size_stats}\")\n\n return output_dir\n\n\ndef generate_two_crops(img, size):\n \"\"\"\n Crops the image into a square and returns 2 variations\n \"\"\"\n width, height = img.size\n crop_size = min(width, height)\n crops = {}\n\n if width > height:\n # Landscape → Left & Right\n positions = 
{\n 'left': 0,\n 'right': width - crop_size\n }\n for mode, x_offset in positions.items():\n box = (x_offset, 0, x_offset + crop_size, crop_size)\n crops[mode] = img.crop(box).resize(\n (size, size),\n Image.Resampling.LANCZOS\n )\n else:\n # Portrait or Square → Top & Bottom\n positions = {\n 'top': 0,\n 'bottom': height - crop_size\n }\n for mode, y_offset in positions.items():\n box = (0, y_offset, crop_size, y_offset + crop_size)\n crops[mode] = img.crop(box).resize(\n (size, size),\n Image.Resampling.LANCZOS\n )\n\n return crops\n\n# =====================================================================\n# CELL 7: Image Loading Function\n# =====================================================================\ndef load_images_from_directory(image_dir, max_images=200):\n \"\"\"ディレクトリから画像をロード\"\"\"\n print(f\"\\nLoading images from: {image_dir}\")\n\n valid_extensions = {'.jpg', '.jpeg', '.png', '.bmp'}\n image_paths = []\n\n for ext in valid_extensions:\n image_paths.extend(sorted(Path(image_dir).glob(f'*{ext}')))\n image_paths.extend(sorted(Path(image_dir).glob(f'*{ext.upper()}')))\n\n image_paths = sorted(set(str(p) for p in image_paths))\n\n if len(image_paths) > max_images:\n print(f\"⚠️ Limiting from {len(image_paths)} to {max_images} images\")\n image_paths = image_paths[:max_images]\n\n print(f\"✓ Found {len(image_paths)} images\")\n return image_paths\n\n# =====================================================================\n# CELL 8: MASt3R Model Loading\n# =====================================================================\ndef load_mast3r_model(device):\n \"\"\"MASt3Rモデルをロード\"\"\"\n print(\"\\n=== Loading MASt3R Model ===\")\n\n if '/kaggle/working/mast3r' not in sys.path:\n sys.path.insert(0, '/kaggle/working/mast3r')\n if '/kaggle/working/mast3r/dust3r' not in sys.path:\n sys.path.insert(0, '/kaggle/working/mast3r/dust3r')\n\n from dust3r.model import AsymmetricCroCo3DStereo\n\n try:\n print(f\"Attempting to load: {Config.MAST3R_WEIGHTS}\")\n 
model = AsymmetricCroCo3DStereo.from_pretrained(Config.MAST3R_WEIGHTS).to(device)\n print(\"✓ Loaded MASt3R model\")\n except Exception as e:\n print(f\"⚠️ Failed to load MASt3R: {e}\")\n print(f\"Trying DUSt3R instead: {Config.DUST3R_WEIGHTS}\")\n model = AsymmetricCroCo3DStereo.from_pretrained(Config.DUST3R_WEIGHTS).to(device)\n print(\"✓ Loaded DUSt3R model as fallback\")\n\n model.eval()\n print(f\"✓ Model loaded on {device}\")\n return model","metadata":{"trusted":true,"id":"OWJEB1oQTKyD","outputId":"fa123527-2b15-4fa5-8d3c-c830ccc43365","execution":{"iopub.status.busy":"2026-01-25T02:41:01.074181Z","iopub.execute_input":"2026-01-25T02:41:01.074913Z","iopub.status.idle":"2026-01-25T02:41:01.090864Z","shell.execute_reply.started":"2026-01-25T02:41:01.074876Z","shell.execute_reply":"2026-01-25T02:41:01.090018Z"}},"outputs":[],"execution_count":3},{"cell_type":"code","source":"# =====================================================================\n# CELL 9: DINO Pair Selection (REPLACES ASMK)\n# =====================================================================\ndef load_torch_image(fname, device):\n \"\"\"Load image as torch tensor\"\"\"\n import torchvision.transforms as T\n\n img = Image.open(fname).convert('RGB')\n transform = T.Compose([\n T.ToTensor(),\n ])\n return transform(img).unsqueeze(0).to(device)\n\ndef extract_dino_global(image_paths, model_path, device):\n \"\"\"Extract DINO global descriptors with memory management\"\"\"\n print(\"\\n=== Extracting DINO Global Features ===\")\n print(\"Initial memory state:\")\n get_memory_info()\n\n processor = AutoImageProcessor.from_pretrained(model_path)\n model = AutoModel.from_pretrained(model_path).eval().to(device)\n\n global_descs = []\n batch_size = 4 # Small batch to save memory\n\n for i in tqdm(range(0, len(image_paths), batch_size), desc=\"DINO extraction\"):\n batch_paths = image_paths[i:i+batch_size]\n batch_imgs = []\n\n for img_path in batch_paths:\n img = load_torch_image(img_path, 
device)\n batch_imgs.append(img)\n\n batch_tensor = torch.cat(batch_imgs, dim=0)\n\n with torch.no_grad():\n inputs = processor(images=batch_tensor, return_tensors=\"pt\", do_rescale=False).to(device)\n outputs = model(**inputs)\n desc = F.normalize(outputs.last_hidden_state[:, 1:].max(dim=1)[0], dim=1, p=2)\n global_descs.append(desc.cpu())\n\n # Clear batch memory\n del batch_tensor, inputs, outputs, desc\n clear_memory()\n\n global_descs = torch.cat(global_descs, dim=0)\n\n del model, processor\n clear_memory()\n\n print(\"After DINO extraction:\")\n get_memory_info()\n\n return global_descs\n\ndef build_topk_pairs(global_feats, k, device):\n \"\"\"Build top-k similar pairs from global features\"\"\"\n g = global_feats.to(device)\n sim = g @ g.T\n sim.fill_diagonal_(-1)\n\n N = sim.size(0)\n k = min(k, N - 1)\n\n topk_indices = torch.topk(sim, k, dim=1).indices.cpu()\n\n pairs = []\n for i in range(N):\n for j in topk_indices[i]:\n j = j.item()\n if i < j:\n pairs.append((i, j))\n\n # Remove duplicates\n pairs = list(set(pairs))\n\n return pairs\n\ndef select_diverse_pairs(pairs, max_pairs, num_images):\n \"\"\"\n Select diverse pairs to ensure good image coverage\n \"\"\"\n import random\n random.seed(42)\n\n if len(pairs) <= max_pairs:\n return pairs\n\n print(f\"Selecting {max_pairs} diverse pairs from {len(pairs)} candidates...\")\n\n # Count how many times each image appears in pairs\n image_counts = {i: 0 for i in range(num_images)}\n for i, j in pairs:\n image_counts[i] += 1\n image_counts[j] += 1\n\n # Sort pairs by: prefer pairs with less-connected images\n def pair_score(pair):\n i, j = pair\n return image_counts[i] + image_counts[j]\n\n pairs_scored = [(pair, pair_score(pair)) for pair in pairs]\n pairs_scored.sort(key=lambda x: x[1])\n\n # Select pairs greedily to maximize coverage\n selected = []\n selected_images = set()\n\n # Phase 1: Select pairs that add new images\n for pair, score in pairs_scored:\n if len(selected) >= max_pairs:\n break\n i, 
j = pair\n if i not in selected_images or j not in selected_images:\n selected.append(pair)\n selected_images.add(i)\n selected_images.add(j)\n\n # Phase 2: Fill remaining slots\n if len(selected) < max_pairs:\n remaining = [p for p, s in pairs_scored if p not in selected]\n random.shuffle(remaining)\n selected.extend(remaining[:max_pairs - len(selected)])\n\n print(f\"Selected pairs cover {len(selected_images)} / {num_images} images ({100*len(selected_images)/num_images:.1f}%)\")\n\n return selected\n\ndef get_image_pairs_dino(image_paths, max_pairs=None):\n \"\"\"DINO-based pair selection\"\"\"\n device = Config.DEVICE\n\n # DINO global features\n global_feats = extract_dino_global(image_paths, Config.DINO_MODEL, device)\n pairs = build_topk_pairs(global_feats, Config.GLOBAL_TOPK, device)\n\n print(f\"Initial pairs from DINO: {len(pairs)}\")\n\n # Apply intelligent pair selection if limit specified\n if max_pairs and len(pairs) > max_pairs:\n pairs = select_diverse_pairs(pairs, max_pairs, len(image_paths))\n\n return pairs","metadata":{"trusted":true,"id":"OWJEB1oQTKyD","outputId":"fa123527-2b15-4fa5-8d3c-c830ccc43365","execution":{"iopub.status.busy":"2026-01-25T02:41:01.091971Z","iopub.execute_input":"2026-01-25T02:41:01.092268Z","iopub.status.idle":"2026-01-25T02:41:01.155412Z","shell.execute_reply.started":"2026-01-25T02:41:01.092242Z","shell.execute_reply":"2026-01-25T02:41:01.154507Z"}},"outputs":[],"execution_count":4},{"cell_type":"code","source":"# =====================================================================\n# CELL 10: MASt3R Reconstruction\n# =====================================================================\ndef run_mast3r_pairs(model, image_paths, pairs, device, batch_size=1, max_pairs=None):\n \"\"\"Run MASt3R on selected pairs with memory management\"\"\"\n print(\"\\n=== Running MASt3R Reconstruction ===\")\n print(\"Initial memory state:\")\n get_memory_info()\n\n from dust3r.inference import inference\n from dust3r.cloud_opt import 
global_aligner, GlobalAlignerMode\n from dust3r.utils.image import load_images\n\n # Limit number of pairs if specified\n if max_pairs and len(pairs) > max_pairs:\n print(f\"Limiting pairs from {len(pairs)} to {max_pairs}\")\n step = max(1, len(pairs) // max_pairs)\n pairs = pairs[::step][:max_pairs]\n\n print(f\"Processing {len(pairs)} pairs...\")\n\n # Load images in smaller size\n print(f\"Loading {len(image_paths)} images at {Config.IMAGE_SIZE}x{Config.IMAGE_SIZE}...\")\n images = load_images(image_paths, size=Config.IMAGE_SIZE)\n\n print(f\"Loaded {len(images)} images\")\n print(\"After loading images:\")\n get_memory_info()\n\n # Create all image pairs\n print(f\"Creating {len(pairs)} image pairs...\")\n mast3r_pairs = []\n for idx1, idx2 in tqdm(pairs, desc=\"Preparing pairs\"):\n mast3r_pairs.append((images[idx1], images[idx2]))\n\n print(f\"Running MASt3R inference on {len(mast3r_pairs)} pairs...\")\n\n # Run inference\n output = inference(mast3r_pairs, model, device, batch_size=batch_size, verbose=True)\n\n del mast3r_pairs\n clear_memory()\n\n print(\"✓ MASt3R inference complete\")\n print(\"After inference:\")\n get_memory_info()\n\n # Global alignment\n print(\"Running global alignment...\")\n scene = global_aligner(\n output,\n device=device,\n mode=GlobalAlignerMode.PointCloudOptimizer\n )\n\n del output\n clear_memory()\n\n print(\"Computing global alignment...\")\n loss = scene.compute_global_alignment(\n init=\"mst\",\n niter=50, # Reduced iterations\n schedule='cosine',\n lr=0.01\n )\n\n print(f\"✓ Global alignment complete (final loss: {loss:.6f})\")\n print(\"Final memory state:\")\n get_memory_info()\n\n return scene, 
images","metadata":{"trusted":true,"id":"OWJEB1oQTKyD","outputId":"fa123527-2b15-4fa5-8d3c-c830ccc43365","execution":{"iopub.status.busy":"2026-01-25T02:41:01.156509Z","iopub.execute_input":"2026-01-25T02:41:01.156957Z","iopub.status.idle":"2026-01-25T02:41:01.172261Z","shell.execute_reply.started":"2026-01-25T02:41:01.156921Z","shell.execute_reply":"2026-01-25T02:41:01.171599Z"}},"outputs":[],"execution_count":5},{"cell_type":"code","source":"# =====================================================================\n# CELL 11: Camera Parameter Extraction (修正版)\n# =====================================================================\ndef extract_camera_params_process2(scene, image_paths, conf_threshold=1.5):\n \"\"\"sceneからカメラパラメータと3D点を抽出(スケーリング修正版)\"\"\"\n print(\"\\n=== Extracting Camera Parameters ===\")\n\n cameras_dict = {}\n all_pts3d = []\n all_confidence = []\n\n try:\n if hasattr(scene, 'get_im_poses'):\n poses = scene.get_im_poses()\n elif hasattr(scene, 'im_poses'):\n poses = scene.im_poses\n else:\n poses = None\n\n if hasattr(scene, 'get_focals'):\n focals = scene.get_focals()\n elif hasattr(scene, 'im_focals'):\n focals = scene.im_focals\n else:\n focals = None\n\n if hasattr(scene, 'get_principal_points'):\n pps = scene.get_principal_points()\n elif hasattr(scene, 'im_pp'):\n pps = scene.im_pp\n else:\n pps = None\n except Exception as e:\n print(f\"⚠️ Error getting camera parameters: {e}\")\n poses = None\n focals = None\n pps = None\n\n # 【重要】MASt3Rの処理サイズ\n mast3r_size = 224.0\n\n n_images = min(len(poses) if poses is not None else len(image_paths), len(image_paths))\n\n for idx in range(n_images):\n img_name = os.path.basename(image_paths[idx])\n\n try:\n # 元画像のサイズを取得\n img = Image.open(image_paths[idx])\n W, H = img.size\n img.close()\n\n # スケール比を計算\n scale = W / mast3r_size\n\n # Poseを取得(camera-to-worldをworld-to-cameraに変換)\n if poses is not None and idx < len(poses):\n pose_c2w = poses[idx]\n if isinstance(pose_c2w, torch.Tensor):\n pose_c2w = 
pose_c2w.detach().cpu().numpy()\n if not isinstance(pose_c2w, np.ndarray) or pose_c2w.shape != (4, 4):\n pose_c2w = np.eye(4)\n\n # world-to-camera に変換\n pose = np.linalg.inv(pose_c2w)\n else:\n pose = np.eye(4)\n\n # Focalを取得してスケーリング\n if focals is not None and idx < len(focals):\n focal_mast3r = focals[idx]\n if isinstance(focal_mast3r, torch.Tensor):\n focal_mast3r = focal_mast3r.detach().cpu().item()\n else:\n focal_mast3r = float(focal_mast3r)\n\n # 🔧 スケーリング適用\n if focals.shape[1] == 1:\n # 等方性カメラ(fx = fy)\n focal = focal_mast3r * scale\n else:\n # 異方性カメラ\n focal = float(focals[idx, 0]) * scale\n else:\n focal = 1000.0\n\n # Principal pointを取得してスケーリング\n if pps is not None and idx < len(pps):\n pp_mast3r = pps[idx]\n if isinstance(pp_mast3r, torch.Tensor):\n pp_mast3r = pp_mast3r.detach().cpu().numpy()\n\n # 🔧 スケーリング適用\n pp = pp_mast3r * scale\n else:\n pp = np.array([W / 2.0, H / 2.0])\n\n # カメラパラメータを保存\n cameras_dict[img_name] = {\n 'focal': focal,\n 'pp': pp,\n 'pose': pose,\n 'rotation': pose[:3, :3],\n 'translation': pose[:3, 3],\n 'width': W,\n 'height': H\n }\n\n # デバッグ情報(最初の画像のみ)\n if idx == 0:\n print(f\"\\nExample camera 0:\")\n print(f\" Image size: {W}x{H}\")\n print(f\" MASt3R size: {mast3r_size}\")\n print(f\" Scale factor: {scale:.3f}\")\n print(f\" MASt3R focal: {focal_mast3r:.2f}\")\n print(f\" Scaled focal: {focal:.2f}\")\n print(f\" MASt3R pp: [{pp_mast3r[0]:.2f}, {pp_mast3r[1]:.2f}]\")\n print(f\" Scaled pp: [{pp[0]:.2f}, {pp[1]:.2f}]\")\n\n # 3D点を取得\n if hasattr(scene, 'im_pts3d') and idx < len(scene.im_pts3d):\n pts3d_img = scene.im_pts3d[idx]\n elif hasattr(scene, 'get_pts3d'):\n pts3d_all = scene.get_pts3d()\n if idx < len(pts3d_all):\n pts3d_img = pts3d_all[idx]\n else:\n pts3d_img = None\n else:\n pts3d_img = None\n\n # Confidenceを取得\n if hasattr(scene, 'im_conf') and idx < len(scene.im_conf):\n conf_img = scene.im_conf[idx]\n elif hasattr(scene, 'get_conf'):\n conf_all = scene.get_conf()\n if idx < len(conf_all):\n conf_img = 
conf_all[idx]\n else:\n conf_img = None\n else:\n conf_img = None\n\n # 3D点とconfidenceを処理\n if pts3d_img is not None:\n if isinstance(pts3d_img, torch.Tensor):\n pts3d_img = pts3d_img.detach().cpu().numpy()\n\n if pts3d_img.ndim == 3:\n pts3d_flat = pts3d_img.reshape(-1, 3)\n else:\n pts3d_flat = pts3d_img\n\n all_pts3d.append(pts3d_flat)\n\n # confidenceを処理\n if conf_img is not None:\n if isinstance(conf_img, list):\n conf_img = np.array(conf_img)\n elif isinstance(conf_img, torch.Tensor):\n conf_img = conf_img.detach().cpu().numpy()\n\n if conf_img.ndim > 1:\n conf_flat = conf_img.reshape(-1)\n else:\n conf_flat = conf_img\n\n if len(conf_flat) != len(pts3d_flat):\n conf_flat = np.ones(len(pts3d_flat))\n\n all_confidence.append(conf_flat)\n else:\n all_confidence.append(np.ones(len(pts3d_flat)))\n\n except Exception as e:\n print(f\"⚠️ Error processing image {idx} ({img_name}): {e}\")\n # ��フォルト値でもスケーリングを適用\n img = Image.open(image_paths[idx])\n W, H = img.size\n img.close()\n\n cameras_dict[img_name] = {\n 'focal': 1000.0 * (W / mast3r_size),\n 'pp': np.array([W / 2.0, H / 2.0]),\n 'pose': np.eye(4),\n 'rotation': np.eye(3),\n 'translation': np.zeros(3),\n 'width': W,\n 'height': H\n }\n continue\n\n # 全3D点を結合\n if all_pts3d:\n pts3d = np.vstack(all_pts3d)\n confidence = np.concatenate(all_confidence)\n else:\n pts3d = np.zeros((0, 3))\n confidence = np.zeros(0)\n\n print(f\"✓ Extracted camera parameters for {len(cameras_dict)} cameras\")\n print(f\"✓ Total 3D points: {len(pts3d)}\")\n\n # Confidenceでフィルタリング\n if len(confidence) > 0:\n valid_mask = confidence > conf_threshold\n pts3d = pts3d[valid_mask]\n confidence = confidence[valid_mask]\n print(f\"✓ After confidence filtering (>{conf_threshold}): {len(pts3d)} points\")\n\n return cameras_dict, pts3d, 
confidence\n","metadata":{"id":"YSt2RDqmviUa","trusted":true,"execution":{"iopub.status.busy":"2026-01-25T02:41:01.173300Z","iopub.execute_input":"2026-01-25T02:41:01.173719Z","iopub.status.idle":"2026-01-25T02:41:01.195158Z","shell.execute_reply.started":"2026-01-25T02:41:01.173694Z","shell.execute_reply":"2026-01-25T02:41:01.194519Z"}},"outputs":[],"execution_count":6},{"cell_type":"code","source":"# =====================================================================\n# CELL 12: COLMAP Export Functions (PINHOLE版)\n# =====================================================================\n\nimport struct\nimport numpy as np\nfrom pathlib import Path\n\ndef rotmat_to_qvec(R):\n \"\"\"回転行列をクォータニオンに変換\"\"\"\n R = np.asarray(R, dtype=np.float64)\n trace = np.trace(R)\n\n if trace > 0:\n s = 0.5 / np.sqrt(trace + 1.0)\n w = 0.25 / s\n x = (R[2, 1] - R[1, 2]) * s\n y = (R[0, 2] - R[2, 0]) * s\n z = (R[1, 0] - R[0, 1]) * s\n elif R[0, 0] > R[1, 1] and R[0, 0] > R[2, 2]:\n s = 2.0 * np.sqrt(1.0 + R[0, 0] - R[1, 1] - R[2, 2])\n w = (R[2, 1] - R[1, 2]) / s\n x = 0.25 * s\n y = (R[0, 1] + R[1, 0]) / s\n z = (R[0, 2] + R[2, 0]) / s\n elif R[1, 1] > R[2, 2]:\n s = 2.0 * np.sqrt(1.0 + R[1, 1] - R[0, 0] - R[2, 2])\n w = (R[0, 2] - R[2, 0]) / s\n x = (R[0, 1] + R[1, 0]) / s\n y = 0.25 * s\n z = (R[1, 2] + R[2, 1]) / s\n else:\n s = 2.0 * np.sqrt(1.0 + R[2, 2] - R[0, 0] - R[1, 1])\n w = (R[1, 0] - R[0, 1]) / s\n x = (R[0, 2] + R[2, 0]) / s\n y = (R[1, 2] + R[2, 1]) / s\n z = 0.25 * s\n\n qvec = np.array([w, x, y, z], dtype=np.float64)\n qvec = qvec / np.linalg.norm(qvec)\n\n return qvec\n\n\ndef write_cameras_binary(cameras_dict, image_size, output_file):\n \"\"\"\n cameras.binを出力(PINHOLEモデル使用)\n \"\"\"\n width, height = image_size\n num_cameras = len(cameras_dict)\n\n # COLMAP camera models\n PINHOLE = 1 # 🔧 SIMPLE_PINHOLE (0) から PINHOLE (1) に変更\n\n with open(output_file, 'wb') as f:\n f.write(struct.pack('Q', num_cameras))\n\n for camera_id, (img_id, cam_params) in 
enumerate(cameras_dict.items(), start=1):\n focal = cam_params['focal']\n\n # PINHOLEの場合: fx, fy, cx, cy\n fx = fy = focal # 等方性カメラを仮定\n\n # Principal pointを取得(存在しない場合は中心)\n if 'pp' in cam_params:\n pp = cam_params['pp']\n cx = float(pp[0])\n cy = float(pp[1])\n else:\n cx = width / 2.0\n cy = height / 2.0\n\n # camera_id\n f.write(struct.pack('I', camera_id))\n # model_id (PINHOLE = 1)\n f.write(struct.pack('i', PINHOLE))\n # width\n f.write(struct.pack('Q', width))\n # height\n f.write(struct.pack('Q', height))\n # params: fx, fy, cx, cy (4パラメータ)\n f.write(struct.pack('d', fx))\n f.write(struct.pack('d', fy))\n f.write(struct.pack('d', cx))\n f.write(struct.pack('d', cy))\n\n print(f\"COLMAP cameras.bin saved to {output_file}\")\n\n\ndef write_images_binary(cameras_dict, output_file):\n \"\"\"images.binを出力\"\"\"\n num_images = len(cameras_dict)\n\n with open(output_file, 'wb') as f:\n f.write(struct.pack('Q', num_images))\n\n for image_id, (img_id, cam_params) in enumerate(cameras_dict.items(), start=1):\n R = cam_params['rotation']\n quat = rotmat_to_qvec(R)\n t = cam_params['translation']\n camera_id = image_id\n\n f.write(struct.pack('I', image_id))\n for q in quat:\n f.write(struct.pack('d', q))\n for ti in t:\n f.write(struct.pack('d', ti))\n f.write(struct.pack('I', camera_id))\n\n name_bytes = img_id.encode('utf-8') + b'\\x00'\n f.write(name_bytes)\n f.write(struct.pack('Q', 0))\n\n print(f\"COLMAP images.bin saved to {output_file}\")\n\n\ndef write_points3D_binary(pts3d, confidence, output_file):\n \"\"\"points3D.binを出力\"\"\"\n num_points = len(pts3d)\n\n with open(output_file, 'wb') as f:\n f.write(struct.pack('Q', num_points))\n\n for point_id, pt in enumerate(pts3d, start=1):\n x, y, z = pt\n\n f.write(struct.pack('Q', point_id))\n f.write(struct.pack('d', x))\n f.write(struct.pack('d', y))\n f.write(struct.pack('d', z))\n\n # RGB (グレー)\n f.write(struct.pack('B', 128))\n f.write(struct.pack('B', 128))\n f.write(struct.pack('B', 128))\n\n # error\n if 
confidence is not None and point_id <= len(confidence):\n error = 1.0 / max(confidence[point_id-1], 0.001)\n else:\n error = 1.0\n f.write(struct.pack('d', error))\n\n # track_length\n f.write(struct.pack('Q', 0))\n\n print(f\"COLMAP points3D.bin saved to {output_file}\")\n\n\ndef export_colmap_binary(cameras_dict, pts3d, confidence, image_size, output_dir):\n \"\"\"COLMAPバイナリファイルを出力\"\"\"\n output_path = Path(output_dir)\n output_path.mkdir(parents=True, exist_ok=True)\n\n write_cameras_binary(\n cameras_dict,\n image_size,\n output_path / 'cameras.bin'\n )\n\n write_images_binary(\n cameras_dict,\n output_path / 'images.bin'\n )\n\n write_points3D_binary(\n pts3d,\n confidence,\n output_path / 'points3D.bin'\n )\n\n print(f\"\\nCOLMAP binary files exported to {output_dir}/\")\n print(f\" - cameras.bin: {len(cameras_dict)} cameras (PINHOLE model)\")\n print(f\" - images.bin: {len(cameras_dict)} images\")\n print(f\" - points3D.bin: {len(pts3d)} points\")","metadata":{"id":"jNk5C0k1zkLD","trusted":true,"execution":{"iopub.status.busy":"2026-01-25T02:41:01.197274Z","iopub.execute_input":"2026-01-25T02:41:01.197581Z","iopub.status.idle":"2026-01-25T02:41:01.218478Z","shell.execute_reply.started":"2026-01-25T02:41:01.197551Z","shell.execute_reply":"2026-01-25T02:41:01.217805Z"}},"outputs":[],"execution_count":7},{"cell_type":"code","source":"","metadata":{"id":"gDbmwRKsEkYi","trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"# =====================================================================\n# CELL 20: Traditional Method Functions (for comparison)\n# =====================================================================\nimport struct\nimport numpy as np\nfrom pathlib import Path\n\n# ===== 従来法: extract_colmap_data =====\ndef extract_colmap_data_traditional(scene, image_paths, max_points=1000000):\n \"\"\"\n 従来法: MASt3Rシーンから COLMAP互換データを抽出\n (dino-mast3r-gs-kg-34oo.ipynb からの抽出)\n \"\"\"\n print(\"\\n=== [TRADITIONAL] Extracting 
COLMAP-compatible data ===\")\n\n # Extract point cloud\n pts_all = scene.get_pts3d()\n print(f\"pts_all type: {type(pts_all)}\")\n\n if isinstance(pts_all, list):\n print(f\"pts_all is a list with {len(pts_all)} elements\")\n if len(pts_all) > 0:\n print(f\"First element type: {type(pts_all[0])}\")\n if hasattr(pts_all[0], 'shape'):\n print(f\"First element shape: {pts_all[0].shape}\")\n\n pts_all = torch.stack([p if isinstance(p, torch.Tensor) else torch.tensor(p)\n for p in pts_all])\n print(f\"pts_all shape after conversion: {pts_all.shape}\")\n\n if len(pts_all.shape) == 4:\n print(f\"Found batched point cloud: {pts_all.shape}\")\n B, H, W, _ = pts_all.shape\n pts3d = pts_all.reshape(-1, 3).detach().cpu().numpy()\n\n # Extract colors\n colors = []\n for img_path in image_paths:\n img = Image.open(img_path).resize((W, H))\n colors.append(np.array(img))\n colors = np.stack(colors).reshape(-1, 3) / 255.0\n else:\n pts3d = pts_all.detach().cpu().numpy() if isinstance(pts_all, torch.Tensor) else pts_all\n colors = np.ones((len(pts3d), 3)) * 0.5\n\n print(f\"✓ Extracted {len(pts3d)} 3D points from {len(image_paths)} images\")\n\n # Downsample points\n if len(pts3d) > max_points:\n print(f\"\\n⚠ Downsampling from {len(pts3d)} to {max_points} points...\")\n valid_mask = ~(np.isnan(pts3d).any(axis=1) | np.isinf(pts3d).any(axis=1))\n pts3d_valid = pts3d[valid_mask]\n colors_valid = colors[valid_mask]\n indices = np.random.choice(len(pts3d_valid), size=max_points, replace=False)\n pts3d = pts3d_valid[indices]\n colors = colors_valid[indices]\n print(f\"✓ Downsampled to {len(pts3d)} points\")\n\n # Extract camera parameters\n print(\"Extracting camera parameters...\")\n\n # 【重要】camera-to-world を world-to-camera に変換\n poses_c2w = scene.get_im_poses().detach().cpu().numpy()\n print(f\"Retrieved camera-to-world poses: shape {poses_c2w.shape}\")\n\n poses = []\n for i, pose_c2w in enumerate(poses_c2w):\n pose_w2c = np.linalg.inv(pose_c2w)\n poses.append(pose_w2c)\n poses = 
np.array(poses)\n print(f\"Converted to world-to-camera poses for COLMAP\")\n\n focals = scene.get_focals().detach().cpu().numpy()\n pp = scene.get_principal_points().detach().cpu().numpy()\n print(f\"Focals shape: {focals.shape}\")\n print(f\"Principal points shape: {pp.shape}\")\n\n mast3r_size = 224.0\n\n cameras = []\n for i, img_path in enumerate(image_paths):\n img = Image.open(img_path)\n W, H = img.size\n scale = W / mast3r_size\n\n if focals.shape[1] == 1:\n focal_mast3r = float(focals[i, 0])\n fx = fy = focal_mast3r * scale\n else:\n fx = float(focals[i, 0]) * scale\n fy = float(focals[i, 1]) * scale\n\n cx = float(pp[i, 0]) * scale\n cy = float(pp[i, 1]) * scale\n\n camera = {\n 'camera_id': i + 1,\n 'model': 'PINHOLE',\n 'width': W,\n 'height': H,\n 'params': [fx, fy, cx, cy]\n }\n cameras.append(camera)\n\n if i == 0:\n print(f\"\\nExample camera 0:\")\n print(f\" Image size: {W}x{H}\")\n print(f\" MASt3R focal: {focal_mast3r:.2f}, pp: ({pp[i,0]:.2f}, {pp[i,1]:.2f})\")\n print(f\" Scaled fx={fx:.2f}, fy={fy:.2f}, cx={cx:.2f}, cy={cy:.2f}\")\n print(f\" Pose (first row): {poses[i][0]}\")\n\n print(f\"\\n✓ Extracted {len(cameras)} cameras and {len(poses)} poses\")\n\n return pts3d, colors, cameras, poses\n\n\n# ===== 従来法: rotmat2qvec =====\ndef rotmat2qvec_traditional(R):\n \"\"\"従来法: 回転行列をクォータニオンに変換\"\"\"\n R = np.asarray(R, dtype=np.float64)\n trace = np.trace(R)\n\n if trace > 0:\n s = 0.5 / np.sqrt(trace + 1.0)\n w = 0.25 / s\n x = (R[2, 1] - R[1, 2]) * s\n y = (R[0, 2] - R[2, 0]) * s\n z = (R[1, 0] - R[0, 1]) * s\n elif R[0, 0] > R[1, 1] and R[0, 0] > R[2, 2]:\n s = 2.0 * np.sqrt(1.0 + R[0, 0] - R[1, 1] - R[2, 2])\n w = (R[2, 1] - R[1, 2]) / s\n x = 0.25 * s\n y = (R[0, 1] + R[1, 0]) / s\n z = (R[0, 2] + R[2, 0]) / s\n elif R[1, 1] > R[2, 2]:\n s = 2.0 * np.sqrt(1.0 + R[1, 1] - R[0, 0] - R[2, 2])\n w = (R[0, 2] - R[2, 0]) / s\n x = (R[0, 1] + R[1, 0]) / s\n y = 0.25 * s\n z = (R[1, 2] + R[2, 1]) / s\n else:\n s = 2.0 * np.sqrt(1.0 + R[2, 2] - R[0, 
0] - R[1, 1])\n w = (R[1, 0] - R[0, 1]) / s\n x = (R[0, 2] + R[2, 0]) / s\n y = (R[1, 2] + R[2, 1]) / s\n z = 0.25 * s\n\n qvec = np.array([w, x, y, z], dtype=np.float64)\n qvec = qvec / np.linalg.norm(qvec)\n\n return qvec\n\n\n# ===== 従来法: save関数群 =====\ndef write_cameras_binary_traditional(cameras, output_file):\n \"\"\"従来法: cameras.binを書き込み\"\"\"\n with open(output_file, 'wb') as f:\n f.write(struct.pack('Q', len(cameras)))\n\n for i, cam in enumerate(cameras):\n camera_id = cam.get('camera_id', i + 1)\n model_id = 1 # PINHOLE\n width = cam['width']\n height = cam['height']\n params = cam['params']\n\n f.write(struct.pack('i', camera_id))\n f.write(struct.pack('i', model_id))\n f.write(struct.pack('Q', width))\n f.write(struct.pack('Q', height))\n\n for param in params[:4]:\n f.write(struct.pack('d', param))\n\n\ndef write_images_binary_traditional(image_paths, cameras, poses, output_file):\n \"\"\"従来法: images.binを書き込み\"\"\"\n with open(output_file, 'wb') as f:\n f.write(struct.pack('Q', len(image_paths)))\n\n for i, (img_path, pose) in enumerate(zip(image_paths, poses)):\n image_id = i + 1\n camera_id = cameras[i].get('camera_id', i + 1)\n image_name = os.path.basename(img_path)\n\n R = pose[:3, :3]\n t = pose[:3, 3]\n qvec = rotmat2qvec_traditional(R)\n tvec = t\n\n f.write(struct.pack('i', image_id))\n for q in qvec:\n f.write(struct.pack('d', float(q)))\n for tv in tvec:\n f.write(struct.pack('d', float(tv)))\n f.write(struct.pack('i', camera_id))\n f.write(image_name.encode('utf-8') + b'\\x00')\n f.write(struct.pack('Q', 0))\n\n\ndef write_points3d_binary_traditional(pts3d, colors, output_file):\n \"\"\"従来法: points3D.binを書き込み\"\"\"\n valid_indices = []\n for i, pt in enumerate(pts3d):\n if not (np.isnan(pt).any() or np.isinf(pt).any()):\n valid_indices.append(i)\n\n with open(output_file, 'wb') as f:\n f.write(struct.pack('Q', len(valid_indices)))\n\n for idx, point_id in enumerate(valid_indices):\n pt = pts3d[point_id]\n color = colors[point_id]\n\n 
f.write(struct.pack('Q', point_id))\n for coord in pt:\n f.write(struct.pack('d', float(coord)))\n\n col_int = (color * 255).astype(np.uint8)\n for c in col_int:\n f.write(struct.pack('B', int(c)))\n\n f.write(struct.pack('d', 0.0))\n f.write(struct.pack('Q', 0))\n\n return len(valid_indices)\n\n\ndef save_colmap_reconstruction_traditional(pts3d, colors, cameras, poses, image_paths, output_dir):\n \"\"\"従来法: COLMAP再構成を保存\"\"\"\n print(\"\\n=== [TRADITIONAL] Saving COLMAP reconstruction ===\")\n\n sparse_dir = Path(output_dir) / 'sparse_traditional' / '0'\n sparse_dir.mkdir(parents=True, exist_ok=True)\n\n write_cameras_binary_traditional(cameras, sparse_dir / 'cameras.bin')\n print(f\" ✓ Wrote {len(cameras)} cameras\")\n\n write_images_binary_traditional(image_paths, cameras, poses, sparse_dir / 'images.bin')\n print(f\" ✓ Wrote {len(image_paths)} images\")\n\n num_points = write_points3d_binary_traditional(pts3d, colors, sparse_dir / 'points3D.bin')\n print(f\" ✓ Wrote {num_points} 3D points\")\n\n print(f\"\\n✓ Traditional COLMAP reconstruction saved to {sparse_dir}\")\n\n return sparse_dir","metadata":{"id":"kIrrlZXQEkSA","trusted":true,"execution":{"iopub.status.busy":"2026-01-25T02:41:01.219607Z","iopub.execute_input":"2026-01-25T02:41:01.219981Z","iopub.status.idle":"2026-01-25T02:41:01.250918Z","shell.execute_reply.started":"2026-01-25T02:41:01.219948Z","shell.execute_reply":"2026-01-25T02:41:01.250218Z"}},"outputs":[],"execution_count":8},{"cell_type":"code","source":"# =====================================================================\n# CELL 21: Convert BIN to CSV for Easy Comparison\n# =====================================================================\nimport pandas as pd\nimport struct\n\ndef bin_to_csv_cameras(bin_file, csv_file):\n \"\"\"cameras.bin → CSV\"\"\"\n data = []\n with open(bin_file, 'rb') as f:\n num_cameras = struct.unpack('Q', f.read(8))[0]\n for _ in range(num_cameras):\n camera_id = struct.unpack('i', f.read(4))[0]\n model_id = 
struct.unpack('i', f.read(4))[0]\n width = struct.unpack('Q', f.read(8))[0]\n height = struct.unpack('Q', f.read(8))[0]\n\n # PINHOLE: 4 params\n if model_id == 1:\n params = struct.unpack('dddd', f.read(32))\n # SIMPLE_PINHOLE: 3 params\n else:\n params = struct.unpack('ddd', f.read(24))\n\n data.append({\n 'camera_id': camera_id,\n 'model_id': model_id,\n 'width': width,\n 'height': height,\n 'fx': params[0] if len(params) >= 1 else None,\n 'fy': params[1] if len(params) >= 2 else params[0] if len(params) == 1 else None,\n 'cx': params[2] if len(params) >= 3 else None,\n 'cy': params[3] if len(params) >= 4 else None\n })\n\n df = pd.DataFrame(data)\n df.to_csv(csv_file, index=False)\n print(f\"✓ Cameras CSV saved: {csv_file}\")\n return df\n\n\ndef bin_to_csv_images(bin_file, csv_file):\n \"\"\"images.bin → CSV\"\"\"\n data = []\n with open(bin_file, 'rb') as f:\n num_images = struct.unpack('Q', f.read(8))[0]\n for _ in range(num_images):\n image_id = struct.unpack('i', f.read(4))[0]\n qvec = struct.unpack('dddd', f.read(32))\n tvec = struct.unpack('ddd', f.read(24))\n camera_id = struct.unpack('i', f.read(4))[0]\n\n name = b''\n while True:\n char = f.read(1)\n if char == b'\\x00':\n break\n name += char\n name = name.decode('utf-8')\n\n num_points2D = struct.unpack('Q', f.read(8))[0]\n f.read(num_points2D * 24)\n\n data.append({\n 'image_id': image_id,\n 'qw': qvec[0],\n 'qx': qvec[1],\n 'qy': qvec[2],\n 'qz': qvec[3],\n 'tx': tvec[0],\n 'ty': tvec[1],\n 'tz': tvec[2],\n 'camera_id': camera_id,\n 'name': name\n })\n\n df = pd.DataFrame(data)\n df.to_csv(csv_file, index=False)\n print(f\"✓ Images CSV saved: {csv_file}\")\n return df\n\n\ndef bin_to_csv_points3d(bin_file, csv_file, max_rows=10000):\n \"\"\"points3D.bin → CSV (サンプリング)\"\"\"\n data = []\n with open(bin_file, 'rb') as f:\n num_points = struct.unpack('Q', f.read(8))[0]\n\n # サンプリング間隔を計算\n step = max(1, num_points // max_rows)\n\n for i in range(num_points):\n point_id = struct.unpack('Q', 
f.read(8))[0]\n xyz = struct.unpack('ddd', f.read(24))\n rgb = struct.unpack('BBB', f.read(3))\n error = struct.unpack('d', f.read(8))[0]\n track_length = struct.unpack('Q', f.read(8))[0]\n f.read(track_length * 8)\n\n # サンプリング\n if i % step == 0:\n data.append({\n 'point_id': point_id,\n 'x': xyz[0],\n 'y': xyz[1],\n 'z': xyz[2],\n 'r': rgb[0],\n 'g': rgb[1],\n 'b': rgb[2],\n 'error': error\n })\n\n df = pd.DataFrame(data)\n df.to_csv(csv_file, index=False)\n print(f\"✓ Points3D CSV saved: {csv_file} (sampled {len(df)} / {num_points} points)\")\n return df\n\n\ndef convert_colmap_bins_to_csv(sparse_dir, output_prefix):\n \"\"\"全BINファイルをCSVに変換\"\"\"\n print(f\"\\n=== Converting {sparse_dir} to CSV ===\")\n\n cameras_df = bin_to_csv_cameras(\n os.path.join(sparse_dir, 'cameras.bin'),\n f\"{output_prefix}_cameras.csv\"\n )\n\n images_df = bin_to_csv_images(\n os.path.join(sparse_dir, 'images.bin'),\n f\"{output_prefix}_images.csv\"\n )\n\n points_df = bin_to_csv_points3d(\n os.path.join(sparse_dir, 'points3D.bin'),\n f\"{output_prefix}_points3d.csv\",\n max_rows=10000\n )\n\n return cameras_df, images_df, points_df","metadata":{"id":"c7A05pXLFt2E","trusted":true,"execution":{"iopub.status.busy":"2026-01-25T02:41:01.252069Z","iopub.execute_input":"2026-01-25T02:41:01.252763Z","iopub.status.idle":"2026-01-25T02:41:01.269998Z","shell.execute_reply.started":"2026-01-25T02:41:01.252736Z","shell.execute_reply":"2026-01-25T02:41:01.269345Z"}},"outputs":[],"execution_count":9},{"cell_type":"code","source":"# =====================================================================\n# CELL 22: Comparison Function\n# =====================================================================\n\ndef compare_extraction_methods(scene, image_paths, output_dir, conf_threshold=0.5, max_points=100000):\n \"\"\"\n 新方式と従来法の両方でCOLMAP形式を出力し、比較する\n\n Args:\n scene: MASt3Rのシーンオブジェクト\n image_paths: 画像パスのリスト\n output_dir: 出力ディレクトリ\n conf_threshold: 信頼度閾値(新方式用)\n max_points: 最大点数(従来法用)\n\n Returns:\n 
dict: 比較結果の辞書\n \"\"\"\n print(\"\\n\" + \"=\"*70)\n print(\"COMPARISON: New vs Traditional Extraction Methods\")\n print(\"=\"*70)\n\n # ===== METHOD 1: 新方式 (extract_camera_params_process2) =====\n print(\"\\n--- METHOD 1: Current Implementation (extract_camera_params_process2) ---\")\n\n cameras_dict_new, pts3d_new, confidence_new = extract_camera_params_process2(\n scene=scene,\n image_paths=image_paths,\n conf_threshold=conf_threshold\n )\n\n # 画像サイズを取得\n first_img = Image.open(image_paths[0])\n image_size = (first_img.width, first_img.height)\n first_img.close()\n\n # 新方式のBIN保存\n sparse_dir_new = os.path.join(output_dir, \"sparse_new/0\")\n os.makedirs(sparse_dir_new, exist_ok=True)\n\n export_colmap_binary(\n cameras_dict=cameras_dict_new,\n pts3d=pts3d_new,\n confidence=confidence_new,\n image_size=image_size,\n output_dir=sparse_dir_new\n )\n\n \n # ===== METHOD 2: 従来法 (extract_colmap_data_traditional) =====\n print(\"\\n--- METHOD 2: Traditional Implementation (extract_colmap_data) ---\")\n\n pts3d_trad, colors_trad, cameras_trad, poses_trad = extract_colmap_data_traditional(\n scene=scene,\n image_paths=image_paths,\n max_points=max_points\n )\n\n # 従来法のBIN保存\n sparse_dir_trad = save_colmap_reconstruction_traditional(\n pts3d=pts3d_trad,\n colors=colors_trad,\n cameras=cameras_trad,\n poses=poses_trad,\n image_paths=image_paths,\n output_dir=output_dir\n )\n\n # ===== CSVに変換 =====\n print(\"\\n\" + \"=\"*70)\n print(\"Converting to CSV for comparison\")\n print(\"=\"*70)\n\n csv_prefix_new = os.path.join(output_dir, \"comparison_new\")\n csv_prefix_trad = os.path.join(output_dir, \"comparison_traditional\")\n\n cam_new, img_new, pts_new = convert_colmap_bins_to_csv(\n sparse_dir_new,\n csv_prefix_new\n )\n\n cam_trad, img_trad, pts_trad = convert_colmap_bins_to_csv(\n str(sparse_dir_trad),\n csv_prefix_trad\n )\n\n # ===== 比較サマリー =====\n print(\"\\n\" + \"=\"*70)\n print(\"COMPARISON SUMMARY\")\n print(\"=\"*70)\n\n comparison_results = {\n 'cameras': {\n 
'new_count': len(cam_new),\n 'trad_count': len(cam_trad),\n 'new_focal': float(cam_new.iloc[0]['fx']) if len(cam_new) > 0 else None,\n 'trad_focal': float(cam_trad.iloc[0]['fx']) if len(cam_trad) > 0 else None,\n },\n 'images': {\n 'new_count': len(img_new),\n 'trad_count': len(img_trad),\n 'new_tvec': [float(img_new.iloc[0]['tx']), float(img_new.iloc[0]['ty']), float(img_new.iloc[0]['tz'])] if len(img_new) > 0 else None,\n 'trad_tvec': [float(img_trad.iloc[0]['tx']), float(img_trad.iloc[0]['ty']), float(img_trad.iloc[0]['tz'])] if len(img_trad) > 0 else None,\n },\n 'points': {\n 'new_count': len(pts_new),\n 'trad_count': len(pts_trad),\n 'new_center': [float(pts_new['x'].mean()), float(pts_new['y'].mean()), float(pts_new['z'].mean())] if len(pts_new) > 0 else None,\n 'trad_center': [float(pts_trad['x'].mean()), float(pts_trad['y'].mean()), float(pts_trad['z'].mean())] if len(pts_trad) > 0 else None,\n }\n }\n\n # 結果を表示\n print(\"\\nCAMERAS:\")\n print(f\" New method: {comparison_results['cameras']['new_count']} cameras\")\n print(f\" Traditional method: {comparison_results['cameras']['trad_count']} cameras\")\n if comparison_results['cameras']['new_focal'] and comparison_results['cameras']['trad_focal']:\n print(f\"\\n Sample focal lengths:\")\n print(f\" New: fx={comparison_results['cameras']['new_focal']:.2f}\")\n print(f\" Traditional: fx={comparison_results['cameras']['trad_focal']:.2f}\")\n focal_diff = abs(comparison_results['cameras']['new_focal'] - comparison_results['cameras']['trad_focal'])\n print(f\" Difference: {focal_diff:.2f}\")\n\n print(\"\\nIMAGES:\")\n print(f\" New method: {comparison_results['images']['new_count']} images\")\n print(f\" Traditional method: {comparison_results['images']['trad_count']} images\")\n if comparison_results['images']['new_tvec'] and comparison_results['images']['trad_tvec']:\n print(f\"\\n Sample translation (first image):\")\n print(f\" New: {comparison_results['images']['new_tvec']}\")\n print(f\" Traditional: 
{comparison_results['images']['trad_tvec']}\")\n tvec_diff = np.linalg.norm(\n np.array(comparison_results['images']['new_tvec']) -\n np.array(comparison_results['images']['trad_tvec'])\n )\n print(f\" Distance: {tvec_diff:.3f}\")\n\n print(\"\\nPOINTS3D:\")\n print(f\" New method: {comparison_results['points']['new_count']} points (sampled)\")\n print(f\" Traditional method: {comparison_results['points']['trad_count']} points (sampled)\")\n if comparison_results['points']['new_center'] and comparison_results['points']['trad_center']:\n print(f\"\\n Center of points:\")\n print(f\" New: {comparison_results['points']['new_center']}\")\n print(f\" Traditional: {comparison_results['points']['trad_center']}\")\n center_diff = np.linalg.norm(\n np.array(comparison_results['points']['new_center']) -\n np.array(comparison_results['points']['trad_center'])\n )\n print(f\" Distance: {center_diff:.3f}\")\n\n print(\"\\n\" + \"=\"*70)\n print(\"CSV FILES SAVED:\")\n print(\"=\"*70)\n print(f\" New method:\")\n print(f\" - {csv_prefix_new}_cameras.csv\")\n print(f\" - {csv_prefix_new}_images.csv\")\n print(f\" - {csv_prefix_new}_points3d.csv\")\n print(f\" Traditional method:\")\n print(f\" - {csv_prefix_trad}_cameras.csv\")\n print(f\" - {csv_prefix_trad}_images.csv\")\n print(f\" - {csv_prefix_trad}_points3d.csv\")\n\n print(\"\\n✓ Comparison complete! 
def main_pipeline(image_dir, output_dir, square_size=1024, iterations=30000,
                  max_images=200, max_pairs=100, max_points=500000,
                  conf_threshold=1.001, preprocess_mode='none'):
    """Run the full DINO + MASt3R -> COLMAP pipeline.

    Args:
        image_dir: directory containing the input images.
        output_dir: root directory for all pipeline outputs.
        square_size: side length (pixels) of the square biplet crops.
        iterations: reserved for downstream optimization; not used here.
        max_images: cap on the number of images loaded.
        max_pairs: cap on DINO-selected image pairs (further clamped to 50).
        max_points: point-count cap for the traditional extraction path.
        conf_threshold: confidence threshold for camera/point extraction.
        preprocess_mode: 'biplet' to generate directional square crops,
            anything else copies images through unchanged.

    Returns:
        The comparison-results dict produced by compare_extraction_methods.
    """

    # ---- STEP 0: image preprocessing ------------------------------------
    if preprocess_mode == 'biplet':
        print("="*70)
        print("STEP 0: Image Preprocessing (Biplet Crops)")
        print("="*70)

        temp_biplet_dir = os.path.join(output_dir, "temp_biplet")
        normalize_image_sizes_biplet(image_dir, temp_biplet_dir, size=square_size)

        images_dir = os.path.join(output_dir, "images")
        os.makedirs(images_dir, exist_ok=True)

        # Keep only the directional crops produced by the biplet step.
        biplet_suffixes = ['_left', '_right', '_top', '_bottom']
        copied_count = 0
        for img_file in os.listdir(temp_biplet_dir):
            if any(suffix in img_file for suffix in biplet_suffixes):
                src = os.path.join(temp_biplet_dir, img_file)
                dst = os.path.join(images_dir, img_file)
                shutil.copy2(src, dst)
                copied_count += 1

        print(f"✓ Copied {copied_count} biplet images to {images_dir}")

        # Preserve the untouched originals next to the crops for reference.
        original_images_dir = os.path.join(output_dir, "original_images")
        os.makedirs(original_images_dir, exist_ok=True)

        original_count = 0
        valid_extensions = ('.jpg', '.jpeg', '.png', '.bmp')
        for img_file in os.listdir(image_dir):
            if img_file.lower().endswith(valid_extensions):
                src = os.path.join(image_dir, img_file)
                dst = os.path.join(original_images_dir, img_file)
                shutil.copy2(src, dst)
                original_count += 1

        print(f"✓ Saved {original_count} original images to {original_images_dir}")
        shutil.rmtree(temp_biplet_dir)
        image_dir = images_dir
        clear_memory()
    else:
        images_dir = os.path.join(output_dir, "images")
        if not os.path.exists(images_dir):
            print("="*70)
            print("STEP 0: Copying images to output directory")
            print("="*70)
            shutil.copytree(image_dir, images_dir)
            print(f"✓ Copied images to {images_dir}")
        image_dir = images_dir

    # ---- STEP 1: load images --------------------------------------------
    print("\n" + "="*70)
    print("STEP 1: Loading and Preparing Images")
    print("="*70)

    image_paths = load_images_from_directory(image_dir, max_images=max_images)
    print(f"Loaded {len(image_paths)} images")
    clear_memory()

    # ---- STEP 2: pair selection via DINO features -----------------------
    print("\n" + "="*70)
    print("STEP 2: Image Pair Selection (DINO)")
    print("="*70)

    max_pairs = min(max_pairs, 50)  # hard cap to bound MASt3R runtime
    pairs = get_image_pairs_dino(image_paths, max_pairs=max_pairs)
    print(f"Selected {len(pairs)} image pairs")
    clear_memory()

    # ---- STEP 3: MASt3R reconstruction ----------------------------------
    print("\n" + "="*70)
    print("STEP 3: MASt3R 3D Reconstruction")
    print("="*70)

    device = Config.DEVICE
    model = load_mast3r_model(device)
    scene, mast3r_images = run_mast3r_pairs(model, image_paths, pairs, device)

    del model  # free GPU memory before the export stage
    clear_memory()

    # ---- STEP 4: COLMAP export (CELL 11/12) -----------------------------
    print("\n" + "="*70)
    print("STEP 4: Converting to COLMAP (PINHOLE)")
    print("="*70)

    cameras_dict, pts3d, confidence = extract_camera_params_process2(
        scene=scene,
        image_paths=image_paths,
        conf_threshold=conf_threshold
    )
    print(f"Extracted {len(cameras_dict)} cameras with conf >= {conf_threshold}")

    # Image size taken from the first image (all crops share one size).
    from PIL import Image
    with Image.open(image_paths[0]) as first_img:
        image_size = (first_img.width, first_img.height)

    colmap_dir = os.path.join(output_dir, "sparse/0")
    os.makedirs(colmap_dir, exist_ok=True)

    export_colmap_binary(
        cameras_dict=cameras_dict,
        pts3d=pts3d,
        confidence=confidence,
        image_size=image_size,
        output_dir=colmap_dir
    )

    # Fix: this call previously hardcoded output_dir="/kaggle/working/output",
    # conf_threshold=0.5 and max_points=1000000, ignoring the pipeline's own
    # parameters. It now honors them, and the result is returned to the caller.
    comparison_results = compare_extraction_methods(
        scene=scene,
        image_paths=image_paths,
        output_dir=output_dir,
        conf_threshold=conf_threshold,
        max_points=max_points
    )
    return comparison_results
)\n\n","metadata":{"trusted":true,"id":"_-8kDLieTKyG","outputId":"beafd1de-a25c-4273-dfcb-10ca5301abb7","execution":{"iopub.status.busy":"2026-01-25T02:41:01.309239Z","iopub.execute_input":"2026-01-25T02:41:01.309599Z","iopub.status.idle":"2026-01-25T02:42:29.432868Z","shell.execute_reply.started":"2026-01-25T02:41:01.309563Z","shell.execute_reply":"2026-01-25T02:42:29.432000Z"}},"outputs":[{"name":"stdout","text":"======================================================================\nSTEP 0: Image Preprocessing (Biplet Crops)\n======================================================================\n\n=== Generating Biplet Crops (1024x1024) ===\n","output_type":"stream"},{"name":"stderr","text":"Creating biplets: 100%|██████████| 40/40 [00:06<00:00, 6.58it/s]\n","output_type":"stream"},{"name":"stdout","text":"\n✓ Biplet generation complete:\n Source images: 40\n Biplet crops generated: 80\n Original size distribution: {'1440x1920': 40}\n✓ Copied 80 biplet images to /kaggle/working/output/images\n✓ Saved 40 original images to /kaggle/working/output/original_images\n\n======================================================================\nSTEP 1: Loading and Preparing Images\n======================================================================\n\nLoading images from: /kaggle/working/output/images\n⚠️ Limiting from 80 to 40 images\n✓ Found 40 images\nLoaded 40 images\n\n======================================================================\nSTEP 2: Image Pair Selection (DINO)\n======================================================================\n\n=== Extracting DINO Global Features ===\nInitial memory state:\nGPU Memory - Allocated: 0.00GB, Reserved: 0.00GB\nCPU Memory Usage: 6.8%\n","output_type":"stream"},{"output_type":"display_data","data":{"text/plain":"preprocessor_config.json: 0%| | 0.00/436 [00:00<?, 
?B/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"d4f6d65a727c4ee49ad6fdb41de62a1c"}},"metadata":{}},{"name":"stderr","text":"Using a slow image processor as `use_fast` is unset and a slow processor was saved with this model. `use_fast=True` will be the default behavior in v4.52, even if the model was saved with a slow processor. This will result in minor differences in outputs. You'll still be able to use a slow processor with `use_fast=False`.\n","output_type":"stream"},{"output_type":"display_data","data":{"text/plain":"config.json: 0%| | 0.00/548 [00:00<?, ?B/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"eb3e3a61ec144027a0f431ce09ec5870"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"model.safetensors: 0%| | 0.00/346M [00:00<?, ?B/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"d19fcb2d2ffd4083b22ad530edcc8ced"}},"metadata":{}},{"name":"stderr","text":"DINO extraction: 100%|██████████| 10/10 [00:10<00:00, 1.02s/it]\n","output_type":"stream"},{"name":"stdout","text":"After DINO extraction:\nGPU Memory - Allocated: 0.05GB, Reserved: 0.07GB\nCPU Memory Usage: 8.0%\nInitial pairs from DINO: 395\nSelecting 50 diverse pairs from 395 candidates...\nSelected pairs cover 40 / 40 images (100.0%)\nSelected 50 image pairs\n\n======================================================================\nSTEP 3: MASt3R 3D Reconstruction\n======================================================================\n\n=== Loading MASt3R Model ===\nAttempting to load: naver/MASt3R_ViTLarge_BaseDecoder_512_catmlpdpt_metric\n","output_type":"stream"},{"output_type":"display_data","data":{"text/plain":"config.json: 0%| | 0.00/546 [00:00<?, ?B/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"7caf325843494b7bbdb57fbc155c0c26"}},"metadata":{}},{"name":"stdout","text":"⚠️ Failed to 
load MASt3R: tried to load naver/MASt3R_ViTLarge_BaseDecoder_512_catmlpdpt_metric from huggingface, but failed\nTrying DUSt3R instead: naver/DUSt3R_ViTLarge_BaseDecoder_512_dpt\n","output_type":"stream"},{"output_type":"display_data","data":{"text/plain":"config.json: 0%| | 0.00/450 [00:00<?, ?B/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"04de3f58638f49198e28758d54f1d11b"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"model.safetensors: 0%| | 0.00/2.28G [00:00<?, ?B/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"1512a1abbe6543e59bcff834e578156e"}},"metadata":{}},{"name":"stdout","text":"✓ Loaded DUSt3R model as fallback\n✓ Model loaded on cuda\n\n=== Running MASt3R Reconstruction ===\nInitial memory state:\nGPU Memory - Allocated: 2.14GB, Reserved: 2.25GB\nCPU Memory Usage: 15.4%\n/!\\ module trimesh is not installed, cannot visualize results /!\\\nProcessing 50 pairs...\nLoading 40 images at 224x224...\n>> Loading a list of 40 images\n - adding /kaggle/working/output/images/image_001_bottom.jpeg with resolution 1024x1024 --> 224x224\n - adding /kaggle/working/output/images/image_001_top.jpeg with resolution 1024x1024 --> 224x224\n - adding /kaggle/working/output/images/image_002_bottom.jpeg with resolution 1024x1024 --> 224x224\n - adding /kaggle/working/output/images/image_002_top.jpeg with resolution 1024x1024 --> 224x224\n - adding /kaggle/working/output/images/image_003_bottom.jpeg with resolution 1024x1024 --> 224x224\n - adding /kaggle/working/output/images/image_003_top.jpeg with resolution 1024x1024 --> 224x224\n - adding /kaggle/working/output/images/image_004_bottom.jpeg with resolution 1024x1024 --> 224x224\n","output_type":"stream"},{"name":"stderr","text":"/kaggle/working/mast3r/dust3r/dust3r/cloud_opt/base_opt.py:275: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. 
Please use `torch.amp.autocast('cuda', args...)` instead.\n @torch.cuda.amp.autocast(enabled=False)\n","output_type":"stream"},{"name":"stdout","text":" - adding /kaggle/working/output/images/image_004_top.jpeg with resolution 1024x1024 --> 224x224\n - adding /kaggle/working/output/images/image_005_bottom.jpeg with resolution 1024x1024 --> 224x224\n - adding /kaggle/working/output/images/image_005_top.jpeg with resolution 1024x1024 --> 224x224\n - adding /kaggle/working/output/images/image_006_bottom.jpeg with resolution 1024x1024 --> 224x224\n - adding /kaggle/working/output/images/image_006_top.jpeg with resolution 1024x1024 --> 224x224\n - adding /kaggle/working/output/images/image_007_bottom.jpeg with resolution 1024x1024 --> 224x224\n - adding /kaggle/working/output/images/image_007_top.jpeg with resolution 1024x1024 --> 224x224\n - adding /kaggle/working/output/images/image_008_bottom.jpeg with resolution 1024x1024 --> 224x224\n - adding /kaggle/working/output/images/image_008_top.jpeg with resolution 1024x1024 --> 224x224\n - adding /kaggle/working/output/images/image_009_bottom.jpeg with resolution 1024x1024 --> 224x224\n - adding /kaggle/working/output/images/image_009_top.jpeg with resolution 1024x1024 --> 224x224\n - adding /kaggle/working/output/images/image_010_bottom.jpeg with resolution 1024x1024 --> 224x224\n - adding /kaggle/working/output/images/image_010_top.jpeg with resolution 1024x1024 --> 224x224\n - adding /kaggle/working/output/images/image_011_bottom.jpeg with resolution 1024x1024 --> 224x224\n - adding /kaggle/working/output/images/image_011_top.jpeg with resolution 1024x1024 --> 224x224\n - adding /kaggle/working/output/images/image_012_bottom.jpeg with resolution 1024x1024 --> 224x224\n - adding /kaggle/working/output/images/image_012_top.jpeg with resolution 1024x1024 --> 224x224\n - adding /kaggle/working/output/images/image_013_bottom.jpeg with resolution 1024x1024 --> 224x224\n - adding 
/kaggle/working/output/images/image_013_top.jpeg with resolution 1024x1024 --> 224x224\n - adding /kaggle/working/output/images/image_014_bottom.jpeg with resolution 1024x1024 --> 224x224\n - adding /kaggle/working/output/images/image_014_top.jpeg with resolution 1024x1024 --> 224x224\n - adding /kaggle/working/output/images/image_015_bottom.jpeg with resolution 1024x1024 --> 224x224\n - adding /kaggle/working/output/images/image_015_top.jpeg with resolution 1024x1024 --> 224x224\n - adding /kaggle/working/output/images/image_016_bottom.jpeg with resolution 1024x1024 --> 224x224\n - adding /kaggle/working/output/images/image_016_top.jpeg with resolution 1024x1024 --> 224x224\n - adding /kaggle/working/output/images/image_017_bottom.jpeg with resolution 1024x1024 --> 224x224\n - adding /kaggle/working/output/images/image_017_top.jpeg with resolution 1024x1024 --> 224x224\n - adding /kaggle/working/output/images/image_018_bottom.jpeg with resolution 1024x1024 --> 224x224\n - adding /kaggle/working/output/images/image_018_top.jpeg with resolution 1024x1024 --> 224x224\n - adding /kaggle/working/output/images/image_019_bottom.jpeg with resolution 1024x1024 --> 224x224\n - adding /kaggle/working/output/images/image_019_top.jpeg with resolution 1024x1024 --> 224x224\n - adding /kaggle/working/output/images/image_020_bottom.jpeg with resolution 1024x1024 --> 224x224\n - adding /kaggle/working/output/images/image_020_top.jpeg with resolution 1024x1024 --> 224x224\n (Found 40 images)\nLoaded 40 images\nAfter loading images:\nGPU Memory - Allocated: 2.14GB, Reserved: 2.25GB\nCPU Memory Usage: 15.4%\nCreating 50 image pairs...\n","output_type":"stream"},{"name":"stderr","text":"Preparing pairs: 100%|██████████| 50/50 [00:00<00:00, 373823.89it/s]\n","output_type":"stream"},{"name":"stdout","text":"Running MASt3R inference on 50 pairs...\n>> Inference with model on 50 image pairs\n","output_type":"stream"},{"name":"stderr","text":" 0%| | 0/50 [00:00<?, 
?it/s]/kaggle/working/mast3r/dust3r/dust3r/inference.py:44: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n with torch.cuda.amp.autocast(enabled=bool(use_amp)):\n/kaggle/working/mast3r/dust3r/dust3r/model.py:206: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n with torch.cuda.amp.autocast(enabled=False):\n/kaggle/working/mast3r/dust3r/dust3r/inference.py:48: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n with torch.cuda.amp.autocast(enabled=False):\n100%|██████████| 50/50 [00:10<00:00, 4.68it/s]\n","output_type":"stream"},{"name":"stdout","text":"✓ MASt3R inference complete\nAfter inference:\nGPU Memory - Allocated: 2.14GB, Reserved: 2.25GB\nCPU Memory Usage: 15.3%\nRunning global alignment...\nComputing global alignment...\n init edge (0*,16*) score=np.float64(44.60002517700195)\n init edge (6*,16) score=np.float64(20.98052215576172)\n init edge (4*,16) score=np.float64(19.74667739868164)\n init edge (10*,16) score=np.float64(19.293317794799805)\n init edge (12*,16) score=np.float64(17.656387329101562)\n init edge (8*,16) score=np.float64(16.112581253051758)\n init edge (2*,16) score=np.float64(15.698960304260254)\n init edge (9*,12) score=np.float64(14.307255744934082)\n init edge (16,18*) score=np.float64(12.891231536865234)\n init edge (0,5*) score=np.float64(11.348612785339355)\n init edge (9,15*) score=np.float64(10.28466796875)\n init edge (10,11*) score=np.float64(5.987066745758057)\n init edge (2,14*) score=np.float64(17.447965621948242)\n init edge (7*,18) score=np.float64(15.877448081970215)\n init edge (15,33*) score=np.float64(10.359328269958496)\n init edge (14,32*) score=np.float64(23.232666015625)\n init edge (25*,32) score=np.float64(19.815927505493164)\n init edge (30*,32) score=np.float64(15.164072036743164)\n init 
edge (30,34*) score=np.float64(14.744616508483887)\n init edge (24*,30) score=np.float64(11.129143714904785)\n init edge (30,39*) score=np.float64(10.864166259765625)\n init edge (24,27*) score=np.float64(7.825746059417725)\n init edge (30,31*) score=np.float64(6.444053649902344)\n init edge (25,37*) score=np.float64(20.014677047729492)\n init edge (30,36*) score=np.float64(19.37171745300293)\n init edge (30,38*) score=np.float64(19.233217239379883)\n init edge (21*,37) score=np.float64(18.236967086791992)\n init edge (23*,37) score=np.float64(18.08700180053711)\n init edge (19*,37) score=np.float64(16.084945678710938)\n init edge (26*,38) score=np.float64(15.977051734924316)\n init edge (3*,21) score=np.float64(15.387314796447754)\n init edge (22*,24) score=np.float64(13.66196060180664)\n init edge (3,35*) score=np.float64(12.696600914001465)\n init edge (1*,35) score=np.float64(12.07399845123291)\n init edge (28*,38) score=np.float64(10.643644332885742)\n init edge (17*,37) score=np.float64(6.972182273864746)\n init edge (29*,37) score=np.float64(6.877660274505615)\n init edge (20*,38) score=np.float64(4.943045139312744)\n init edge (13*,35) score=np.float64(17.546167373657227)\n init loss = 0.018458731472492218\nGlobal alignement - optimizing for:\n['pw_poses', 'im_depthmaps', 'im_poses', 'im_focals']\n","output_type":"stream"},{"name":"stderr","text":" 0%| | 0/50 [00:00<?, ?it/s]/kaggle/working/mast3r/dust3r/dust3r/cloud_opt/base_opt.py:366: UserWarning: Converting a tensor with requires_grad=True to a scalar may lead to unexpected behavior.\nConsider using tensor.detach() first. 
(Triggered internally at /pytorch/torch/csrc/autograd/generated/python_variable_methods.cpp:835.)\n return float(loss), lr\n100%|██████████| 50/50 [00:02<00:00, 18.89it/s, lr=1.08654e-05 loss=0.0124874]\n","output_type":"stream"},{"name":"stdout","text":"✓ Global alignment complete (final loss: 0.012487)\nFinal memory state:\nGPU Memory - Allocated: 2.34GB, Reserved: 2.86GB\nCPU Memory Usage: 15.6%\n\n======================================================================\nSTEP 4: Converting to COLMAP (PINHOLE)\n======================================================================\n\n=== Extracting Camera Parameters ===\n\nExample camera 0:\n Image size: 1024x1024\n MASt3R size: 224.0\n Scale factor: 4.571\n MASt3R focal: 386.99\n Scaled focal: 1769.09\n MASt3R pp: [112.00, 112.00]\n Scaled pp: [512.00, 512.00]\n✓ Extracted camera parameters for 40 cameras\n✓ Total 3D points: 2007040\n✓ After confidence filtering (>0.5): 2007040 points\nExtracted 40 cameras with conf >= 0.5\nCOLMAP cameras.bin saved to /kaggle/working/output/sparse/0/cameras.bin\nCOLMAP images.bin saved to /kaggle/working/output/sparse/0/images.bin\nCOLMAP points3D.bin saved to /kaggle/working/output/sparse/0/points3D.bin\n\nCOLMAP binary files exported to /kaggle/working/output/sparse/0/\n - cameras.bin: 40 cameras (PINHOLE model)\n - images.bin: 40 images\n - points3D.bin: 2007040 points\n\n======================================================================\nCOMPARISON: New vs Traditional Extraction Methods\n======================================================================\n\n--- METHOD 1: Current Implementation (extract_camera_params_process2) ---\n\n=== Extracting Camera Parameters ===\n\nExample camera 0:\n Image size: 1024x1024\n MASt3R size: 224.0\n Scale factor: 4.571\n MASt3R focal: 386.99\n Scaled focal: 1769.09\n MASt3R pp: [112.00, 112.00]\n Scaled pp: [512.00, 512.00]\n✓ Extracted camera parameters for 40 cameras\n✓ Total 3D points: 2007040\n✓ After confidence filtering (>0.5): 
2007040 points\nCOLMAP cameras.bin saved to /kaggle/working/output/sparse_new/0/cameras.bin\nCOLMAP images.bin saved to /kaggle/working/output/sparse_new/0/images.bin\nCOLMAP points3D.bin saved to /kaggle/working/output/sparse_new/0/points3D.bin\n\nCOLMAP binary files exported to /kaggle/working/output/sparse_new/0/\n - cameras.bin: 40 cameras (PINHOLE model)\n - images.bin: 40 images\n - points3D.bin: 2007040 points\n\n--- METHOD 2: Traditional Implementation (extract_colmap_data) ---\n\n=== [TRADITIONAL] Extracting COLMAP-compatible data ===\npts_all type: <class 'list'>\npts_all is a list with 40 elements\nFirst element type: <class 'torch.Tensor'>\nFirst element shape: torch.Size([224, 224, 3])\npts_all shape after conversion: torch.Size([40, 224, 224, 3])\nFound batched point cloud: torch.Size([40, 224, 224, 3])\n✓ Extracted 2007040 3D points from 40 images\n\n⚠ Downsampling from 2007040 to 1000000 points...\n✓ Downsampled to 1000000 points\nExtracting camera parameters...\nRetrieved camera-to-world poses: shape (40, 4, 4)\nConverted to world-to-camera poses for COLMAP\nFocals shape: (40, 1)\nPrincipal points shape: (40, 2)\n\nExample camera 0:\n Image size: 1024x1024\n MASt3R focal: 386.99, pp: (112.00, 112.00)\n Scaled fx=1769.09, fy=1769.09, cx=512.00, cy=512.00\n Pose (first row): [ 9.9999821e-01 -1.7473311e-03 -7.5700833e-04 0.0000000e+00]\n\n✓ Extracted 40 cameras and 40 poses\n\n=== [TRADITIONAL] Saving COLMAP reconstruction ===\n ✓ Wrote 40 cameras\n ✓ Wrote 40 images\n ✓ Wrote 1000000 3D points\n\n✓ Traditional COLMAP reconstruction saved to /kaggle/working/output/sparse_traditional/0\n\n======================================================================\nConverting to CSV for comparison\n======================================================================\n\n=== Converting /kaggle/working/output/sparse_new/0 to CSV ===\n✓ Cameras CSV saved: /kaggle/working/output/comparison_new_cameras.csv\n✓ Images CSV saved: 
/kaggle/working/output/comparison_new_images.csv\n✓ Points3D CSV saved: /kaggle/working/output/comparison_new_points3d.csv (sampled 10036 / 2007040 points)\n\n=== Converting /kaggle/working/output/sparse_traditional/0 to CSV ===\n✓ Cameras CSV saved: /kaggle/working/output/comparison_traditional_cameras.csv\n✓ Images CSV saved: /kaggle/working/output/comparison_traditional_images.csv\n✓ Points3D CSV saved: /kaggle/working/output/comparison_traditional_points3d.csv (sampled 10000 / 1000000 points)\n\n======================================================================\nCOMPARISON SUMMARY\n======================================================================\n\nCAMERAS:\n New method: 40 cameras\n Traditional method: 40 cameras\n\n Sample focal lengths:\n New: fx=1769.09\n Traditional: fx=1769.09\n Difference: 0.00\n\nIMAGES:\n New method: 40 images\n Traditional method: 40 images\n\n Sample translation (first image):\n New: [0.0, 0.0, 0.0]\n Traditional: [0.0, 0.0, 0.0]\n Distance: 0.000\n\nPOINTS3D:\n New method: 10036 points (sampled)\n Traditional method: 10000 points (sampled)\n\n Center of points:\n New: [0.039150189647130934, -0.024783492704790706, 0.3044062349819101]\n Traditional: [0.0414237033680809, -0.024715829414328618, 0.304460435988009]\n Distance: 0.002\n\n======================================================================\nCSV FILES SAVED:\n======================================================================\n New method:\n - /kaggle/working/output/comparison_new_cameras.csv\n - /kaggle/working/output/comparison_new_images.csv\n - /kaggle/working/output/comparison_new_points3d.csv\n Traditional method:\n - /kaggle/working/output/comparison_traditional_cameras.csv\n - /kaggle/working/output/comparison_traditional_images.csv\n - /kaggle/working/output/comparison_traditional_points3d.csv\n\n✓ Comparison complete! 
Review CSV files for detailed analysis.\n","output_type":"stream"}],"execution_count":12},{"cell_type":"code","source":"","metadata":{"trusted":true},"outputs":[],"execution_count":null}]}
|