stpete2 committed on
Commit 566e7e3 · verified · 1 Parent(s): 616dbcc

Delete asmk-mast3r-ps2-gs-kg-06.ipynb

Files changed (1)
  1. asmk-mast3r-ps2-gs-kg-06.ipynb +0 -1
asmk-mast3r-ps2-gs-kg-06.ipynb DELETED
@@ -1 +0,0 @@
- {"metadata":{"kernelspec":{"name":"python3","display_name":"Python 3","language":"python"},"language_info":{"name":"python","version":"3.12.12","mimetype":"text/x-python","codemirror_mode":{"name":"ipython","version":3},"pygments_lexer":"ipython3","nbconvert_exporter":"python","file_extension":".py"},"colab":{"provenance":[],"gpuType":"T4"},"accelerator":"GPU","kaggle":{"accelerator":"nvidiaTeslaT4","dataSources":[{"sourceId":14554378,"sourceType":"datasetVersion","datasetId":1429416}],"dockerImageVersionId":31236,"isInternetEnabled":true,"language":"python","sourceType":"notebook","isGpuEnabled":true}},"nbformat_minor":4,"nbformat":4,"cells":[{"cell_type":"markdown","source":"# **asmk-mast3r-ps2-gs-kg** \n\n","metadata":{"id":"qDQLX3PArmh8"}},{"cell_type":"markdown","source":"https://www.kaggle.com/code/stpeteishii/dino-mast3r-gs-kg-34","metadata":{}},{"cell_type":"code","source":"!pip install opencv-python pillow tqdm pyaml cython\n!pip install pycolmap trimesh\n!pip uninstall -y numpy scipy\n!pip install numpy==1.26.4 scipy==1.11.4\n\nbreak","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"# restart, then run after","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-01-21T09:37:56.941463Z","iopub.execute_input":"2026-01-21T09:37:56.942169Z","iopub.status.idle":"2026-01-21T09:37:56.946289Z","shell.execute_reply.started":"2026-01-21T09:37:56.942136Z","shell.execute_reply":"2026-01-21T09:37:56.945365Z"}},"outputs":[],"execution_count":1},{"cell_type":"code","source":"import numpy as np\nprint(f\"✓ np: {np.__version__} - {np.__file__}\")\n!pip show numpy | grep Version","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-01-21T09:37:56.947810Z","iopub.execute_input":"2026-01-21T09:37:56.948732Z","iopub.status.idle":"2026-01-21T09:37:59.242479Z","shell.execute_reply.started":"2026-01-21T09:37:56.948691Z","shell.execute_reply":"2026-01-21T09:37:59.241513Z"}},"outputs":[{"name":"stdout","text":"✓ np: 1.26.4 - /usr/local/lib/python3.12/dist-packages/numpy/__init__.py\nVersion: 1.26.4\nVersion 3.1, 31 March 2009\n Version 3, 29 June 2007\n 5. Conveying Modified Source Versions.\n 14. 

In [ ]:
"""
Complete MASt3R to Gaussian Splatting Pipeline
Process2 Only - Memory Optimized Version
"""

import os
import sys
import gc
import torch
import numpy as np
from pathlib import Path
from tqdm import tqdm
import torch.nn.functional as F

# ======================================================================
# MEMORY MANAGEMENT
# ======================================================================

os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'expandable_segments:True'

def clear_memory():
    """Release Python and CUDA memory."""
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        torch.cuda.synchronize()


# ======================================================================
# CONFIGURATION
# ======================================================================

class Config:
    DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Correct MASt3R checkpoint name
    MAST3R_WEIGHTS = "naver/MASt3R_ViTLarge_BaseDecoder_512_catmlpdpt_metric"
    DUST3R_WEIGHTS = "naver/DUSt3R_ViTLarge_BaseDecoder_512_dpt"  # fallback
    RETRIEVAL_TOPK = 10
    IMAGE_SIZE = 224  # kept at 224 to save memory


# ======================================================================
# IMAGE LOADING
# ======================================================================

def load_images_from_directory(image_dir, max_images=200):
    """Collect image paths from a directory."""
    print(f"\nLoading images from: {image_dir}")

    valid_extensions = {'.jpg', '.jpeg', '.png', '.bmp'}
    image_paths = []

    for ext in valid_extensions:
        image_paths.extend(sorted(Path(image_dir).glob(f'*{ext}')))
        image_paths.extend(sorted(Path(image_dir).glob(f'*{ext.upper()}')))

    image_paths = sorted(set(str(p) for p in image_paths))

    if len(image_paths) > max_images:
        print(f"⚠️ Limiting from {len(image_paths)} to {max_images} images")
        image_paths = image_paths[:max_images]

    print(f"✓ Found {len(image_paths)} images")
    return image_paths


# ======================================================================
# MAST3R MODEL
# ======================================================================

def load_mast3r_model(device):
    """Load the MASt3R model, falling back to DUSt3R."""
    print("\n=== Loading MASt3R Model ===")

    # Add the mast3r checkout to the import path
    if '/kaggle/working/mast3r' not in sys.path:
        sys.path.insert(0, '/kaggle/working/mast3r')
    if '/kaggle/working/mast3r/dust3r' not in sys.path:
        sys.path.insert(0, '/kaggle/working/mast3r/dust3r')

    from dust3r.model import AsymmetricCroCo3DStereo

    try:
        # Try the MASt3R checkpoint first
        print(f"Attempting to load: {Config.MAST3R_WEIGHTS}")
        model = AsymmetricCroCo3DStereo.from_pretrained(Config.MAST3R_WEIGHTS).to(device)
        print("✓ Loaded MASt3R model")
    except Exception as e:
        print(f"⚠️ Failed to load MASt3R: {e}")
        print(f"Trying DUSt3R instead: {Config.DUST3R_WEIGHTS}")
        try:
            model = AsymmetricCroCo3DStereo.from_pretrained(Config.DUST3R_WEIGHTS).to(device)
            print("✓ Loaded DUSt3R model as fallback")
        except Exception as e2:
            print(f"⚠️ Failed to load DUSt3R: {e2}")
            raise Exception("Could not load any model. Please check model names and internet connection.")

    model.eval()

    print(f"✓ Model loaded on {device}")
    return model
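
# ----------------------------------------------------------------------
# Hedged sketch: the run log below shows the MASt3R checkpoint failing to
# load through dust3r's AsymmetricCroCo3DStereo, after which the pipeline
# silently falls back to DUSt3R. Assuming the checkout at
# /kaggle/working/mast3r is the upstream naver/mast3r repo (which ships
# mast3r.model.AsymmetricMASt3R), the MASt3R weights can also be loaded
# with the class they were trained with:
# ----------------------------------------------------------------------

def load_mast3r_model_v2(device, weights=Config.MAST3R_WEIGHTS):
    """Minimal sketch: load the MASt3R checkpoint with the MASt3R class itself."""
    if '/kaggle/working/mast3r' not in sys.path:
        sys.path.insert(0, '/kaggle/working/mast3r')
    if '/kaggle/working/mast3r/dust3r' not in sys.path:
        sys.path.insert(0, '/kaggle/working/mast3r/dust3r')

    from mast3r.model import AsymmetricMASt3R  # provided by the mast3r checkout

    model = AsymmetricMASt3R.from_pretrained(weights).to(device)
    model.eval()
    return model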

# ======================================================================
# FEATURE EXTRACTION & PAIR SELECTION
# ======================================================================

def load_asmk_retrieval_model(device):
    """Load the ASMK retrieval model."""
    print("\n=== Loading ASMK Retrieval Model ===")

    # Add the mast3r and asmk checkouts to the path
    if '/kaggle/working/mast3r' not in sys.path:
        sys.path.insert(0, '/kaggle/working/mast3r')
    if '/kaggle/working/mast3r/dust3r' not in sys.path:
        sys.path.insert(0, '/kaggle/working/mast3r/dust3r')
    if '/kaggle/working/asmk' not in sys.path:
        sys.path.insert(0, '/kaggle/working/asmk')

    from dust3r.model import AsymmetricCroCo3DStereo

    try:
        # Try the MASt3R model first
        model = AsymmetricCroCo3DStereo.from_pretrained(Config.MAST3R_WEIGHTS).to(device)
        print("✓ Loaded MASt3R model for retrieval")
    except Exception as e:
        print(f"⚠️ Failed to load MASt3R: {e}")
        print("Trying DUSt3R instead")
        model = AsymmetricCroCo3DStereo.from_pretrained(Config.DUST3R_WEIGHTS).to(device)
        print("✓ Loaded DUSt3R model for retrieval")

    model.eval()

    # Initialise the codebook (simplified version)
    codebook = np.random.randn(1024, 24).astype(np.float32)

    print("✓ ASMK model loaded")
    return model, codebook


def extract_mast3r_features(model, image_paths, device, batch_size=1):
    """Extract features with the MASt3R model (each image processed as a pair)."""
    print("\n=== Extracting MASt3R Features ===")
    from dust3r.utils.image import load_images
    from dust3r.inference import inference

    all_features = []

    # Pair each image with itself and run inference
    for i in tqdm(range(len(image_paths)), desc="Features"):
        img_path = image_paths[i]

        # Load the same image twice (as a pair)
        images = load_images([img_path, img_path], size=Config.IMAGE_SIZE)

        # Run inference on the pair
        pairs = [(images[0], images[1])]

        with torch.no_grad():
            output = inference(pairs, model, device, batch_size=1)

        # Inspect the output structure and extract the data
        try:
            # output as a (view1, view2) tuple
            if isinstance(output, tuple) and len(output) == 2:
                view1, view2 = output
                # take the features from view1
                if isinstance(view1, dict):
                    if 'desc' in view1:
                        desc = view1['desc']
                    elif 'pts3d' in view1:
                        desc = view1['pts3d']
                    else:
                        # use the first value
                        desc = list(view1.values())[0]
                else:
                    desc = view1
            # output as a list
            elif isinstance(output, list):
                if len(output) > 0:
                    item = output[0]
                    if isinstance(item, dict):
                        if 'desc' in item:
                            desc = item['desc']
                        elif 'pts3d' in item:
                            desc = item['pts3d']
                        else:
                            desc = list(item.values())[0]
                    else:
                        desc = item
                else:
                    raise ValueError("Empty output")
            else:
                # any other format
                desc = output

            # Normalise the tensor dimensions
            if isinstance(desc, torch.Tensor):
                if desc.dim() == 4:
                    desc = desc.squeeze(0)  # [1, H, W, C] -> [H, W, C]
                elif desc.dim() == 2:
                    # [H*W, C]: reshape to a square map if possible
                    h = w = int(np.sqrt(desc.shape[0]))
                    if h * w == desc.shape[0]:
                        desc = desc.reshape(h, w, desc.shape[1])

            all_features.append(desc)

        except Exception as e:
            print(f"⚠️ Error extracting features for image {i}: {e}")
            print(f"  Output type: {type(output)}")
            if isinstance(output, (list, tuple)):
                print(f"  Output length: {len(output)}")
                if len(output) > 0:
                    print(f"  First item type: {type(output[0])}")
                    if isinstance(output[0], dict):
                        print(f"  Keys: {output[0].keys()}")
            # append a default feature map
            all_features.append(torch.zeros((Config.IMAGE_SIZE, Config.IMAGE_SIZE, 24)))

        # free memory periodically
        del output, images, pairs
        if i % 10 == 0:
            torch.cuda.empty_cache()

    print(f"✓ Extracted features for {len(all_features)} images")
    if all_features:
        feat_shape = all_features[0].shape if isinstance(all_features[0], torch.Tensor) else all_features[0].shape
        print(f"  Feature shape: {feat_shape}")

    return all_features
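
# ----------------------------------------------------------------------
# Hedged sketch: in the dust3r version bundled with mast3r, inference()
# returns a dict with 'view1', 'view2', 'pred1', 'pred2' (and 'loss') rather
# than a tuple or list, which is consistent with the KeyError: 0 raised in
# the run log further down when output[0] is indexed. Under that assumption,
# a per-image descriptor map can be pulled out of the pair output like this:
# ----------------------------------------------------------------------

def features_from_pair_output(output):
    """Minimal sketch: return an [H, W, C] feature map for the first view of a pair."""
    pred1 = output['pred1'] if isinstance(output, dict) else output[0]
    # MASt3R heads expose a local descriptor 'desc'; DUSt3R only has 'pts3d'.
    desc = pred1.get('desc', pred1.get('pts3d'))
    if desc is None:
        raise ValueError(f"No usable key in prediction: {list(pred1.keys())}")
    if isinstance(desc, torch.Tensor) and desc.dim() == 4:  # [B, H, W, C] -> [H, W, C]
        desc = desc[0]
    return desc.detach().cpu()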

def compute_asmk_similarity(features, codebook):
    """Compute the similarity matrix with ASMK."""
    print("\n=== Computing ASMK Similarity ===")

    n_images = len(features)
    similarity_matrix = np.zeros((n_images, n_images), dtype=np.float32)

    # Convert each feature map to a global descriptor
    global_features = []

    for feat in features:
        if isinstance(feat, torch.Tensor):
            feat = feat.cpu().numpy()

        h, w, c = feat.shape
        feat_flat = feat.reshape(-1, c)
        global_desc = np.mean(feat_flat, axis=0)
        global_features.append(global_desc)

    global_features = np.stack(global_features)

    if codebook is not None and len(codebook) > 0:
        try:
            print(f"Using ASMK with codebook size: {len(codebook)}")

            for i in range(n_images):
                feat_i = features[i]
                if isinstance(feat_i, torch.Tensor):
                    feat_i = feat_i.cpu().numpy()
                feat_i = feat_i.reshape(-1, feat_i.shape[-1])

                for j in range(i + 1, n_images):
                    feat_j = features[j]
                    if isinstance(feat_j, torch.Tensor):
                        feat_j = feat_j.cpu().numpy()
                    feat_j = feat_j.reshape(-1, feat_j.shape[-1])

                    dist_i = np.linalg.norm(feat_i[:, None, :] - codebook[None, :, :], axis=2)
                    dist_j = np.linalg.norm(feat_j[:, None, :] - codebook[None, :, :], axis=2)

                    assign_i = np.argmin(dist_i, axis=1)
                    assign_j = np.argmin(dist_j, axis=1)

                    common = len(set(assign_i) & set(assign_j))
                    sim = common / max(len(set(assign_i)), len(set(assign_j)))

                    similarity_matrix[i, j] = sim
                    similarity_matrix[j, i] = sim

                if (i + 1) % 10 == 0:
                    print(f"Processed {i+1}/{n_images} images")

            print("✓ ASMK similarity computation completed")

        except Exception as e:
            print(f"⚠️ ASMK failed: {e}, using cosine similarity")
            global_features_norm = global_features / (np.linalg.norm(global_features, axis=1, keepdims=True) + 1e-8)
            similarity_matrix = global_features_norm @ global_features_norm.T

    else:
        print("No codebook provided, using cosine similarity")
        global_features_norm = global_features / (np.linalg.norm(global_features, axis=1, keepdims=True) + 1e-8)
        similarity_matrix = global_features_norm @ global_features_norm.T

    np.fill_diagonal(similarity_matrix, -1)

    print(f"Similarity matrix shape: {similarity_matrix.shape}")
    print(f"Similarity range: [{similarity_matrix.min():.3f}, {similarity_matrix.max():.3f}]")

    return similarity_matrix
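
# ----------------------------------------------------------------------
# Hedged sketch: the nested loop above recomputes the full feature-to-codeword
# distance matrix of both images for every pair. Assuming the local descriptor
# dimensionality matches the codebook's (24 here), each image can be assigned
# to visual words once and pairs scored by word-set overlap with the same rule:
# ----------------------------------------------------------------------

def assign_words(feat, codebook, chunk=4096):
    """Minimal sketch: nearest-codeword ids for one image's local features."""
    feat = np.asarray(feat, dtype=np.float32)
    feat = feat.reshape(-1, feat.shape[-1])
    words = np.empty(len(feat), dtype=np.int64)
    for s in range(0, len(feat), chunk):  # chunked to bound memory use
        d = np.linalg.norm(feat[s:s+chunk, None, :] - codebook[None, :, :], axis=2)
        words[s:s+chunk] = np.argmin(d, axis=1)
    return np.unique(words)

def word_overlap_similarity(features, codebook):
    """Minimal sketch: pairwise similarity from precomputed visual-word sets."""
    word_sets = [set(assign_words(f.cpu().numpy() if isinstance(f, torch.Tensor) else f, codebook))
                 for f in features]
    n = len(word_sets)
    sim = np.zeros((n, n), dtype=np.float32)
    for i in range(n):
        for j in range(i + 1, n):
            common = len(word_sets[i] & word_sets[j])
            sim[i, j] = sim[j, i] = common / max(len(word_sets[i]), len(word_sets[j]))
    np.fill_diagonal(sim, -1)
    return sim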

def build_pairs_from_similarity(similarity_matrix, top_k=10):
    """Build pairs from the similarity matrix."""
    n_images = similarity_matrix.shape[0]
    pairs = []

    for i in range(n_images):
        similarities = similarity_matrix[i]
        top_indices = np.argsort(similarities)[::-1][:top_k]

        for j in top_indices:
            if j > i:
                pairs.append((i, j))

    pairs = list(set(pairs))
    print(f"✓ Built {len(pairs)} unique pairs")

    return pairs


def get_image_pairs_asmk(image_paths, max_pairs=100):
    """Select image pairs using ASMK."""
    print("\n=== Getting Image Pairs with ASMK ===")

    device = Config.DEVICE
    model, codebook = load_asmk_retrieval_model(device)
    features = extract_mast3r_features(model, image_paths, device)
    similarity_matrix = compute_asmk_similarity(features, codebook)
    pairs = build_pairs_from_similarity(similarity_matrix, Config.RETRIEVAL_TOPK)

    # release the model
    del model
    clear_memory()

    if len(pairs) > max_pairs:
        pairs = pairs[:max_pairs]
        print(f"Limited to {max_pairs} pairs")

    return pairs


# ======================================================================
# MAST3R RECONSTRUCTION
# ======================================================================

def run_mast3r_pairs(model, image_paths, pairs, device, batch_size=1):
    """Process the image pairs with MASt3R (memory-optimised version)."""
    print("\n=== Running MASt3R Reconstruction ===")
    from dust3r.inference import inference
    from dust3r.cloud_opt import global_aligner, GlobalAlignerMode
    from dust3r.utils.image import load_images

    # Cap the pairs (to save memory)
    max_pairs_for_memory = 50
    if len(pairs) > max_pairs_for_memory:
        print(f"⚠️ Limiting pairs from {len(pairs)} to {max_pairs_for_memory} for memory")
        pairs = pairs[:max_pairs_for_memory]

    # Collect the image indices referenced by the pairs
    pair_indices = []
    for i, j in pairs:
        pair_indices.extend([i, j])
    unique_indices = sorted(set(pair_indices))

    selected_paths = [image_paths[i] for i in unique_indices]
    print(f"Selected {len(selected_paths)} unique images from {len(pairs)} pairs")

    # Load the images
    images = load_images(selected_paths, size=Config.IMAGE_SIZE)

    clear_memory()

    # Adjust the batch size dynamically
    available_memory = torch.cuda.get_device_properties(device).total_memory
    used_memory = torch.cuda.memory_allocated(device)
    free_memory = available_memory - used_memory

    if free_memory < 2e9:
        batch_size = 1
        print("⚠️ Low memory, using batch_size=1")

    # Run inference in small batches
    all_outputs = []
    for i in range(0, len(images), batch_size):
        batch_images = images[i:i+batch_size]
        print(f"Processing batch {i//batch_size + 1}/{(len(images)-1)//batch_size + 1}")

        with torch.no_grad():
            output = inference([tuple(batch_images)], model, device, batch_size=len(batch_images))

        all_outputs.extend(output[0])
        clear_memory()

    print(f"✓ Processed {len(all_outputs)} image predictions")

    # Prepare the global alignment
    scene = global_aligner(
        dust3r_output=all_outputs,
        device=device,
        mode=GlobalAlignerMode.PointCloudOptimizer,
        verbose=True
    )

    clear_memory()

    # Global alignment
    print("Running global alignment...")
    try:
        loss = scene.compute_global_alignment(
            init="mst",
            niter=50,
            schedule='cosine',
            lr=0.01
        )
        print(f"✓ Alignment complete (loss: {loss:.6f})")
    except RuntimeError as e:
        if "out of memory" in str(e).lower():
            print("⚠️ OOM during alignment, trying with fewer iterations...")
            clear_memory()

            loss = scene.compute_global_alignment(
                init="mst",
                niter=20,
                schedule='cosine',
                lr=0.01
            )
            print(f"✓ Alignment complete with reduced iterations (loss: {loss:.6f})")
        else:
            raise

    clear_memory()

    return scene, images
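
# ----------------------------------------------------------------------
# Hedged sketch: inference([tuple(batch_images)], ...) above feeds whole
# batches as a single "pair", and global_aligner() is handed a flat list of
# predictions. Assuming the dust3r checkout bundled with mast3r, the usual
# calling pattern builds (view, view) pairs with make_pairs and passes the
# inference output dict straight to global_aligner:
# ----------------------------------------------------------------------

def reconstruct_scene_sketch(model, selected_paths, device, image_size=224, niter=50):
    """Minimal sketch of the standard dust3r pairs -> inference -> alignment flow."""
    from dust3r.utils.image import load_images
    from dust3r.image_pairs import make_pairs
    from dust3r.inference import inference
    from dust3r.cloud_opt import global_aligner, GlobalAlignerMode

    imgs = load_images(selected_paths, size=image_size)
    # 'complete' connects every image with every other; 'swin' is a cheaper option.
    pairs = make_pairs(imgs, scene_graph='complete', prefilter=None, symmetrize=True)
    with torch.no_grad():
        output = inference(pairs, model, device, batch_size=1)
    scene = global_aligner(output, device=device, mode=GlobalAlignerMode.PointCloudOptimizer)
    scene.compute_global_alignment(init="mst", niter=niter, schedule='cosine', lr=0.01)
    return scene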

# ======================================================================
# CAMERA PARAMETER EXTRACTION (PROCESS2)
# ======================================================================

def extract_camera_params_process2(scene, image_paths, conf_threshold=1.5):
    """Process2: extract camera parameters and 3D points directly from the scene."""
    print("\n=== Extracting Camera Parameters (Process2) ===")

    cameras_dict = {}
    all_pts3d = []
    all_confidence = []

    n_images = len(image_paths)

    for idx in range(n_images):
        img_name = os.path.basename(image_paths[idx])

        try:
            # Get the camera parameters
            if hasattr(scene, 'im_poses'):
                pose = scene.im_poses[idx]
            elif hasattr(scene, 'get_im_poses'):
                pose = scene.get_im_poses()[idx]
            else:
                pose = torch.eye(4)

            if hasattr(scene, 'im_focals'):
                focal = scene.im_focals[idx]
            elif hasattr(scene, 'get_focals'):
                focal = scene.get_focals()[idx]
            else:
                focal = 1000.0

            if hasattr(scene, 'im_pp'):
                pp = scene.im_pp[idx]
            elif hasattr(scene, 'get_principal_points'):
                pp = scene.get_principal_points()[idx]
            else:
                pp = torch.tensor([112.0, 112.0])

            # Convert tensors to numpy
            if isinstance(pose, torch.Tensor):
                pose = pose.detach().cpu().numpy()
            if isinstance(focal, torch.Tensor):
                focal = focal.detach().cpu().item()
            if isinstance(pp, torch.Tensor):
                pp = pp.detach().cpu().numpy()

            # Store the camera parameters
            cameras_dict[img_name] = {
                'focal': focal,
                'pp': pp,
                'pose': pose
            }

            # Get the 3D points
            if hasattr(scene, 'im_pts3d'):
                pts3d_img = scene.im_pts3d[idx]
            elif hasattr(scene, 'get_pts3d'):
                pts3d_img = scene.get_pts3d()[idx]
            else:
                pts3d_img = None

            # Get the confidence
            if hasattr(scene, 'im_conf'):
                conf_img = scene.im_conf[idx]
            elif hasattr(scene, 'get_conf'):
                conf_img = scene.get_conf()[idx]
            else:
                conf_img = None

            # Process the 3D points and confidence
            if pts3d_img is not None:
                if isinstance(pts3d_img, torch.Tensor):
                    pts3d_img = pts3d_img.detach().cpu().numpy()

                if pts3d_img.ndim == 3:
                    pts3d_flat = pts3d_img.reshape(-1, 3)
                else:
                    pts3d_flat = pts3d_img

                all_pts3d.append(pts3d_flat)

                # process the confidence
                if conf_img is not None:
                    if isinstance(conf_img, list):
                        conf_img = np.array(conf_img)
                    elif isinstance(conf_img, torch.Tensor):
                        conf_img = conf_img.detach().cpu().numpy()

                    if conf_img.ndim > 1:
                        conf_flat = conf_img.reshape(-1)
                    else:
                        conf_flat = conf_img

                    if len(conf_flat) != len(pts3d_flat):
                        conf_flat = np.ones(len(pts3d_flat))

                    all_confidence.append(conf_flat)
                else:
                    all_confidence.append(np.ones(len(pts3d_flat)))

        except Exception as e:
            print(f"⚠️ Error processing image {idx} ({img_name}): {e}")
            cameras_dict[img_name] = {
                'focal': 1000.0,
                'pp': np.array([112.0, 112.0]),
                'pose': np.eye(4)
            }
            continue

    # Concatenate all 3D points
    if all_pts3d:
        pts3d = np.vstack(all_pts3d)
        confidence = np.concatenate(all_confidence)
    else:
        pts3d = np.zeros((0, 3))
        confidence = np.zeros(0)

    print(f"✓ Extracted camera parameters for {len(cameras_dict)} images")
    print(f"✓ Total 3D points: {len(pts3d)}")
    print(f"✓ Confidence shape: {confidence.shape}")

    # Filter by confidence
    if len(confidence) > 0:
        valid_mask = confidence > conf_threshold
        pts3d = pts3d[valid_mask]
        confidence = confidence[valid_mask]
        print(f"✓ After confidence filtering (>{conf_threshold}): {len(pts3d)} points")

    return cameras_dict, pts3d, confidence
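
# ----------------------------------------------------------------------
# Hedged sketch: the COLMAP export below colors every 3D point a uniform gray.
# Assuming the aligned scene exposes scene.imgs, scene.get_pts3d() and
# scene.get_conf() the way dust3r's PointCloudOptimizer does, the
# confidence-filtered points can keep their per-pixel RGB instead:
# ----------------------------------------------------------------------

def colored_points_from_scene(scene, conf_threshold=1.5):
    """Minimal sketch: (N, 3) points plus matching (N, 3) uint8 colors."""
    pts_all, rgb_all = [], []
    for pts, conf, img in zip(scene.get_pts3d(), scene.get_conf(), scene.imgs):
        pts = pts.detach().cpu().numpy().reshape(-1, 3)
        conf = conf.detach().cpu().numpy().reshape(-1)
        rgb = np.asarray(img).reshape(-1, 3)  # scene.imgs are H x W x 3 in [0, 1]
        mask = conf > conf_threshold
        pts_all.append(pts[mask])
        rgb_all.append((rgb[mask] * 255).astype(np.uint8))
    return np.concatenate(pts_all), np.concatenate(rgb_all)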

# ======================================================================
# COLMAP EXPORT
# ======================================================================

def write_colmap_sparse(cameras_dict, pts3d, confidence, image_paths, output_dir):
    """Write the sparse reconstruction in COLMAP format."""
    print("\n=== Writing COLMAP Sparse Reconstruction ===")

    import pycolmap

    os.makedirs(output_dir, exist_ok=True)

    # Create the Reconstruction object
    reconstruction = pycolmap.Reconstruction()

    # Add the camera
    camera_id = reconstruction.add_camera(
        pycolmap.Camera(
            model="SIMPLE_PINHOLE",
            width=Config.IMAGE_SIZE,
            height=Config.IMAGE_SIZE,
            params=[1000.0, Config.IMAGE_SIZE/2, Config.IMAGE_SIZE/2]
        )
    )

    # Add the images
    for img_idx, img_path in enumerate(image_paths):
        img_name = os.path.basename(img_path)

        if img_name in cameras_dict:
            cam_params = cameras_dict[img_name]
            pose = cam_params['pose']

            # Rotation and translation
            R = pose[:3, :3]
            t = pose[:3, 3]

            qvec = pycolmap.rotmat_to_qvec(R)
            tvec = t

            reconstruction.add_image(
                pycolmap.Image(
                    id=img_idx + 1,
                    name=img_name,
                    camera_id=camera_id,
                    qvec=qvec,
                    tvec=tvec
                )
            )

    # Add the 3D points
    for i, pt in enumerate(pts3d):
        if i >= 100000:  # cap the point count
            break

        reconstruction.add_point3D(
            pycolmap.Point3D(
                xyz=pt,
                color=np.array([128, 128, 128], dtype=np.uint8),
                error=1.0 - confidence[i] if i < len(confidence) else 1.0
            )
        )

    # Save
    reconstruction.write(output_dir)

    print(f"✓ Wrote COLMAP reconstruction to: {output_dir}")
    print(f"  - Cameras: {len(reconstruction.cameras)}")
    print(f"  - Images: {len(reconstruction.images)}")
    print(f"  - Points: {len(reconstruction.points3D)}")


# ======================================================================
# GAUSSIAN SPLATTING
# ======================================================================

def run_gaussian_splatting(output_dir, iterations=30000):
    """Run Gaussian Splatting."""
    print("\n=== Running Gaussian Splatting ===")

    gs_source = output_dir
    gs_model = os.path.join(output_dir, "output")

    cmd = f"""
    python /kaggle/working/gaussian-splatting/train.py \
        -s {gs_source} \
        -m {gs_model} \
        --iterations {iterations} \
        --eval
    """

    print(f"Command: {cmd}")
    os.system(cmd)

    print("✓ Gaussian Splatting complete")
    print(f"  Output: {gs_model}")

    return gs_model
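
# ----------------------------------------------------------------------
# Hedged sketch: the graphdeco-inria gaussian-splatting trainer expects a
# COLMAP-style source layout (<source>/images next to <source>/sparse/0), and
# the image file names must match the names registered in the sparse model.
# write_colmap_sparse() only writes the sparse model, so the input images
# still need to be copied (or symlinked) alongside it before training:
# ----------------------------------------------------------------------

def prepare_gs_source(image_paths, output_dir):
    """Minimal sketch: copy the input images into <output_dir>/images."""
    import shutil
    images_dir = os.path.join(output_dir, "images")
    os.makedirs(images_dir, exist_ok=True)
    for p in image_paths:
        dst = os.path.join(images_dir, os.path.basename(p))
        if not os.path.exists(dst):
            shutil.copy2(p, dst)
    return images_dir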

# ======================================================================
# MAIN PIPELINE
# ======================================================================

def main_pipeline(image_dir, output_dir, square_size=256, iterations=30000,
                  max_images=200, max_pairs=100, max_points=500000,
                  conf_threshold=1.5):
    """Main pipeline (Process2 only, memory-optimised version)."""

    print("="*70)
    print("STEP 1: Loading and Preparing Images")
    print("="*70)

    image_paths = load_images_from_directory(image_dir, max_images=max_images)
    print(f"Loaded {len(image_paths)} images")

    clear_memory()

    print("\n" + "="*70)
    print("STEP 2: Image Pair Selection")
    print("="*70)

    max_pairs = min(max_pairs, 50)
    pairs = get_image_pairs_asmk(image_paths, max_pairs=max_pairs)
    print(f"Selected {len(pairs)} image pairs")

    clear_memory()

    print("\n" + "="*70)
    print("STEP 3: MASt3R 3D Reconstruction")
    print("="*70)

    device = Config.DEVICE
    model = load_mast3r_model(device)

    scene, mast3r_images = run_mast3r_pairs(model, image_paths, pairs, device)

    # release the model
    del model
    clear_memory()

    print("\n" + "="*70)
    print("STEP 4: Converting to COLMAP (Process2 Method)")
    print("="*70)

    cameras_dict, pts3d, confidence = extract_camera_params_process2(
        scene, image_paths, conf_threshold=conf_threshold
    )

    # release the scene
    del scene
    clear_memory()

    # Cap the number of points
    if len(pts3d) > max_points:
        print(f"⚠️ Limiting points from {len(pts3d)} to {max_points}")
        indices = np.random.choice(len(pts3d), max_points, replace=False)
        pts3d = pts3d[indices]
        confidence = confidence[indices]

    print(f"Final point count: {len(pts3d)}")

    # COLMAP export
    colmap_dir = os.path.join(output_dir, "sparse/0")
    os.makedirs(colmap_dir, exist_ok=True)

    write_colmap_sparse(cameras_dict, pts3d, confidence, image_paths, colmap_dir)

    clear_memory()

    print("\n" + "="*70)
    print("STEP 5: Running Gaussian Splatting")
    print("="*70)

    gs_output = run_gaussian_splatting(
        output_dir=output_dir,
        iterations=iterations
    )

    return gs_output

In [6]:
print(f"✓ np: {np.__version__} - {np.__file__}")
!pip show numpy | grep Version

Output:
✓ np: 1.26.4 - /usr/local/lib/python3.12/dist-packages/numpy/__init__.py
Version: 1.26.4
Version 3.1, 31 March 2009
 Version 3, 29 June 2007
 5. Conveying Modified Source Versions.
 14. Revised Versions of this License.

In [7]:
# ======================================================================
# USAGE EXAMPLE
# ======================================================================

if __name__ == "__main__":
    IMAGE_DIR = "/kaggle/input/two-dogs/fountain80/fountain80"
    OUTPUT_DIR = "/kaggle/working/output"

    # Parameters chosen with the memory constraints in mind
    gs_output = main_pipeline(
        image_dir=IMAGE_DIR,
        output_dir=OUTPUT_DIR,
        square_size=256,
        iterations=30000,
        max_images=100,       # limit the number of images
        max_pairs=50,         # limit the number of pairs
        max_points=300000,    # limit the number of points
        conf_threshold=1.5
    )

    print("\n" + "="*70)
    print("PIPELINE COMPLETE")
    print("="*70)
    print(f"Output directory: {gs_output}")

Output:
======================================================================
STEP 1: Loading and Preparing Images
======================================================================

Loading images from: /kaggle/input/two-dogs/fountain80/fountain80
✓ Found 80 images
Loaded 80 images

======================================================================
STEP 2: Image Pair Selection
======================================================================

=== Getting Image Pairs with ASMK ===

=== Loading ASMK Retrieval Model ===
⚠️ Failed to load MASt3R: tried to load naver/MASt3R_ViTLarge_BaseDecoder_512_catmlpdpt_metric from huggingface, but failed
Trying DUSt3R instead
✓ Loaded DUSt3R model for retrieval
✓ ASMK model loaded

=== Extracting MASt3R Features ===
Features:   0%|          | 0/80 [00:00<?, ?it/s]
>> Loading a list of 2 images
 - adding /kaggle/input/two-dogs/fountain80/fountain80/image_001.jpeg with resolution 1440x1920 --> 224x224
 - adding /kaggle/input/two-dogs/fountain80/fountain80/image_001.jpeg with resolution 1440x1920 --> 224x224
 (Found 2 images)
>> Inference with model on 1 image pairs
  0%|          | 0/1 [00:00<?, ?it/s]
/kaggle/working/mast3r/dust3r/dust3r/inference.py:44: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.
  with torch.cuda.amp.autocast(enabled=bool(use_amp)):
/kaggle/working/mast3r/dust3r/dust3r/model.py:206: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.
  with torch.cuda.amp.autocast(enabled=False):
/kaggle/working/mast3r/dust3r/dust3r/inference.py:48: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.
  with torch.cuda.amp.autocast(enabled=False):
100%|██████████| 1/1 [00:01<00:00,  1.82s/it]
Features:   0%|          | 0/80 [00:01<?, ?it/s]

---------------------------------------------------------------------------
KeyError                                  Traceback (most recent call last)
/tmp/ipykernel_417/4145952515.py in <cell line: 0>()
      8
      9 # Parameters chosen with the memory constraints in mind
---> 10 gs_output = main_pipeline(
     11     image_dir=IMAGE_DIR,
     12     output_dir=OUTPUT_DIR,

/tmp/ipykernel_417/162104315.py in main_pipeline(image_dir, output_dir, square_size, iterations, max_images, max_pairs, max_points, conf_threshold)
    656
    657     max_pairs = min(max_pairs, 50)
--> 658     pairs = get_image_pairs_asmk(image_paths, max_pairs=max_pairs)
    659     print(f"Selected {len(pairs)} image pairs")
    660

/tmp/ipykernel_417/162104315.py in get_image_pairs_asmk(image_paths, max_pairs)
    291     device = Config.DEVICE
    292     model, codebook = load_asmk_retrieval_model(device)
--> 293     features = extract_mast3r_features(model, image_paths, device)
    294     similarity_matrix = compute_asmk_similarity(features, codebook)
    295     pairs = build_pairs_from_similarity(similarity_matrix, Config.RETRIEVAL_TOPK)

/tmp/ipykernel_417/162104315.py in extract_mast3r_features(model, image_paths, device, batch_size)
    161
    162     # use only the first image's features
--> 163     if 'desc' in output[0][0]:
    164         desc = output[0][0]['desc']
    165     elif 'pts3d' in output[0][0]:

KeyError: 0

In [ ]:
print(f"✓ np: {np.__version__} - {np.__file__}")
!pip show numpy | grep Version