stpete2 committed on
Commit
e069783
·
verified ·
1 Parent(s): 9c35d43

Delete dino-lightglue-mast3r-gs-kg-07.ipynb

Browse files
dino-lightglue-mast3r-gs-kg-07.ipynb DELETED
@@ -1 +0,0 @@
1
- {"metadata":{"kernelspec":{"name":"python3","display_name":"Python 3","language":"python"},"language_info":{"name":"python","version":"3.12.12","mimetype":"text/x-python","codemirror_mode":{"name":"ipython","version":3},"pygments_lexer":"ipython3","nbconvert_exporter":"python","file_extension":".py"},"colab":{"provenance":[],"gpuType":"T4","machine_shape":"hm"},"accelerator":"GPU","kaggle":{"accelerator":"nvidiaTeslaT4","dataSources":[{"sourceId":14554378,"sourceType":"datasetVersion","datasetId":1429416}],"dockerImageVersionId":31260,"isInternetEnabled":true,"language":"python","sourceType":"notebook","isGpuEnabled":true}},"nbformat_minor":4,"nbformat":4,"cells":[{"cell_type":"markdown","source":"# **dino-lightglue-mast3r-gs-kg**\n2026/01/20","metadata":{"id":"qDQLX3PArmh8"}},{"cell_type":"markdown","source":"https://huggingface.co/datasets/stpete2/ipynb/resolve/main/dino_lightglue_mast3r_gs_colab_48oo.ipynb","metadata":{}},{"cell_type":"markdown","source":"# **setup**","metadata":{"id":"vXt8y7QyyRn9"}},{"cell_type":"code","source":"# 1. 
NumPyを下げつつ、それと互換性のある ml_dtypes をセットで入れる\n!pip install numpy==1.26.4 ml_dtypes==0.5.4","metadata":{"id":"zzIlYMf5ozkH","outputId":"1b6b080d-a90c-4612-fe9b-7c285e4d6f28","trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"break","metadata":{"id":"Dsic4JslI1l4","outputId":"25ee6ab0-42b1-42aa-a626-ed9331dfccbd","trusted":true},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"#**セッションを再起動する、下のセルを実行する**","metadata":{"id":"n3HClCivHr9W"}},{"cell_type":"code","source":"import numpy as np\nprint(f\"NumPy version: {np.__version__}\")\n\nimport ml_dtypes\nprint(f\"ML_Dtypes version: {ml_dtypes.__version__}\")","metadata":{"id":"inVfYAjA2nQ9","outputId":"4c59bb48-324e-4d17-dc69-9d53d55db806","trusted":true,"execution":{"iopub.status.busy":"2026-01-20T05:55:23.854494Z","iopub.execute_input":"2026-01-20T05:55:23.854847Z","iopub.status.idle":"2026-01-20T05:55:23.860433Z","shell.execute_reply.started":"2026-01-20T05:55:23.854820Z","shell.execute_reply":"2026-01-20T05:55:23.859337Z"}},"outputs":[{"name":"stdout","text":"NumPy version: 1.26.4\nML_Dtypes version: 0.5.4\n","output_type":"stream"}],"execution_count":35},{"cell_type":"code","source":"from transformers import AutoImageProcessor, AutoModel","metadata":{"id":"jTO3dSS5HXrC","trusted":true,"execution":{"iopub.status.busy":"2026-01-20T05:55:23.862134Z","iopub.execute_input":"2026-01-20T05:55:23.862436Z","iopub.status.idle":"2026-01-20T05:55:23.879724Z","shell.execute_reply.started":"2026-01-20T05:55:23.862403Z","shell.execute_reply":"2026-01-20T05:55:23.878800Z"}},"outputs":[],"execution_count":36},{"cell_type":"code","source":"import os\nimport sys\nimport glob\nimport shutil\nimport subprocess\nimport site\n\ndef setup_environment():\n \"\"\"\n Gaussian Splatting環境セットアップ(改善版)\n diff-gaussian-rasterization と simple-knn のインストールを確実に行う\n \"\"\"\n\n print(\"🚀 Gaussian Splatting Environment Setup\")\n\n WORK_DIR = \"/content/gaussian-splatting\"\n\n # 
=====================================================================\n # STEP 1: System packages\n # =====================================================================\n print(\"\\n\" + \"=\"*70)\n print(\"STEP 1: System packages\")\n print(\"=\"*70)\n\n subprocess.run([\"apt-get\", \"update\", \"-qq\"], capture_output=True)\n subprocess.run([\n \"apt-get\", \"install\", \"-y\", \"-qq\",\n \"colmap\", \"build-essential\", \"cmake\", \"git\",\n \"libopenblas-dev\", \"xvfb\"\n ], capture_output=True)\n\n os.environ[\"QT_QPA_PLATFORM\"] = \"offscreen\"\n os.environ[\"DISPLAY\"] = \":99\"\n subprocess.Popen(\n [\"Xvfb\", \":99\", \"-screen\", \"0\", \"1024x768x24\"],\n stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL\n )\n print(\"✓ System packages installed\")\n\n\n # =====================================================================\n # STEP 2: Clone Gaussian Splatting with submodules\n # =====================================================================\n print(\"\\n\" + \"=\"*70)\n print(\"STEP 2: Clone Gaussian Splatting\")\n print(\"=\"*70)\n\n if not os.path.exists(WORK_DIR):\n result = subprocess.run([\n \"git\", \"clone\", \"--recursive\",\n \"https://github.com/graphdeco-inria/gaussian-splatting.git\",\n WORK_DIR\n ], capture_output=True, text=True)\n \n if result.returncode != 0:\n print(f\"ERROR cloning repo: {result.stderr}\")\n return None\n print(\"✓ Cloned with submodules\")\n else:\n print(\"✓ Already exists\")\n # 既存の場合でもサブモジュールを確認・更新\n print(\" Updating submodules...\")\n subprocess.run(\n [\"git\", \"submodule\", \"update\", \"--init\", \"--recursive\"],\n cwd=WORK_DIR,\n capture_output=True\n )\n\n\n # =====================================================================\n # STEP 3: NUCLEAR - Physically delete ALL numpy\n # =====================================================================\n print(\"\\n\" + \"=\"*70)\n print(\"STEP 3: NUCLEAR - Force delete ALL NumPy installations\")\n print(\"=\"*70)\n\n subprocess.run(\n 
[sys.executable, \"-m\", \"pip\", \"uninstall\", \"-y\", \"numpy\", \"scipy\"],\n capture_output=True\n )\n\n site_packages = site.getsitepackages() + [site.getusersitepackages()]\n print(f\"Searching {len(site_packages)} site-packages directories...\")\n\n deleted_count = 0\n for sp in site_packages:\n if not os.path.exists(sp):\n continue\n\n for pattern in [\"numpy*\", \"scipy*\"]:\n dirs = glob.glob(os.path.join(sp, pattern))\n for d in dirs:\n try:\n if os.path.isdir(d):\n shutil.rmtree(d)\n else:\n os.remove(d)\n print(f\" Deleted: {d}\")\n deleted_count += 1\n except Exception as e:\n print(f\" Warning: Could not delete {d}: {e}\")\n\n print(f\"✓ Deleted {deleted_count} numpy/scipy installations\")\n\n\n # =====================================================================\n # STEP 4: Clean install - SciPy first\n # =====================================================================\n print(\"\\n\" + \"=\"*70)\n print(\"STEP 4: Clean install - SciPy first strategy\")\n print(\"=\"*70)\n\n subprocess.run(\n [sys.executable, \"-m\", \"pip\", \"install\", \"scipy\"],\n capture_output=True,\n check=True\n )\n\n for sp in site_packages:\n if not os.path.exists(sp):\n continue\n numpy_dirs = glob.glob(os.path.join(sp, \"numpy*\"))\n for d in numpy_dirs:\n try:\n if os.path.isdir(d):\n shutil.rmtree(d)\n else:\n os.remove(d)\n print(f\" Deleted: {d}\")\n except:\n pass\n\n subprocess.run(\n [sys.executable, \"-m\", \"pip\", \"install\", \"numpy==1.26.4\"],\n capture_output=True,\n check=True\n )\n\n print(\"✓ Clean numpy 1.26.4 + scipy installed\")\n\n\n # =====================================================================\n # STEP 5: Install other packages\n # =====================================================================\n print(\"\\n\" + \"=\"*70)\n print(\"STEP 5: Install other packages\")\n print(\"=\"*70)\n\n packages = [\n \"torch torchvision torchaudio\",\n \"opencv-python pillow imageio imageio-ffmpeg plyfile tqdm tensorboard psutil\",\n 
\"transformers==4.40.0\",\n \"kornia h5py matplotlib\",\n \"git+https://github.com/cvg/LightGlue.git\",\n \"pycolmap\"\n ]\n\n for pkg in packages:\n subprocess.run(\n [sys.executable, \"-m\", \"pip\", \"install\"] + pkg.split(),\n capture_output=True\n )\n\n print(\"✓ All packages installed\")\n\n\n # =====================================================================\n # STEP 6: Build GS submodules (改善版)\n # =====================================================================\n print(\"\\n\" + \"=\"*70)\n print(\"STEP 6: Build Gaussian Splatting submodules (IMPROVED)\")\n print(\"=\"*70)\n\n # サブモジュールの設定\n submodules = {\n \"diff-gaussian-rasterization\": {\n \"path\": os.path.join(WORK_DIR, \"submodules\", \"diff-gaussian-rasterization\"),\n \"repo\": \"https://github.com/graphdeco-inria/diff-gaussian-rasterization.git\"\n },\n \"simple-knn\": {\n \"path\": os.path.join(WORK_DIR, \"submodules\", \"simple-knn\"),\n \"repo\": \"https://github.com/camenduru/simple-knn.git\"\n }\n }\n\n for name, info in submodules.items():\n print(f\"\\n--- Installing {name} ---\")\n path = info[\"path\"]\n repo = info[\"repo\"]\n \n # ディレクトリが存在しない場合はクローン\n if not os.path.exists(path):\n print(f\"Cloning {name}...\")\n result = subprocess.run(\n [\"git\", \"clone\", repo, path],\n capture_output=True,\n text=True\n )\n if result.returncode != 0:\n print(f\"ERROR cloning {name}: {result.stderr}\")\n continue\n else:\n print(f\"✓ {name} directory exists\")\n \n # ディレクトリ内容の確認\n if os.path.exists(path):\n files = os.listdir(path)\n print(f\" Files in {name}: {len(files)} items\")\n if len(files) == 0:\n print(f\" WARNING: {name} directory is empty!\")\n continue\n \n # setup.py の存在確認\n setup_py = os.path.join(path, \"setup.py\")\n if not os.path.exists(setup_py):\n print(f\" WARNING: setup.py not found in {name}!\")\n continue\n print(f\" ✓ setup.py found\")\n \n # インストール実行(詳細なログ付き)\n print(f\"Installing {name}...\")\n result = subprocess.run(\n [sys.executable, \"-m\", \"pip\", 
\"install\", path, \"-v\"],\n capture_output=True,\n text=True,\n cwd=WORK_DIR\n )\n \n if result.returncode != 0:\n print(f\"ERROR installing {name}:\")\n print(\"STDOUT:\", result.stdout[-500:] if len(result.stdout) > 500 else result.stdout)\n print(\"STDERR:\", result.stderr[-500:] if len(result.stderr) > 500 else result.stderr)\n \n # 代替インストール方法を試す\n print(f\"Trying alternative installation method for {name}...\")\n result2 = subprocess.run(\n [sys.executable, \"setup.py\", \"install\"],\n capture_output=True,\n text=True,\n cwd=path\n )\n \n if result2.returncode != 0:\n print(f\"Alternative method also failed:\")\n print(\"STDERR:\", result2.stderr[-500:] if len(result2.stderr) > 500 else result2.stderr)\n else:\n print(f\"✓ {name} installed (alternative method)\")\n else:\n print(f\"✓ {name} installed successfully\")\n \n # インストール確認\n try:\n if name == \"diff-gaussian-rasterization\":\n import diff_gaussian_rasterization\n print(f\" ✓ {name} import successful\")\n elif name == \"simple-knn\":\n import simple_knn\n print(f\" ✓ {name} import successful\")\n except ImportError as e:\n print(f\" ✗ {name} import failed: {e}\")\n\n print(\"\\n✓ Submodules installation complete\")\n\n\n # =====================================================================\n # STEP 7: Final verification\n # =====================================================================\n print(\"\\n\" + \"=\"*70)\n print(\"STEP 7: Final verification\")\n print(\"=\"*70)\n\n # NumPy 2.x の最終チェック\n for sp in site_packages:\n if not os.path.exists(sp):\n continue\n\n numpy2_dirs = glob.glob(os.path.join(sp, \"numpy-2.*\"))\n if numpy2_dirs:\n print(f\"⚠️ Found numpy 2.x installations: {len(numpy2_dirs)}\")\n for d in numpy2_dirs:\n try:\n shutil.rmtree(d)\n print(f\" Nuked: {d}\")\n except:\n pass\n\n subprocess.run(\n [sys.executable, \"-m\", \"pip\", \"install\", \"numpy==1.26.4\", \"--force-reinstall\"],\n capture_output=True\n )\n\n # 最終的なインポートテスト\n print(\"\\n--- Final Import Tests 
---\")\n \n test_imports = [\n (\"numpy\", \"NumPy\"),\n (\"torch\", \"PyTorch\"),\n (\"diff_gaussian_rasterization\", \"Diff Gaussian Rasterization\"),\n (\"simple_knn\", \"Simple KNN\")\n ]\n \n all_success = True\n for module_name, display_name in test_imports:\n try:\n __import__(module_name)\n print(f\"✓ {display_name}: OK\")\n except ImportError as e:\n print(f\"✗ {display_name}: FAILED - {e}\")\n all_success = False\n \n print(\"\\n\" + \"=\"*70)\n if all_success:\n print(\"🎉 ALL CHECKS PASSED - Ready for Gaussian Splatting!\")\n else:\n print(\"⚠️ SOME CHECKS FAILED - Please review errors above\")\n print(\"=\"*70)\n\n return WORK_DIR\n\n\nif __name__ == \"__main__\":\n work_dir = setup_environment()\n if work_dir:\n print(f\"\\nGaussian Splatting directory: {work_dir}\")\n print(\"You can now run: %cd {work_dir}\")\n else:\n print(\"\\nSetup failed. Please review errors above.\")","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-01-20T05:55:23.972474Z","iopub.execute_input":"2026-01-20T05:55:23.973028Z","execution_failed":"2026-01-20T05:55:54.066Z"}},"outputs":[{"name":"stdout","text":"🚀 Gaussian Splatting Environment Setup\n\n======================================================================\nSTEP 1: System packages\n======================================================================\n✓ System packages installed\n\n======================================================================\nSTEP 2: Clone Gaussian Splatting\n======================================================================\n✓ Already exists\n Updating submodules...\n\n======================================================================\nSTEP 3: NUCLEAR - Force delete ALL NumPy installations\n======================================================================\nSearching 4 site-packages directories...\n Deleted: /usr/local/lib/python3.12/dist-packages/numpy.libs\n Deleted: /usr/local/lib/python3.12/dist-packages/numpy\n✓ Deleted 2 numpy/scipy 
installations\n\n======================================================================\nSTEP 4: Clean install - SciPy first strategy\n======================================================================\n Deleted: /usr/local/lib/python3.12/dist-packages/numpy.libs\n Deleted: /usr/local/lib/python3.12/dist-packages/numpy-2.4.1.dist-info\n Deleted: /usr/local/lib/python3.12/dist-packages/numpy\n✓ Clean numpy 1.26.4 + scipy installed\n\n======================================================================\nSTEP 5: Install other packages\n======================================================================\n","output_type":"stream"}],"execution_count":null},{"cell_type":"code","source":"import numpy as np\nprint(f\"✓ NumPy: {np.__version__}\")","metadata":{"outputId":"e2e1402f-e3f2-4260-aa05-b572c9ac2c74","id":"nzzRu5emNQAj","trusted":true,"execution":{"execution_failed":"2026-01-20T05:55:54.066Z"}},"outputs":[],"execution_count":null},{"cell_type":"code","source":"import torch\nimport PIL\n\nfrom transformers import AutoConfig\nfrom transformers import AutoImageProcessor","metadata":{"id":"Ib-xRVVIy2PC","trusted":true,"execution":{"execution_failed":"2026-01-20T05:55:54.066Z"}},"outputs":[],"execution_count":null},{"cell_type":"code","source":"import os\nimport sys\n\n%cd /content/gaussian-splatting\n\nfiles = ['database.py', 'h5_to_db.py', 'metric.py']\nbase_url = 'https://huggingface.co/stpete2/imc25_utils/resolve/main/'\n\nfor file in files:\n if not os.path.exists(file):\n !wget -q {base_url + file}\n print(f\"✓ {file} download complete\")\n else:\n print(f\"✓ {file} already exists\")\n","metadata":{"id":"eJrkKiCLzt1G","outputId":"11f9b0b8-c08b-44f0-c50a-f480047b0363","trusted":true,"execution":{"execution_failed":"2026-01-20T05:55:54.066Z"}},"outputs":[],"execution_count":null},{"cell_type":"code","source":"from database import COLMAPDatabase, image_ids_to_pair_id\nfrom h5_to_db import add_keypoints, add_matches\nfrom metric import 
*","metadata":{"id":"bmM_IBUtrEMd","trusted":true,"execution":{"execution_failed":"2026-01-20T05:55:54.066Z"}},"outputs":[],"execution_count":null},{"cell_type":"code","source":"#success\n\ndef setup_mast3r():\n \"\"\"Install and setup MASt3R\"\"\"\n print(\"\\n=== Setting up MASt3R ===\")\n\n os.chdir('/content')\n\n # Remove existing installation\n if os.path.exists('mast3r'):\n print(\"Removing existing MASt3R installation...\")\n os.system('rm -rf mast3r')\n\n # Clone repository\n print(\"Cloning MASt3R repository...\")\n os.system('git clone --recursive https://github.com/naver/mast3r')\n os.chdir('/content/mast3r')\n\n # Check dust3r directory\n print(\"Checking dust3r structure...\")\n os.system('ls -la dust3r/')\n\n # Install dust3r\n print(\"Installing dust3r...\")\n os.system('cd dust3r && python -m pip install -e .')\n\n # Install croco\n print(\"Installing croco...\")\n os.system('cd dust3r/croco && python -m pip install -e .')\n\n # Install requirements\n print(\"Installing MASt3R requirements...\")\n os.system('pip install -r requirements.txt')\n\n # Download model weights\n print(\"Downloading model weights...\")\n os.system('mkdir -p checkpoints')\n os.system('wget -P checkpoints/ https://download.europe.naverlabs.com/ComputerVision/MASt3R/MASt3R_ViTLarge_BaseDecoder_512_catmlpdpt_metric.pth')\n\n # Install additional dependencies\n print(\"Installing additional dependencies...\")\n os.system('pip install trimesh matplotlib roma')\n\n # Add to path\n sys.path.insert(0, '/content/mast3r')\n sys.path.insert(0, '/content/mast3r/dust3r')\n\n # Verification\n print(\"\\n🔍 Verifying MASt3R installation...\")\n try:\n from mast3r.model import AsymmetricMASt3R\n print(\" ✓ MASt3R import: OK\")\n except Exception as e:\n print(f\" ❌ MASt3R import failed: {e}\")\n raise\n\n print(\"✓ MASt3R setup complete!\")\n\nif __name__ == \"__main__\":\n 
setup_mast3r()","metadata":{"id":"3-CN6HJvZ6u2","outputId":"04501f64-ad96-448e-aecb-8caceda0b04e","trusted":true,"execution":{"execution_failed":"2026-01-20T05:55:54.066Z"}},"outputs":[],"execution_count":null},{"cell_type":"code","source":"import torch\nimport numpy as np\nimport sys\n\n# listify関数も必要なので定義\ndef listify(x):\n return list(x) if isinstance(x, (list, tuple)) else [x]","metadata":{"id":"kTPzKB2vYn6b","trusted":true,"execution":{"execution_failed":"2026-01-20T05:55:54.066Z"}},"outputs":[],"execution_count":null},{"cell_type":"code","source":"# /content/mast3r/dust3r/dust3r/utils/device.py の該当関数全体を置き換え\n\ndef collate_with_cat(whatever, lists=False):\n if isinstance(whatever, (list, tuple)):\n if not whatever:\n return whatever\n elem = whatever[0]\n\n T = type(elem)\n if T is torch.Tensor or (T is torch.nn.parameter.Parameter):\n return listify(whatever) if lists else torch.cat(whatever)\n\n # numpyの型を確実に処理\n elem_type_name = type(elem).__name__\n elem_module = type(elem).__module__\n\n if elem_type_name == 'ndarray' or (elem_module == 'numpy' and elem_type_name == 'ndarray'):\n tensors = []\n for x in whatever:\n # 確実にnumpy配列として扱う\n if hasattr(x, '__array__'):\n arr = np.asarray(x)\n else:\n arr = np.array(x)\n tensors.append(torch.from_numpy(arr))\n return listify(tensors) if lists else torch.cat(tensors)","metadata":{"id":"SpqfW2PDXp-H","trusted":true,"execution":{"execution_failed":"2026-01-20T05:55:54.066Z"}},"outputs":[],"execution_count":null},{"cell_type":"code","source":"# MASt3Rのモジュールをインポート(まだインポートされていない場合)\nif 'dust3r.utils.device' not in sys.modules:\n from dust3r.utils import device as device_module\nelse:\n device_module = sys.modules['dust3r.utils.device']\n\n# 関数を置き換え\ndevice_module.collate_with_cat = collate_with_cat\nprint(\"✓ 
collate_with_cat関数を置き換えました\")","metadata":{"id":"AUg6a4lEXeS-","outputId":"5b5873d6-e7ca-40ea-b023-6a52d646f437","trusted":true,"execution":{"execution_failed":"2026-01-20T05:55:54.066Z"}},"outputs":[],"execution_count":null},{"cell_type":"code","source":"","metadata":{"id":"oVGIvNWSpzlZ","trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"import os\nimport sys\nimport gc\nimport h5py\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom tqdm import tqdm\nfrom pathlib import Path\nimport subprocess\n\n\n# LightGlue\nfrom lightglue import ALIKED, LightGlue\nfrom lightglue.utils import load_image\n\n","metadata":{"id":"GvtwW6aXpzeG","trusted":true,"execution":{"execution_failed":"2026-01-20T05:55:54.066Z"}},"outputs":[],"execution_count":null},{"cell_type":"code","source":"\"\"\"\nGaussian Splatting Pipeline\nSimple and robust pipeline: LightGlue → COLMAP → Gaussian Splatting\n\"\"\"\n\"\"\"\nGaussian Splatting Pipeline\nSimple and robust pipeline: LightGlue → MASt3R → Gaussian Splatting\n\"\"\"\n\n# ============================================================================\n# Configuration\n# ============================================================================\nclass Config:\n # Feature extraction\n N_KEYPOINTS = 8192\n IMAGE_SIZE = 1024\n\n # Pair selection\n GLOBAL_TOPK = 80 #全画像数の半分程度\n MIN_MATCHES = 10\n RATIO_THR = 1.2\n\n # Paths\n DINO_MODEL = \"facebook/dinov2-base\"\n\n # MASt3R settings (重要: これらが欠けていました!)\n MAST3R_MODEL = \"/content/mast3r/checkpoints/MASt3R_ViTLarge_BaseDecoder_512_catmlpdpt_metric.pth\"\n MAST3R_IMAGE_SIZE = 224 # メモリを節約するため小さめ(224 or 512)\n\n # Device\n DEVICE = torch.device('cuda' if torch.cuda.is_available() else 
'cpu')","metadata":{"id":"7NfrJdMvrPZn","trusted":true,"execution":{"execution_failed":"2026-01-20T05:55:54.066Z"}},"outputs":[],"execution_count":null},{"cell_type":"code","source":"","metadata":{"id":"eFExgZs-k0l9","trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"# ============================================================================\n# Step 0: images_square\n# ============================================================================\n\ndef preprocess_images_square(input_dir, output_dir, size=1024, background='black'):\n \"\"\"\n Standardize all images to a square format (maintaining aspect ratio with padding).\n\n Args:\n input_dir (str): Directory containing input images.\n output_dir (str): Directory to save processed images.\n size (int): Target square dimension (default: 1024).\n background (str): Background style: 'black', 'white', or 'blur'.\n \"\"\"\n from PIL import Image, ImageFilter\n import os\n from tqdm import tqdm\n\n print(f\"\\n=== Preprocessing to {size}x{size} Square Images ===\")\n\n os.makedirs(output_dir, exist_ok=True)\n\n image_files = sorted([\n f for f in os.listdir(input_dir)\n if f.lower().endswith(('.jpg', '.jpeg', '.png'))\n ])\n\n stats = {\n 'total': len(image_files),\n 'landscape': 0,\n 'portrait': 0,\n 'square': 0,\n 'resized': 0,\n }\n\n for img_file in tqdm(image_files, desc=\"Converting to square\"):\n img_path = os.path.join(input_dir, img_file)\n img = Image.open(img_path).convert('RGB')\n\n width, height = img.size\n\n # Statistics\n if width > height:\n stats['landscape'] += 1\n elif width < height:\n stats['portrait'] += 1\n else:\n stats['square'] += 1\n\n # Resize based on the longest side\n max_dim = max(width, height)\n if max_dim != size:\n scale = size / max_dim\n new_width = int(width * scale)\n new_height = int(height * scale)\n img = img.resize((new_width, new_height), Image.LANCZOS)\n stats['resized'] += 1\n else:\n new_width, new_height = width, height\n\n # Create 
background\n if background == 'black':\n canvas = Image.new('RGB', (size, size), (0, 0, 0))\n elif background == 'white':\n canvas = Image.new('RGB', (size, size), (255, 255, 255))\n elif background == 'blur':\n # Use a blurred version of the image as background for a professional look\n canvas = img.resize((size, size), Image.LANCZOS)\n canvas = canvas.filter(ImageFilter.GaussianBlur(radius=20))\n else:\n canvas = Image.new('RGB', (size, size), (0, 0, 0))\n\n # Center the image\n offset_x = (size - new_width) // 2\n offset_y = (size - new_height) // 2\n canvas.paste(img, (offset_x, offset_y))\n\n # Save output\n output_path = os.path.join(output_dir, img_file)\n canvas.save(output_path, quality=95, optimize=True)\n\n print(f\"\\n✓ Preprocessing complete:\")\n print(f\" Total images: {stats['total']}\")\n print(f\" Landscape: {stats['landscape']} / Portrait: {stats['portrait']} / Square: {stats['square']}\")\n print(f\" Resized: {stats['resized']}\")\n print(f\" Output size: {size}x{size}\")\n\n return output_dir","metadata":{"id":"TkVzKRqsvxFZ","trusted":true,"execution":{"execution_failed":"2026-01-20T05:55:54.066Z"}},"outputs":[],"execution_count":null},{"cell_type":"code","source":"def normalize_image_sizes_biplet(input_dir, output_dir=None, size=1024):\n \"\"\"\n Generates two square crops (Left & Right or Top & Bottom)\n from each image in a directory.\n \"\"\"\n if output_dir is None:\n output_dir = input_dir\n\n os.makedirs(output_dir, exist_ok=True)\n\n print(f\"Generating 2 cropped squares (Left/Right or Top/Bottom) for each image...\")\n print()\n\n converted_count = 0\n size_stats = {}\n\n for img_file in sorted(os.listdir(input_dir)):\n if not img_file.lower().endswith(('.jpg', '.jpeg', '.png')):\n continue\n\n input_path = os.path.join(input_dir, img_file)\n\n try:\n img = Image.open(input_path)\n original_size = img.size\n\n size_key = f\"{original_size[0]}x{original_size[1]}\"\n size_stats[size_key] = size_stats.get(size_key, 0) + 1\n\n # Generate 2 
crops\n crops = generate_two_crops(img, size)\n\n base_name, ext = os.path.splitext(img_file)\n for mode, cropped_img in crops.items():\n output_path = os.path.join(output_dir, f\"{base_name}_{mode}{ext}\")\n cropped_img.save(output_path, quality=95)\n\n converted_count += 1\n print(f\" ✓ {img_file}: {original_size} → 2 square images generated\")\n\n except Exception as e:\n print(f\" ✗ Error processing {img_file}: {e}\")\n\n print(f\"\\nProcessing complete: {converted_count} source images processed\")\n print(f\"Original size distribution: {size_stats}\")\n return converted_count\n\n\ndef generate_two_crops(img, size):\n \"\"\"\n Crops the image into a square and returns 2 variations\n (Left/Right for landscape, Top/Bottom for portrait).\n \"\"\"\n width, height = img.size\n crop_size = min(width, height)\n crops = {}\n\n if width > height:\n # Landscape → Left & Right\n positions = {\n 'left': 0,\n 'right': width - crop_size\n }\n for mode, x_offset in positions.items():\n box = (x_offset, 0, x_offset + crop_size, crop_size)\n crops[mode] = img.crop(box).resize(\n (size, size),\n Image.Resampling.LANCZOS\n )\n\n else:\n # Portrait or Square → Top & Bottom\n positions = {\n 'top': 0,\n 'bottom': height - crop_size\n }\n for mode, y_offset in positions.items():\n box = (0, y_offset, crop_size, y_offset + crop_size)\n crops[mode] = img.crop(box).resize(\n (size, size),\n Image.Resampling.LANCZOS\n )\n\n return crops","metadata":{"id":"A6smO9X0el3d","trusted":true,"execution":{"execution_failed":"2026-01-20T05:55:54.066Z"}},"outputs":[],"execution_count":null},{"cell_type":"code","source":"# ============================================================================\n# Step 1: Image Pair Selection (DINO + ALIKED local verify)\n# ============================================================================\n\ndef load_torch_image(fname, device):\n \"\"\"Load image as torch tensor\"\"\"\n from PIL import Image\n import torchvision.transforms as T\n\n img = 
Image.open(fname).convert('RGB')\n transform = T.Compose([\n T.ToTensor(),\n ])\n return transform(img).unsqueeze(0).to(device)\n\ndef extract_dino_global(image_paths, model_path, device):\n \"\"\"Extract DINO global descriptors\"\"\"\n print(\"\\n=== Extracting DINO Global Features ===\")\n\n processor = AutoImageProcessor.from_pretrained(model_path)\n model = AutoModel.from_pretrained(model_path).eval().to(device)\n\n global_descs = []\n for img_path in tqdm(image_paths):\n img = load_torch_image(img_path, device)\n with torch.no_grad():\n inputs = processor(images=img, return_tensors=\"pt\", do_rescale=False).to(device)\n outputs = model(**inputs)\n desc = F.normalize(outputs.last_hidden_state[:, 1:].max(dim=1)[0], dim=1, p=2)\n global_descs.append(desc.cpu())\n\n global_descs = torch.cat(global_descs, dim=0)\n\n del model\n torch.cuda.empty_cache()\n gc.collect()\n\n return global_descs\n\ndef build_topk_pairs(global_feats, k, device):\n \"\"\"Build top-k similar pairs from global features\"\"\"\n g = global_feats.to(device)\n sim = g @ g.T\n sim.fill_diagonal_(-1)\n\n N = sim.size(0)\n k = min(k, N - 1)\n\n topk_indices = torch.topk(sim, k, dim=1).indices.cpu()\n\n pairs = []\n for i in range(N):\n for j in topk_indices[i]:\n j = j.item()\n if i < j:\n pairs.append((i, j))\n\n return list(set(pairs))\n\ndef extract_aliked_features(image_paths, device):\n \"\"\"Extract ALIKED local features\"\"\"\n print(\"\\n=== Extracting ALIKED Local Features ===\")\n\n extractor = ALIKED(\n model_name=\"aliked-n16\",\n max_num_keypoints=Config.N_KEYPOINTS,\n detection_threshold=0.01,\n resize=Config.IMAGE_SIZE\n ).eval().to(device)\n\n features = []\n for img_path in tqdm(image_paths):\n img = load_torch_image(img_path, device)\n with torch.no_grad():\n feats = extractor.extract(img)\n kpts = feats['keypoints'].reshape(-1, 2).cpu()\n descs = feats['descriptors'].reshape(len(kpts), -1).cpu()\n features.append({'keypoints': kpts, 'descriptors': descs})\n\n del extractor\n 
torch.cuda.empty_cache()\n gc.collect()\n\n return features\n\ndef verify_pairs_locally(pairs, features, device, threshold=Config.MIN_MATCHES):\n \"\"\"Verify pairs using local descriptor matching\"\"\"\n print(\"\\n=== Verifying Pairs with Local Features ===\")\n\n verified = []\n for i, j in tqdm(pairs):\n desc1 = features[i]['descriptors'].to(device)\n desc2 = features[j]['descriptors'].to(device)\n\n if len(desc1) == 0 or len(desc2) == 0:\n continue\n\n # Simple mutual nearest neighbor\n dist = torch.cdist(desc1, desc2, p=2)\n min_dist = dist.min(dim=1)[0]\n n_matches = (min_dist < Config.RATIO_THR).sum().item()\n\n if n_matches >= threshold:\n verified.append((i, j))\n\n return verified\n\ndef get_image_pairs(image_paths, max_pairs=None):\n \"\"\"Main pair selection pipeline\"\"\"\n device = Config.DEVICE\n\n # 1. DINO global\n global_feats = extract_dino_global(image_paths, Config.DINO_MODEL, device)\n pairs = build_topk_pairs(global_feats, Config.GLOBAL_TOPK, device)\n\n print(f\"Initial pairs from global features: {len(pairs)}\")\n\n # 2. ALIKED local\n features = extract_aliked_features(image_paths, device)\n\n # 3. 
# ============================================================================
# Step 2: Feature Matching (ALIKED + LightGlue)
# ============================================================================

def match_pairs_lightglue(image_paths, pairs, features, output_dir):
    """
    Match image pairs with LightGlue using pre-extracted ALIKED features.

    Args:
        image_paths: list of image file paths; the indices in `pairs` refer
            to this list.
        pairs: iterable of (idx1, idx2) index pairs to match.
        features: either a dict with 'keypoints'/'descriptors' lists, or a
            list of per-image dicts each holding 'keypoints' and 'descriptors'.
        output_dir: directory that receives keypoints.h5 and matches.h5.

    Side effects:
        Overwrites keypoints.h5 and matches.h5 in output_dir.

    Raises:
        ValueError: if `features` is neither a dict nor a list.
    """
    print("\n=== Matching with LightGlue ===")

    os.makedirs(output_dir, exist_ok=True)
    keypoints_path = os.path.join(output_dir, 'keypoints.h5')
    matches_path = os.path.join(output_dir, 'matches.h5')

    # Remove stale outputs so a re-run cannot mix old and new pairs.
    for stale_path in (keypoints_path, matches_path):
        if os.path.exists(stale_path):
            os.remove(stale_path)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # BUGFIX: the original also instantiated an ALIKED extractor here, but the
    # features are already extracted upstream and the extractor was never used;
    # it only wasted GPU memory. Removed.
    matcher = LightGlue(features='aliked').eval().to(device)

    if isinstance(features, dict):
        all_keypoints = features['keypoints']
        all_descriptors = features['descriptors']
    elif isinstance(features, list):
        all_keypoints = [f['keypoints'] for f in features]
        all_descriptors = [f['descriptors'] for f in features]
    else:
        raise ValueError(f"Unsupported features type: {type(features)}")

    # Persist keypoints per image (dataset name = image basename without extension).
    with h5py.File(keypoints_path, 'w') as f_kp:
        for idx, img_path in enumerate(tqdm(image_paths, desc="Saving keypoints")):
            img_name = os.path.splitext(os.path.basename(img_path))[0]
            kp = all_keypoints[idx]
            if torch.is_tensor(kp):
                kp = kp.cpu().numpy()
            f_kp.create_dataset(img_name, data=kp)

    # Match pairs
    with h5py.File(matches_path, 'w') as f_match:
        for idx1, idx2 in tqdm(pairs, desc="Matching"):
            with torch.no_grad():
                kp0, kp1 = all_keypoints[idx1], all_keypoints[idx2]
                desc0, desc1 = all_descriptors[idx1], all_descriptors[idx2]

                # Normalize everything to float tensors on the matcher's device.
                if isinstance(kp0, np.ndarray):
                    kp0 = torch.from_numpy(kp0)
                    kp1 = torch.from_numpy(kp1)
                    desc0 = torch.from_numpy(desc0)
                    desc1 = torch.from_numpy(desc1)
                kp0 = kp0.float().to(device)
                kp1 = kp1.float().to(device)
                desc0 = desc0.float().to(device)
                desc1 = desc1.float().to(device)

                # LightGlue expects batched inputs: add batch dim if missing.
                feats0 = {
                    'keypoints': kp0.unsqueeze(0) if kp0.dim() == 2 else kp0,
                    'descriptors': desc0.unsqueeze(0) if desc0.dim() == 2 else desc0,
                }
                feats1 = {
                    'keypoints': kp1.unsqueeze(0) if kp1.dim() == 2 else kp1,
                    'descriptors': desc1.unsqueeze(0) if desc1.dim() == 2 else desc1,
                }

                matches01 = matcher({'image0': feats0, 'image1': feats1})

                if 'matches0' in matches01:
                    matches0 = matches01['matches0']
                    if isinstance(matches0, list):
                        matches0 = matches0[0]
                    # Move CUDA tensor to CPU before numpy conversion.
                    if torch.is_tensor(matches0):
                        matches0 = matches0.detach().cpu().numpy()

                    # BUGFIX: `valid` is already a numpy bool array at this
                    # point; the original's extra torch.is_tensor(valid)
                    # conversion was dead code and has been removed.
                    valid = matches0 > -1
                    valid_indices = np.where(valid)[0]
                    valid_matches = matches0[valid]

                    # Build an (N, 2) array of (query_idx, train_idx) matches.
                    matches = np.empty((len(valid_indices), 2), dtype=np.int64)
                    matches[:, 0] = valid_indices
                    matches[:, 1] = valid_matches

                elif 'matches' in matches01:
                    m = matches01['matches']
                    if torch.is_tensor(m):
                        m = m.detach().cpu().numpy()
                    matches = m

                else:
                    continue

            if len(matches) > 0:
                img_name1 = os.path.splitext(os.path.basename(image_paths[idx1]))[0]
                img_name2 = os.path.splitext(os.path.basename(image_paths[idx2]))[0]
                pair_key = f"{img_name1}_{img_name2}"
                f_match.create_dataset(pair_key, data=matches)

    print(f"✓ Matches saved to {matches_path}")


import numpy as np
print(f"✓ NumPy: {np.__version__}")

import torch
from pathlib import Path
from tqdm import tqdm

# ============================================================================
# 1. First, all of the regular imports
# ============================================================================
from dust3r.image_pairs import make_pairs
from dust3r.inference import inference
from dust3r.utils.image import load_images
from dust3r.cloud_opt import global_aligner, GlobalAlignerMode
# ============================================================================
# 2. Define the fixed functions and prepare the monkey patches
# ============================================================================
import torch
import numpy as np
from dust3r.utils.device import to_cpu
from dust3r.inference import check_if_same_size
import dust3r.inference
import dust3r.utils.misc


def is_symmetrized_fixed(gt1, gt2):
    """
    Fixed version of is_symmetrized that avoids the upstream IndexError.

    Returns True only when gt1['instance'] and gt2['instance'] are sequences
    (both lists or both strings) of equal length >= 2 whose consecutive pairs
    are swapped between the two views: x[i] == y[i+1] and x[i+1] == y[i]
    for every even i. Odd-length sequences return False instead of raising.
    """
    if 'instance' not in gt1 or 'instance' not in gt2:
        return False

    x = gt1['instance']
    y = gt2['instance']

    # The original duplicated identical logic for the list and str cases;
    # both are plain sequences, so a single branch suffices.
    same_kind = (isinstance(x, list) and isinstance(y, list)) or \
                (isinstance(x, str) and isinstance(y, str))
    if not same_kind:
        return False
    if len(x) != len(y) or len(x) < 2:
        return False

    for i in range(0, len(x), 2):
        if i + 1 >= len(x):
            # Odd length: the last element has no partner
            # (this was the source of the original IndexError).
            return False
        if not (x[i] == y[i + 1] and x[i + 1] == y[i]):
            return False
    return True


def convert_numpy_to_tensor(view_dict):
    """Return a shallow copy of view_dict with numpy arrays converted to torch tensors."""
    result = {}
    for key, value in view_dict.items():
        result[key] = torch.from_numpy(value) if isinstance(value, np.ndarray) else value
    return result


def collate_with_cat_fixed(batch, lists=False):
    """
    Fixed collate_with_cat: turns a list of (view1, view2) pairs into a single
    (view1, view2) pair whose tensor fields are concatenated along dim 0.

    Returns None for an empty batch or an unrecognized batch structure.
    """
    if not batch:
        return None

    # Fast path: a single pair of dict views — just convert numpy fields.
    if len(batch) == 1:
        elem = batch[0]
        if isinstance(elem, (list, tuple)) and len(elem) == 2:
            view1, view2 = elem
            if isinstance(view1, dict) and isinstance(view2, dict):
                return (convert_numpy_to_tensor(view1), convert_numpy_to_tensor(view2))

    if isinstance(batch[0], (list, tuple)):
        view1_list = []
        view2_list = []
        for pair in batch:
            if len(pair) == 2:
                # NOTE(review): assumes pair elements are dicts here —
                # convert_numpy_to_tensor iterates .items(). Confirm for
                # non-dict views.
                view1_list.append(convert_numpy_to_tensor(pair[0]))
                view2_list.append(convert_numpy_to_tensor(pair[1]))

        def stack_dicts(dict_list):
            # Concatenate each field across the batch: tensors/arrays are
            # torch.cat'ed on dim 0, lists are flattened, anything else is
            # kept as a plain list of per-item values.
            if not dict_list:
                return {}
            result = {}
            for key in dict_list[0].keys():
                values = [d[key] for d in dict_list]
                if isinstance(values[0], torch.Tensor):
                    result[key] = torch.cat(values, dim=0)
                elif isinstance(values[0], np.ndarray):
                    tensors = [torch.from_numpy(v) if isinstance(v, np.ndarray) else v for v in values]
                    result[key] = torch.cat(tensors, dim=0)
                elif isinstance(values[0], (list, tuple)):
                    result[key] = []
                    for v in values:
                        result[key].extend(v if isinstance(v, list) else [v])
                else:
                    result[key] = values
            return result

        return (stack_dicts(view1_list), stack_dicts(view2_list))

    return None


def loss_of_one_batch_fixed(batch, model, criterion, device, symmetrize_batch=False, use_amp=False, ret=None):
    """
    Fixed loss_of_one_batch: moves tensor fields of both views to `device`,
    runs the model under optional AMP, and computes the loss in fp32.

    Metadata fields in ignore_keys are left on the host. Assumes every other
    field supports .to(device) — TODO confirm for custom view dicts.
    """
    view1, view2 = batch
    ignore_keys = set(['depthmap', 'dataset', 'label', 'instance', 'idx', 'true_shape', 'rng'])
    for view in batch:
        for name in view.keys():
            if name in ignore_keys:
                continue
            view[name] = view[name].to(device, non_blocking=True)

    with torch.cuda.amp.autocast(enabled=bool(use_amp)):
        pred1, pred2 = model(view1, view2)

    # Loss is always computed in full precision, regardless of use_amp.
    with torch.cuda.amp.autocast(enabled=False):
        loss = criterion(view1, view2, pred1, pred2) if criterion is not None else None

    result = dict(view1=view1, view2=view2, pred1=pred1, pred2=pred2, loss=loss)
    return result[ret] if ret else result


@torch.no_grad()
def inference_debug(pairs, model, device, batch_size=8, verbose=True):
    """
    Drop-in replacement for dust3r.inference.inference with extra validation
    and dict-style aggregation of per-batch results.

    Returns a single dict whose tensor/list fields are the concatenation of
    all batch results, or None when `pairs` is empty.

    Raises:
        ValueError: if collate_with_cat_fixed cannot collate a batch.
    """
    if verbose:
        print(f'>> Inference with model on {len(pairs)} image pairs')

    result = []

    # Differently-sized images cannot be stacked — fall back to batch_size 1.
    multiple_shapes = not (check_if_same_size(pairs))
    if multiple_shapes:
        batch_size = 1

    for i in tqdm(range(0, len(pairs), batch_size), disable=not verbose, desc="MASt3R inference"):
        batch_pairs = pairs[i:i + batch_size]

        # Use the fixed collate function.
        collated = collate_with_cat_fixed(batch_pairs)
        if collated is None:
            raise ValueError(f"collate_with_cat_fixed returned None at batch {i}")

        # Use the fixed loss_of_one_batch.
        res = loss_of_one_batch_fixed(collated, model, None, device)
        result.append(to_cpu(res))

    if len(result) == 0:
        return None

    # Aggregate batches regardless of multiple_shapes: concatenate tensors,
    # extend lists, and recurse one level into dict-valued fields
    # (view1/view2/pred1/pred2).
    combined = {}
    for key in result[0].keys():
        first = result[0][key]
        if isinstance(first, dict):
            combined[key] = {}
            for field in first.keys():
                values = [r[key][field] for r in result]
                if isinstance(values[0], torch.Tensor):
                    combined[key][field] = torch.cat(values, dim=0)
                elif isinstance(values[0], list):
                    combined[key][field] = []
                    for v in values:
                        combined[key][field].extend(v if isinstance(v, list) else [v])
                else:
                    combined[key][field] = values
        elif isinstance(first, torch.Tensor):
            combined[key] = torch.cat([r[key] for r in result], dim=0)
        elif isinstance(first, list):
            combined[key] = []
            for r in result:
                combined[key].extend(r[key] if isinstance(r[key], list) else [r[key]])
        else:
            # Scalar / unknown type: keep the first batch's value.
            combined[key] = first
    return combined
# ============================================================================
# 3. Apply the monkey patches (this is the most important step!)
# ============================================================================
print("Applying monkey patches...")
dust3r.utils.misc.is_symmetrized = is_symmetrized_fixed
dust3r.inference.inference = inference_debug
inference = dust3r.inference.inference

print("✓ Monkey-patched dust3r.utils.misc.is_symmetrized")
print("✓ Monkey-patched dust3r.inference.inference")


# Sanity check: the upstream is_symmetrized raised IndexError on short strings.
print("\n=== Verification ===")
test_gt1 = {'instance': '12'}
test_gt2 = {'instance': '21'}
try:
    result = dust3r.utils.misc.is_symmetrized(test_gt1, test_gt2)
    print(f"✅ Monkey patch working! is_symmetrized test passed")
except IndexError as e:
    print(f"❌ ERROR: Monkey patch failed! {e}")
    raise

# ===== Replace is_symmetrized in every already-loaded dust3r/mast3r module =====
import sys

print("\n=== Patching all loaded dust3r modules ===")
patched_count = 0

for module_name, module in list(sys.modules.items()):
    if module is None:
        continue

    # Any dust3r- or mast3r-related module may hold its own stale reference.
    if 'dust3r' in module_name or 'mast3r' in module_name:
        if hasattr(module, 'is_symmetrized'):
            # BUGFIX/cleanup: the original also kept an unused `old_func`
            # reference and re-scanned module.__dict__ for the same attribute
            # (a redundant second pass that set the identical value); both
            # removed.
            module.is_symmetrized = is_symmetrized_fixed
            patched_count += 1
            print(f"  ✓ Patched: {module_name}.is_symmetrized")

print(f"\n✓ Patched {patched_count} modules")
print("="*70)


def run_mast3r_reconstruction(image_paths, pairs, output_dir, model, device):
    """
    Run the full MASt3R pipeline: pairwise inference, global alignment, and
    export to COLMAP text format.

    Args:
        image_paths: list of image file paths.
        pairs: list of (idx1, idx2) image-index pairs to process.
        output_dir: directory that will receive a "colmap" subfolder.
        model: a loaded MASt3R model.
        device: torch device used for inference.

    Returns:
        (scene, colmap_dir): the optimized scene and the COLMAP output path.

    Raises:
        ValueError: when no valid pairs remain after filtering.
    """
    print("\n=== Running MASt3R Reconstruction ===")

    print_memory_status("Initial memory state")

    print(f"Processing {len(pairs)} pairs...")

    # MASt3R's default inference size (224 or 512).
    img_size = 224

    print(f"Loading {len(image_paths)} images at {img_size}x{img_size}...")
    imgs = load_images(image_paths, size=img_size, verbose=True)
    print(f"Loaded {len(imgs)} images")
    print_memory_status("After loading images")

    # Build (view1, view2) pairs, skipping anything malformed.
    print(f"Creating {len(pairs)} image pairs...")
    scene_graph = []
    for idx1, idx2 in tqdm(pairs, desc="Preparing pairs"):
        if idx1 >= len(imgs) or idx2 >= len(imgs):
            print(f"Warning: Invalid pair ({idx1}, {idx2}), skipping...")
            continue

        view1 = imgs[idx1]
        view2 = imgs[idx2]

        if view1 is None or view2 is None:
            print(f"Warning: None view in pair ({idx1}, {idx2}), skipping...")
            continue

        if not isinstance(view1, dict) or not isinstance(view2, dict):
            print(f"Warning: Invalid view type in pair ({idx1}, {idx2})")
            print(f"  view1 type: {type(view1)}, view2 type: {type(view2)}")
            continue

        scene_graph.append((view1, view2))

    print(f"Valid pairs: {len(scene_graph)}")
    if len(scene_graph) == 0:
        raise ValueError("No valid pairs to process!")

    # Dump the structure of the first pair for debugging.
    print("\n=== Debugging first pair ===")
    first_pair = scene_graph[0]
    print(f"Pair type: {type(first_pair)}")
    print(f"View1 type: {type(first_pair[0])}")
    print(f"View2 type: {type(first_pair[1])}")
    if isinstance(first_pair[0], dict):
        print(f"View1 keys: {list(first_pair[0].keys())}")
    if isinstance(first_pair[1], dict):
        print(f"View2 keys: {list(first_pair[1].keys())}")

    print(f"\nRunning MASt3R inference on {len(scene_graph)} pairs...")
    try:
        pairs_output = inference(
            scene_graph,
            model,
            device,
            batch_size=1,
            verbose=True
        )
    except Exception as e:
        print(f"Error during inference: {e}")
        print(f"Error type: {type(e)}")
        import traceback
        traceback.print_exc()
        raise

    # NOTE(review): `inference` is patched to inference_debug, which returns a
    # dict — len() here counts top-level keys, not pairs.
    print(f"Inference complete, got {len(pairs_output)} results")
    print_memory_status("After inference")

    print("\n=== Running Global Alignment ===")
    scene = global_aligner(
        pairs_output,
        device=device,
        mode=GlobalAlignerMode.PointCloudOptimizer,
        verbose=True
    )

    print("Optimizing scene...")
    loss = scene.compute_global_alignment(
        init='mst',
        niter=300,
        schedule='cosine',
        lr=0.01
    )

    print(f"Optimization complete, final loss: {loss:.4f}")
    print_memory_status("After optimization")

    colmap_dir = Path(output_dir) / "colmap"
    colmap_dir.mkdir(parents=True, exist_ok=True)

    print(f"\n=== Saving to COLMAP format ===")
    save_colmap_format(scene, imgs, colmap_dir)

    print(f"✓ COLMAP data saved to {colmap_dir}")

    return scene, colmap_dir


def save_colmap_format(scene, imgs, output_dir):
    """
    Save a MASt3R scene as COLMAP text files (cameras.txt, images.txt,
    points3D.txt).

    NOTE(review): poses are written as returned by scene.get_im_poses() without
    inversion, while the binary writer elsewhere in this notebook inverts to
    world-to-camera first — confirm which convention the consumer expects.
    Also assumes scene.get_pts3d() returns a tensor with .cpu() — confirm.
    """
    # Hoisted out of the per-image loop below (the original re-imported scipy's
    # Rotation on every iteration). The unused BasePCOptimizer import was removed.
    from scipy.spatial.transform import Rotation

    output_dir = Path(output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)

    # Camera parameters from the optimized scene.
    focals = scene.get_focals().cpu().numpy()
    principal_points = scene.get_principal_points().cpu().numpy()
    poses = scene.get_im_poses().cpu().numpy()
    pts3d = scene.get_pts3d().cpu().numpy()

    n_images = len(imgs)

    # cameras.txt — SIMPLE_PINHOLE model (f, cx, cy)
    cameras_file = output_dir / "cameras.txt"
    with open(cameras_file, 'w') as f:
        f.write("# Camera list with one line of data per camera:\n")
        f.write("# CAMERA_ID, MODEL, WIDTH, HEIGHT, PARAMS[]\n")
        for i in range(n_images):
            img_shape = imgs[i]['true_shape']
            width, height = img_shape[1], img_shape[0]
            fx = focals[i, 0]  # assumes focals has shape (N, 1) — TODO confirm
            cx, cy = principal_points[i]
            f.write(f"{i+1} SIMPLE_PINHOLE {width} {height} {fx} {cx} {cy}\n")

    print(f"✓ Saved cameras.txt with {n_images} cameras")

    # images.txt — pose per image; the 2D-points line is left empty.
    images_file = output_dir / "images.txt"
    with open(images_file, 'w') as f:
        f.write("# Image list with two lines of data per image:\n")
        f.write("# IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME\n")
        f.write("# POINTS2D[] as (X, Y, POINT3D_ID)\n")
        for i in range(n_images):
            pose = poses[i]
            R = pose[:3, :3]
            t = pose[:3, 3]

            # scipy returns [x, y, z, w]; COLMAP wants [w, x, y, z].
            quat = Rotation.from_matrix(R).as_quat()
            qw, qx, qy, qz = quat[3], quat[0], quat[1], quat[2]

            # NOTE(review): dust3r image dicts may not carry 'filepath' —
            # confirm against the loader actually used upstream.
            img_name = Path(imgs[i]['filepath']).name

            f.write(f"{i+1} {qw} {qx} {qy} {qz} {t[0]} {t[1]} {t[2]} {i+1} {img_name}\n")
            f.write("\n")  # empty 2D-points line

    print(f"✓ Saved images.txt with {n_images} images")

    # points3D.txt — up to 1000 valid points per image, default gray color.
    points_file = output_dir / "points3D.txt"
    with open(points_file, 'w') as f:
        f.write("# 3D point list with one line of data per point:\n")
        f.write("# POINT3D_ID, X, Y, Z, R, G, B, ERROR, TRACK[] as (IMAGE_ID, POINT2D_IDX)\n")

        point_id = 1
        for i in range(n_images):
            pts = pts3d[i]  # (H, W, 3)
            valid_mask = ~np.isnan(pts).any(axis=-1)
            valid_pts = pts[valid_mask]
            for pt in valid_pts[:1000]:  # cap at 1000 points per image
                f.write(f"{point_id} {pt[0]} {pt[1]} {pt[2]} 128 128 128 0.0\n")
                point_id += 1

    print(f"✓ Saved points3D.txt with {point_id-1} points")


def print_memory_status(label=""):
    """Print current GPU (if available) and CPU memory usage with a label."""
    import psutil

    if torch.cuda.is_available():
        allocated = torch.cuda.memory_allocated() / 1024**3
        reserved = torch.cuda.memory_reserved() / 1024**3
        print(f"{label}:")
        print(f"GPU Memory - Allocated: {allocated:.2f}GB, Reserved: {reserved:.2f}GB")

    cpu_percent = psutil.virtual_memory().percent
    print(f"CPU Memory Usage: {cpu_percent:.1f}%")


#v26
def extract_colmap_data(scene, image_paths, max_points=1000000):
    """
    Extract COLMAP-compatible camera parameters and 3D points from a MASt3R scene.

    NOTE(review): a second extract_colmap_data with different camera handling is
    defined in a later cell and silently shadows this one once that cell runs —
    consolidate to a single definition.

    Args:
        scene: MASt3R scene object
        image_paths: List of image paths
        max_points: Maximum number of 3D points to keep (default: 1M)

    Returns:
        (pts3d, colors, cameras, poses) with poses as world-to-camera 4x4.
    """
    print("\n=== Extracting COLMAP-compatible data ===")

    pts_all = scene.get_pts3d()
    print(f"pts_all type: {type(pts_all)}")

    if isinstance(pts_all, list):
        print(f"pts_all is a list with {len(pts_all)} elements")
        if len(pts_all) > 0:
            print(f"First element type: {type(pts_all[0])}")
            if hasattr(pts_all[0], 'shape'):
                print(f"First element shape: {pts_all[0].shape}")
        pts_all = torch.stack([p if isinstance(p, torch.Tensor) else torch.tensor(p)
                               for p in pts_all])
        print(f"pts_all shape after conversion: {pts_all.shape}")

    if len(pts_all.shape) == 4:
        print(f"Found batched point cloud: {pts_all.shape}")
        B, H, W, _ = pts_all.shape
        pts3d = pts_all.reshape(-1, 3).detach().cpu().numpy()

        # Per-pixel colors from the resized source images.
        # NOTE(review): assumes B == len(image_paths) and 3-channel RGB input —
        # confirm (RGBA/grayscale would break the reshape).
        colors = []
        for img_path in image_paths:
            img = Image.open(img_path).resize((W, H))
            colors.append(np.array(img))
        colors = np.stack(colors).reshape(-1, 3) / 255.0
    else:
        pts3d = pts_all.detach().cpu().numpy() if isinstance(pts_all, torch.Tensor) else pts_all
        colors = np.ones((len(pts3d), 3)) * 0.5

    print(f"✓ Extracted {len(pts3d)} 3D points from {len(image_paths)} images")

    # Downsample to bound memory usage.
    if len(pts3d) > max_points:
        print(f"\n⚠ Downsampling from {len(pts3d)} to {max_points} points to reduce memory usage...")

        # Remove invalid points first.
        valid_mask = ~(np.isnan(pts3d).any(axis=1) | np.isinf(pts3d).any(axis=1))
        pts3d_valid = pts3d[valid_mask]
        colors_valid = colors[valid_mask]

        # BUGFIX: after dropping NaN/Inf points fewer than max_points may
        # remain; np.random.choice(..., replace=False) would then raise
        # ValueError. Clamp the sample size.
        sample_size = min(max_points, len(pts3d_valid))
        indices = np.random.choice(len(pts3d_valid), size=sample_size, replace=False)
        pts3d = pts3d_valid[indices]
        colors = colors_valid[indices]

        print(f"✓ Downsampled to {len(pts3d)} points")

    print("Extracting camera parameters...")

    # IMPORTANT: MASt3R poses are camera-to-world; COLMAP expects
    # world-to-camera, so each pose must be inverted.
    poses_c2w = scene.get_im_poses().detach().cpu().numpy()
    print(f"Retrieved camera-to-world poses: shape {poses_c2w.shape}")

    poses = np.array([np.linalg.inv(pose_c2w) for pose_c2w in poses_c2w])
    print(f"Converted to world-to-camera poses for COLMAP")

    focals = scene.get_focals().detach().cpu().numpy()
    pp = scene.get_principal_points().detach().cpu().numpy()
    print(f"Focals shape: {focals.shape}")
    print(f"Principal points shape: {pp.shape}")

    # MASt3R processing resolution (assumed 224 — keep in sync with load size).
    mast3r_size = 224.0

    cameras = []
    for i, img_path in enumerate(image_paths):
        img = Image.open(img_path)
        W, H = img.size

        # Scale factor from MASt3R resolution back to the original image.
        scale = W / mast3r_size

        if focals.shape[1] == 1:
            # Isotropic camera: fx == fy.
            focal_mast3r = float(focals[i, 0])
            fx = fy = focal_mast3r * scale
        else:
            fx = float(focals[i, 0]) * scale
            fy = float(focals[i, 1]) * scale
            # BUGFIX: focal_mast3r was undefined in this branch, producing a
            # NameError in the i == 0 debug print below.
            focal_mast3r = float(focals[i, 0])

        # Principal point is scaled the same way.
        cx = float(pp[i, 0]) * scale
        cy = float(pp[i, 1]) * scale

        camera = {
            'camera_id': i + 1,
            'model': 'PINHOLE',
            'width': W,
            'height': H,
            'params': [fx, fy, cx, cy]
        }
        cameras.append(camera)

        if i == 0:
            print(f"\nExample camera 0:")
            print(f"  Image size: {W}x{H}")
            print(f"  MASt3R focal: {focal_mast3r:.2f}, pp: ({pp[i,0]:.2f}, {pp[i,1]:.2f})")
            print(f"  Scaled fx={fx:.2f}, fy={fy:.2f}, cx={cx:.2f}, cy={cy:.2f}")
            print(f"  Pose (first row): {poses[i][0]}")

    print(f"\n✓ Extracted {len(cameras)} cameras and {len(poses)} poses")

    return pts3d, colors, cameras, poses


# ============================================================================
# Step 3 & 4: MASt3R Reconstruction (replacement for COLMAP)
# ============================================================================

import struct
from pathlib import Path

def import_to_mast3r_and_save_colmap(
    image_dir,
    processed_image_dir,
    feature_dir,
    database_path,
    output_dir,
    pairs,
    single_camera=True
):
    """
    Estimate camera poses with MASt3R and save the result in COLMAP format.

    NOTE(review): near-duplicate of run_mast3r_mapper below (minus the output
    validation); consider keeping only one of the two.

    Returns:
        Path of the sparse reconstruction directory.
    """
    print("\n=== Running MASt3R Reconstruction ===")
    print("Initial memory state:")
    get_memory_info()

    # Load the MASt3R model.
    from mast3r.model import AsymmetricMASt3R
    device = Config.DEVICE

    model = AsymmetricMASt3R.from_pretrained(Config.MAST3R_MODEL).to(device)
    model.eval()
    print(f"✓ MASt3R model loaded on {device}")

    # Collect image paths.
    image_paths = sorted([
        os.path.join(processed_image_dir, f)
        for f in os.listdir(processed_image_dir)
        if f.lower().endswith(('.jpg', '.jpeg', '.png'))
    ])

    # Reconstruct with MASt3R.
    scene, mast3r_images = run_mast3r_pairs(
        model, image_paths, pairs, device,
        batch_size=1
    )

    # Free the model before the memory-hungry extraction step.
    del model
    clear_memory()

    # Extract COLMAP-format data.
    pts3d, colors, cameras, poses = extract_colmap_data(
        scene, image_paths, max_points=1000000
    )

    # Save in COLMAP format.
    sparse_dir = save_colmap_reconstruction(
        pts3d, colors, cameras, poses, image_paths, output_dir
    )

    print(f"\n✓ MASt3R reconstruction saved in COLMAP format")
    print(f"  Output: {sparse_dir}")

    return sparse_dir


def run_mast3r_mapper(database_path, image_dir, output_dir, pairs, processed_image_dir, max_points=1000000):
    """
    MASt3R-based mapping (replacement for COLMAP's mapper).

    Args:
        database_path: Path to database (unused here; kept for interface parity).
        image_dir: Original image directory (unused here; kept for interface parity).
        output_dir: Output directory.
        pairs: Image-index pairs.
        processed_image_dir: Directory with preprocessed images.
        max_points: Maximum number of 3D points to extract.

    Returns:
        Path to the sparse/0 model directory containing the .bin files.

    Raises:
        RuntimeError: if the reconstruction directory was not created.
        FileNotFoundError: if any required .bin file is missing.
    """
    print("\n=== MASt3R Mapper (COLMAP Alternative) ===")
    print("Initial memory state:")
    get_memory_info()

    # Load the MASt3R model.
    from mast3r.model import AsymmetricMASt3R
    device = Config.DEVICE

    model = AsymmetricMASt3R.from_pretrained(Config.MAST3R_MODEL).to(device)
    model.eval()
    print(f"✓ MASt3R model loaded on {device}")

    # Collect image paths.
    image_paths = sorted([
        os.path.join(processed_image_dir, f)
        for f in os.listdir(processed_image_dir)
        if f.lower().endswith(('.jpg', '.jpeg', '.png'))
    ])

    # Reconstruct with MASt3R.
    scene, mast3r_images = run_mast3r_pairs(
        model, image_paths, pairs, device,
        batch_size=1
    )

    # Free the model before the memory-hungry extraction step.
    del model
    clear_memory()

    # Extract COLMAP-format data (forwarding max_points).
    pts3d, colors, cameras, poses = extract_colmap_data(
        scene, image_paths, max_points=max_points
    )

    # Save in COLMAP format.
    sparse_dir = save_colmap_reconstruction(
        pts3d, colors, cameras, poses, image_paths, output_dir
    )

    print(f"\n✓ MASt3R reconstruction saved in COLMAP format")
    print(f"  Output: {sparse_dir}")

    # Validate the output so downstream steps fail early with a clear message.
    model_dir = sparse_dir
    if not os.path.exists(model_dir):
        raise RuntimeError(f"MASt3R reconstruction failed - directory not found: {model_dir}")

    required_files = ['cameras.bin', 'images.bin', 'points3D.bin']
    for file in required_files:
        file_path = os.path.join(model_dir, file)
        if not os.path.exists(file_path):
            # BUGFIX: the original message omitted the full path even though
            # file_path was computed; include it for easier debugging.
            raise FileNotFoundError(f"Required file not found: {file_path}")

    print(f"\n✓ MASt3R reconstruction complete: {model_dir}")
    return model_dir
model_dir","metadata":{"trusted":true,"execution":{"execution_failed":"2026-01-20T05:55:54.067Z"}},"outputs":[],"execution_count":null},{"cell_type":"code","source":"def load_images_for_mast3r(image_paths, size=224):\n \"\"\"MASt3R用に画像をロード\"\"\"\n print(f\"\\n=== Loading images for MASt3R (size={size}) ===\")\n\n from dust3r.utils.image import load_images\n\n images = load_images(image_paths, size=size, verbose=True)\n\n return images\n\n\n\n\n\ndef run_mast3r_pairs(model, image_paths, pairs, device='cuda', batch_size=1, chunk_size=500):\n \"\"\"\n 選択されたペアでMASt3Rを実行(画像もチャンクごとにロード、結合も最適化)\n \"\"\"\n print(\"\\n=== Running MASt3R Reconstruction ===\")\n print(\"Initial memory state:\")\n get_memory_info()\n\n from dust3r.cloud_opt import global_aligner, GlobalAlignerMode\n import dust3r.inference\n import pickle\n import tempfile\n\n print(f\"Processing {len(pairs)} pairs in chunks of {chunk_size}...\")\n print(\"Note: Images will be loaded on-demand per chunk to save memory\")\n\n # 一時ディレクトリを作成\n temp_dir = tempfile.mkdtemp()\n print(f\"Temporary directory: {temp_dir}\")\n\n # チャンクごとに処理してディスクに保存\n chunk_files = []\n num_chunks = (len(pairs) + chunk_size - 1) // chunk_size\n\n for chunk_idx in range(num_chunks):\n start_idx = chunk_idx * chunk_size\n end_idx = min(start_idx + chunk_size, len(pairs))\n chunk_pairs_indices = pairs[start_idx:end_idx]\n\n print(f\"\\n--- Processing chunk {chunk_idx + 1}/{num_chunks} (pairs {start_idx}-{end_idx}) ---\")\n\n # このチャンクで必要な画像インデックスを収集\n needed_image_indices = set()\n for idx1, idx2 in chunk_pairs_indices:\n needed_image_indices.add(idx1)\n needed_image_indices.add(idx2)\n\n needed_image_indices = sorted(list(needed_image_indices))\n print(f\"Loading {len(needed_image_indices)} unique images for this chunk...\")\n\n # 必要な画像だけロード\n needed_image_paths = [image_paths[i] for i in needed_image_indices]\n chunk_images = load_images_for_mast3r(needed_image_paths, size=Config.MAST3R_IMAGE_SIZE)\n\n # インデックスマッピングを作成(元のインデックス → 
チャンク内インデックス)\n index_mapping = {orig_idx: new_idx for new_idx, orig_idx in enumerate(needed_image_indices)}\n\n print(f\"Memory after loading chunk images:\")\n get_memory_info()\n\n # 画像ペアを作成(インデックスを変換)\n mast3r_pairs = []\n for idx1, idx2 in tqdm(chunk_pairs_indices, desc=f\"Preparing chunk {chunk_idx + 1}\"):\n new_idx1 = index_mapping[idx1]\n new_idx2 = index_mapping[idx2]\n mast3r_pairs.append([chunk_images[new_idx1], chunk_images[new_idx2]])\n\n # 推論を実行\n print(f\"Running MASt3R inference on {len(mast3r_pairs)} pairs...\")\n output = dust3r.inference.inference(mast3r_pairs, model, device, batch_size=batch_size, verbose=True)\n\n # ディスクに保存\n chunk_file = os.path.join(temp_dir, f'chunk_{chunk_idx}.pkl')\n with open(chunk_file, 'wb') as f:\n pickle.dump(output, f)\n chunk_files.append(chunk_file)\n\n del mast3r_pairs\n del chunk_images # 画像も削除\n del output\n clear_memory()\n\n print(f\"Chunk {chunk_idx + 1} saved to disk. Memory state:\")\n get_memory_info()\n\n # ディスクから読み込んで結合(メモリ効率化版)\n print(\"\\n=== Combining all chunks from disk ===\")\n\n # まず最初の2チャンクを結合\n print(f\"Loading and combining chunks 1-2...\")\n with open(chunk_files[0], 'rb') as f:\n combined_output = pickle.load(f)\n os.remove(chunk_files[0])\n\n with open(chunk_files[1], 'rb') as f:\n chunk_output = pickle.load(f)\n\n for key in combined_output.keys():\n if isinstance(combined_output[key], dict):\n for field in combined_output[key].keys():\n if isinstance(combined_output[key][field], torch.Tensor):\n combined_output[key][field] = torch.cat([\n combined_output[key][field],\n chunk_output[key][field]\n ], dim=0)\n elif isinstance(combined_output[key][field], list):\n combined_output[key][field].extend(chunk_output[key][field])\n elif isinstance(combined_output[key], torch.Tensor):\n combined_output[key] = torch.cat([\n combined_output[key],\n chunk_output[key]\n ], dim=0)\n elif isinstance(combined_output[key], list):\n combined_output[key].extend(chunk_output[key])\n\n del chunk_output\n 
os.remove(chunk_files[1])\n clear_memory()\n\n # 残りのチャンクを1つずつ結合\n for idx in range(2, len(chunk_files)):\n print(f\"Loading and combining chunk {idx + 1}/{len(chunk_files)}...\")\n\n with open(chunk_files[idx], 'rb') as f:\n chunk_output = pickle.load(f)\n\n for key in combined_output.keys():\n if isinstance(combined_output[key], dict):\n for field in combined_output[key].keys():\n if isinstance(combined_output[key][field], torch.Tensor):\n # メモリ効率化: 結合後に元のTensorを削除\n old_tensor = combined_output[key][field]\n combined_output[key][field] = torch.cat([\n old_tensor,\n chunk_output[key][field]\n ], dim=0)\n del old_tensor\n elif isinstance(combined_output[key][field], list):\n combined_output[key][field].extend(chunk_output[key][field])\n\n elif isinstance(combined_output[key], torch.Tensor):\n old_tensor = combined_output[key]\n combined_output[key] = torch.cat([\n old_tensor,\n chunk_output[key]\n ], dim=0)\n del old_tensor\n\n elif isinstance(combined_output[key], list):\n combined_output[key].extend(chunk_output[key])\n\n del chunk_output\n os.remove(chunk_files[idx])\n clear_memory()\n\n # 進捗確認\n if (idx + 1) % 3 == 0:\n print(f\" Memory after combining {idx + 1} chunks:\")\n get_memory_info()\n\n os.rmdir(temp_dir)\n\n print(f\"✓ Combined output keys: {list(combined_output.keys())}\")\n print(\"After combining all chunks:\")\n get_memory_info()\n\n print(\"✓ MASt3R inference complete\")\n\n # 最後にグローバルアライメント用に全画像をロード\n print(\"\\nLoading all images for global alignment...\")\n images = load_images_for_mast3r(image_paths, size=Config.MAST3R_IMAGE_SIZE)\n print(\"Memory after loading all images:\")\n get_memory_info()\n\n # グローバルアライメント\n print(\"\\nRunning global alignment...\")\n scene = global_aligner(\n combined_output,\n device=device,\n mode=GlobalAlignerMode.PointCloudOptimizer\n )\n\n del combined_output\n clear_memory()\n\n print(\"Computing global alignment...\")\n loss = scene.compute_global_alignment(\n init=\"mst\",\n niter=150,\n schedule='cosine',\n 
lr=0.01\n )\n\n print(f\"✓ Global alignment complete (final loss: {loss:.6f})\")\n print(\"Final memory state:\")\n get_memory_info()\n\n return scene, images\n\n\n\n\n\n\n\ndef extract_colmap_data(scene, image_paths, max_points=1000000):\n \"\"\"\n MASt3RシーンからCOLMAP互換のデータを抽出\n \"\"\"\n print(\"\\n=== Extracting COLMAP-compatible data ===\")\n\n pts_all = scene.get_pts3d()\n print(f\"pts_all type: {type(pts_all)}\")\n\n if isinstance(pts_all, list):\n print(f\"pts_all is a list with {len(pts_all)} elements\")\n if len(pts_all) > 0:\n print(f\"First element type: {type(pts_all[0])}\")\n if hasattr(pts_all[0], 'shape'):\n print(f\"First element shape: {pts_all[0].shape}\")\n\n pts_all = torch.stack([p if isinstance(p, torch.Tensor) else torch.tensor(p)\n for p in pts_all])\n print(f\"pts_all shape after conversion: {pts_all.shape}\")\n\n if len(pts_all.shape) == 4:\n print(f\"Found batched point cloud: {pts_all.shape}\")\n B, H, W, _ = pts_all.shape\n pts3d = pts_all.reshape(-1, 3).detach().cpu().numpy()\n\n colors = []\n for img_path in image_paths:\n img = Image.open(img_path).resize((W, H))\n colors.append(np.array(img))\n colors = np.stack(colors).reshape(-1, 3) / 255.0\n else:\n pts3d = pts_all.detach().cpu().numpy() if isinstance(pts_all, torch.Tensor) else pts_all\n colors = np.ones((len(pts3d), 3)) * 0.5\n\n print(f\"✓ Extracted {len(pts3d)} 3D points from {len(image_paths)} images\")\n\n if len(pts3d) > max_points:\n print(f\"\\n⚠ Downsampling from {len(pts3d)} to {max_points} points...\")\n\n valid_mask = ~(np.isnan(pts3d).any(axis=1) | np.isinf(pts3d).any(axis=1))\n pts3d_valid = pts3d[valid_mask]\n colors_valid = colors[valid_mask]\n\n indices = np.random.choice(len(pts3d_valid), size=max_points, replace=False)\n pts3d = pts3d_valid[indices]\n colors = colors_valid[indices]\n\n print(f\"✓ Downsampled to {len(pts3d)} points\")\n\n print(\"Extracting camera parameters...\")\n\n poses_tensor = scene.get_im_poses()\n print(f\"Retrieved all poses at once: 
def save_colmap_reconstruction(pts3d, colors, cameras, poses, image_paths, output_dir):
    """Save a reconstruction in COLMAP binary format under <output_dir>/sparse/0.

    Args:
        pts3d: (N, 3) array of 3D points (may contain NaN/Inf rows; skipped).
        colors: (N, 3) array of per-point colors in [0, 1].
        cameras: list of camera dicts (see write_cameras_binary).
        poses: sequence of 4x4 camera-to-world matrices, one per image.
        image_paths: source image paths (basenames are stored).
        output_dir: base output directory.

    Returns:
        Path to the sparse/0 directory containing the written files.
    """
    print("\n=== Saving COLMAP reconstruction ===")

    sparse_dir = Path(output_dir) / 'sparse' / '0'
    sparse_dir.mkdir(parents=True, exist_ok=True)

    print(f" Writing COLMAP files to {sparse_dir}...")

    write_cameras_binary(cameras, sparse_dir / 'cameras.bin')
    print(f" ✓ Wrote {len(cameras)} cameras")

    write_images_binary(image_paths, cameras, poses, sparse_dir / 'images.bin')
    print(f" ✓ Wrote {len(image_paths)} images")

    num_points = write_points3d_binary(pts3d, colors, sparse_dir / 'points3D.bin')
    print(f" ✓ Wrote {num_points} 3D points")

    print(f"\n✓ COLMAP reconstruction saved to {sparse_dir}")

    return sparse_dir


def write_cameras_binary(cameras, output_file):
    """Write cameras.bin in COLMAP binary format.

    Layout (little-endian, as COLMAP reads it): num_cameras uint64, then per
    camera: camera_id int32, model_id int32, width uint64, height uint64,
    followed by the model parameters as float64.
    """
    with open(output_file, 'wb') as f:
        f.write(struct.pack('<Q', len(cameras)))

        for i, cam in enumerate(cameras):
            camera_id = cam.get('camera_id', i + 1)
            model_id = 1  # COLMAP model id 1 == PINHOLE (fx, fy, cx, cy)
            width = cam['width']
            height = cam['height']
            params = cam['params']

            f.write(struct.pack('<i', camera_id))
            f.write(struct.pack('<i', model_id))
            f.write(struct.pack('<Q', width))
            f.write(struct.pack('<Q', height))

            # PINHOLE has exactly 4 parameters.
            for param in params[:4]:
                f.write(struct.pack('<d', param))


def write_images_binary(image_paths, cameras, poses, output_file):
    """Write images.bin in COLMAP binary format.

    COLMAP stores the WORLD-to-CAMERA transform per image, while the poses
    produced by the MASt3R/DUSt3R global alignment (scene.get_im_poses())
    are CAMERA-to-WORLD matrices, so each pose is inverted here before
    serialization (R' = R^T, t' = -R^T t).
    """
    with open(output_file, 'wb') as f:
        f.write(struct.pack('<Q', len(image_paths)))

        for i, (img_path, pose) in enumerate(zip(image_paths, poses)):
            image_id = i + 1
            camera_id = cameras[i].get('camera_id', i + 1)
            image_name = os.path.basename(img_path)

            # Invert cam-to-world -> world-to-cam for COLMAP's convention.
            R_c2w = pose[:3, :3]
            t_c2w = pose[:3, 3]
            R = R_c2w.T
            tvec = -R_c2w.T @ t_c2w

            qvec = rotmat2qvec(R)

            f.write(struct.pack('<i', image_id))

            for q in qvec:
                f.write(struct.pack('<d', float(q)))

            for tv in tvec:
                f.write(struct.pack('<d', float(tv)))

            f.write(struct.pack('<i', camera_id))
            f.write(image_name.encode('utf-8') + b'\x00')  # NUL-terminated name
            f.write(struct.pack('<Q', 0))  # no 2D observations tracked


def write_points3d_binary(pts3d, colors, output_file):
    """Write points3D.bin in COLMAP binary format.

    NaN/Inf points are skipped. Per point: id uint64, xyz 3x float64,
    rgb 3x uint8, reprojection error float64 (unknown -> 0.0) and an
    empty track (length uint64 == 0).

    Returns:
        Number of points actually written.
    """
    pts3d = np.asarray(pts3d)
    # Keep only fully finite points (vectorized version of the NaN/Inf scan).
    valid_indices = np.where(np.isfinite(pts3d).all(axis=1))[0]

    with open(output_file, 'wb') as f:
        f.write(struct.pack('<Q', len(valid_indices)))

        for idx, point_id in enumerate(valid_indices):
            pt = pts3d[point_id]
            color = colors[point_id]

            f.write(struct.pack('<Q', int(point_id)))

            for coord in pt:
                f.write(struct.pack('<d', float(coord)))

            col_int = (np.asarray(color) * 255).astype(np.uint8)
            for c in col_int:
                f.write(struct.pack('<B', int(c)))

            f.write(struct.pack('<d', 0.0))  # reprojection error (unknown)
            f.write(struct.pack('<Q', 0))    # empty track

            if (idx + 1) % 1000000 == 0:
                print(f" Wrote {idx + 1} / {len(valid_indices)} points...")

    return len(valid_indices)


def rotmat2qvec(R):
    """Convert a 3x3 rotation matrix to a unit quaternion (w, x, y, z).

    Uses the standard Shepperd-style branch on the trace / dominant diagonal
    element for numerical stability.
    """
    R = np.asarray(R, dtype=np.float64)
    trace = np.trace(R)

    if trace > 0:
        s = 0.5 / np.sqrt(trace + 1.0)
        w = 0.25 / s
        x = (R[2, 1] - R[1, 2]) * s
        y = (R[0, 2] - R[2, 0]) * s
        z = (R[1, 0] - R[0, 1]) * s
    elif R[0, 0] > R[1, 1] and R[0, 0] > R[2, 2]:
        s = 2.0 * np.sqrt(1.0 + R[0, 0] - R[1, 1] - R[2, 2])
        w = (R[2, 1] - R[1, 2]) / s
        x = 0.25 * s
        y = (R[0, 1] + R[1, 0]) / s
        z = (R[0, 2] + R[2, 0]) / s
    elif R[1, 1] > R[2, 2]:
        s = 2.0 * np.sqrt(1.0 + R[1, 1] - R[0, 0] - R[2, 2])
        w = (R[0, 2] - R[2, 0]) / s
        x = (R[0, 1] + R[1, 0]) / s
        y = 0.25 * s
        z = (R[1, 2] + R[2, 1]) / s
    else:
        s = 2.0 * np.sqrt(1.0 + R[2, 2] - R[0, 0] - R[1, 1])
        w = (R[1, 0] - R[0, 1]) / s
        x = (R[0, 2] + R[2, 0]) / s
        y = (R[1, 2] + R[2, 1]) / s
        z = 0.25 * s

    qvec = np.array([w, x, y, z], dtype=np.float64)
    qvec = qvec / np.linalg.norm(qvec)

    return qvec


# Memory-management utilities (add more as needed)
def clear_memory():
    """Aggressively release GPU and CPU memory held by the kernel."""
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        torch.cuda.synchronize()


def get_memory_info():
    """Print the current GPU and CPU memory usage.

    NOTE: requires the third-party `psutil` package for the CPU figure.
    """
    if torch.cuda.is_available():
        allocated = torch.cuda.memory_allocated() / 1024**3
        reserved = torch.cuda.memory_reserved() / 1024**3
        print(f"GPU Memory - Allocated: {allocated:.2f}GB, Reserved: {reserved:.2f}GB")

    import psutil
    cpu_mem = psutil.virtual_memory().percent
    print(f"CPU Memory Usage: {cpu_mem:.1f}%")
# ============================================================================
# Step 3: Import to COLMAP (to be removed)
# ============================================================================

def import_to_colmap(image_dir, feature_dir, database_path, single_camera=True):
    """
    Import features and matches to COLMAP database.

    Args:
        image_dir (str): Directory containing the images.
        feature_dir (str): Directory to save/load extracted features.
        database_path (str): Path to the database file.
        single_camera (bool): Set to True if all images have the same
            dimensions (e.g., pre-resized).

    Raises:
        ValueError: if image_dir contains no supported images.
    """
    print("\n=== Creating COLMAP Database ===")

    # Start from a clean database so stale entries cannot leak in.
    if os.path.exists(database_path):
        os.remove(database_path)
        print(f"✓ Removed existing database")

    db = COLMAPDatabase.connect(database_path)
    try:
        db.create_tables()

        print(f"Single camera mode: {single_camera}")

        image_files = [f for f in os.listdir(image_dir)
                       if f.lower().endswith(('.jpg', '.jpeg', '.png'))]
        if not image_files:
            raise ValueError(f"No images found in {image_dir}")

        # add_keypoints expects a single extension; use the first image's.
        first_image = sorted(image_files)[0]
        img_ext = os.path.splitext(first_image)[1]
        print(f"Detected image extension: '{img_ext}'")

        fname_to_id = add_keypoints(
            db,
            feature_dir,
            image_dir,
            img_ext,
            'PINHOLE',
            single_camera=single_camera
        )

        add_matches(db, feature_dir, fname_to_id)
        db.commit()
    finally:
        # Close the connection even if any import step fails, so the
        # database file is not left locked/leaked.
        db.close()

    print(f"✓ Database created: {database_path}")

# ============================================================================
# Step 4: Run COLMAP Mapper
# ============================================================================

def run_colmap_mapper(database_path, image_dir, output_dir):
    """Run the COLMAP incremental mapper, streaming its console output.

    Returns:
        Path to the reconstructed model directory (<output_dir>/0).

    Raises:
        RuntimeError: if the mapper exits non-zero or produces no model.
    """
    print("\n=== Running COLMAP Reconstruction ===")
    os.makedirs(output_dir, exist_ok=True)
    cmd = [
        'colmap', 'mapper',
        '--database_path', database_path,
        '--image_path', image_dir,
        '--output_path', output_dir,
        '--Mapper.ba_refine_focal_length', '0',
        '--Mapper.ba_refine_principal_point', '0',
        '--Mapper.ba_refine_extra_params', '0',
        '--Mapper.min_num_matches', '15',
        '--Mapper.init_min_num_inliers', '50',
        '--Mapper.max_num_models', '1',
        '--Mapper.num_threads', '16',
    ]
    print(f"Command: {' '.join(cmd)}\n")

    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)
    for line in process.stdout:
        print(line, end='')
    process.wait()
    if process.returncode == 0:
        model_dir = os.path.join(output_dir, '0')
        if os.path.exists(model_dir):
            print(f"\n✓ COLMAP reconstruction complete: {model_dir}")
            return model_dir
    # Either a non-zero exit code or a missing model directory.
    raise RuntimeError(f"COLMAP reconstruction failed (exit code {process.returncode})")
# ============================================================================
# Step 5: Convert to Gaussian Splatting Format (if needed)
# ============================================================================

def convert_to_gs_format(colmap_model_dir, output_dir):
    """Check that a COLMAP sparse model is complete and return the source
    path Gaussian Splatting should be pointed at.

    Args:
        colmap_model_dir (str): COLMAP sparse/0 directory
            (e.g. /content/output/colmap/sparse/0).
        output_dir (str): Base output directory.

    Returns:
        str: The grandparent of ``colmap_model_dir`` — the directory that
        contains ``sparse/`` (e.g. /content/output/colmap).

    Raises:
        FileNotFoundError: if any required binary file is missing.
    """
    print("\n=== Verifying COLMAP Model for Gaussian Splatting ===")

    import pycolmap
    recon = pycolmap.Reconstruction(colmap_model_dir)

    print(f"Registered images: {len(recon.images)}")
    print(f"3D points: {len(recon.points3D)}")

    # Gaussian Splatting needs all three COLMAP binaries to be present.
    for fname in ('cameras.bin', 'images.bin', 'points3D.bin'):
        if not os.path.exists(os.path.join(colmap_model_dir, fname)):
            raise FileNotFoundError(f"Required file not found: {fname}")
        print(f" ✓ {fname}")

    # .../colmap/sparse/0 -> .../colmap
    gs_source_dir = os.path.dirname(os.path.dirname(colmap_model_dir))

    print(f"\n✓ COLMAP model ready for Gaussian Splatting")
    print(f" Source path: {gs_source_dir}")

    return gs_source_dir
def train_gaussian_splatting(colmap_dir, image_dir, output_dir, iterations=6000):
    """Train a Gaussian Splatting model from a COLMAP reconstruction.

    Args:
        colmap_dir: Directory containing the COLMAP output (must hold
            sparse/0 with the binary model files).
        image_dir: Directory with the training images.
        output_dir: Where the trained model is written.
        iterations: Number of training iterations.

    Returns:
        output_dir on success.

    Raises:
        FileNotFoundError: if the sparse model or the expected output
            point cloud is missing.
        RuntimeError: if train.py exits non-zero.
    """
    print("\n=== Training Gaussian Splatting ===")

    # Repair the environment: earlier native builds can leave scipy /
    # scikit-learn in a broken state, so force-reinstall them.
    print("Checking and fixing Python environment...")
    import subprocess
    subprocess.run(["pip", "install", "--upgrade", "--force-reinstall", "-q", "scipy", "scikit-learn"], check=True)
    print("✓ Environment fixed")

    # Verify the COLMAP sparse directory exists before launching training.
    sparse_dir = os.path.join(colmap_dir, 'sparse', '0')
    if not os.path.exists(sparse_dir):
        raise FileNotFoundError(f"COLMAP sparse directory not found: {sparse_dir}")

    print(f"COLMAP sparse model: {sparse_dir}")
    print(f"Training images: {image_dir}")
    print(f"Output: {output_dir}")
    print(f"Iterations: {iterations}")

    os.makedirs(output_dir, exist_ok=True)

    # Training command (argparse nargs='+' consumes the two iteration values).
    cmd = [
        "python", "train.py",
        "-s", colmap_dir,
        "--images", image_dir,
        "-m", output_dir,
        "--iterations", str(iterations),
        "--test_iterations", str(iterations // 2), str(iterations),
        "--save_iterations", str(iterations // 2), str(iterations)
    ]

    print(f"\nCommand: {' '.join(cmd)}\n")

    # train.py must run from the repo root; restore the working directory
    # even if the subprocess call raises (e.g. KeyboardInterrupt).
    original_dir = os.getcwd()
    os.chdir("/content/gaussian-splatting")
    try:
        result = subprocess.run(cmd, capture_output=True, text=True)
    finally:
        os.chdir(original_dir)

    print("STDOUT:", result.stdout)
    if result.stderr:
        print("STDERR:", result.stderr)

    if result.returncode != 0:
        raise RuntimeError("Gaussian Splatting training failed")

    # Verify the generated PLY file exists.
    ply_path = os.path.join(output_dir, f"point_cloud/iteration_{iterations}/point_cloud.ply")
    if not os.path.exists(ply_path):
        raise FileNotFoundError(f"Expected output file not found: {ply_path}")

    print(f"\n✓ Gaussian Splatting training complete!")
    print(f" Model saved to: {output_dir}")
    print(f" Point cloud: {ply_path}")

    return output_dir
# ============================================================================
# Main pipeline function (revised version)
# ============================================================================
def main_pipeline(IMAGE_DIR, OUTPUT_DIR, square_size=1024, iterations=2000,
                  max_images=None, max_pairs=1000, max_points=100000):
    """
    Main pipeline for DINO + LightGlue -> MASt3R -> Gaussian Splatting

    Args:
        IMAGE_DIR: Directory containing input images
        OUTPUT_DIR: Directory for output files
        square_size: Size to resize images for processing
        iterations: Number of training iterations
        max_images: Maximum number of images to process (None = all)
        max_pairs: Maximum number of image pairs for matching
        max_points: Maximum number of 3D points to extract

    Returns:
        The value returned by train_gaussian_splatting (the model output dir).

    NOTE(review): relies on helpers defined in other cells:
    normalize_image_sizes_biplet, get_image_pairs, match_pairs_lightglue,
    run_mast3r_mapper, convert_to_gs_format, train_gaussian_splatting.
    """
    print("="*70)
    print("Gaussian Splatting Preparation Pipeline (MASt3R Version)")
    print("="*70)

    # Path configuration
    feature_dir = os.path.join(OUTPUT_DIR, 'features')
    colmap_dir = os.path.join(OUTPUT_DIR, 'colmap')
    database_path = os.path.join(colmap_dir, 'database.db')
    # NOTE(review): sparse_dir is computed but never used below.
    sparse_dir = os.path.join(colmap_dir, 'sparse')
    os.makedirs(OUTPUT_DIR, exist_ok=True)
    os.makedirs(colmap_dir, exist_ok=True)

    # Step 0: Image preprocessing (applied to the originals, before max_images)
    print("\n" + "="*70)
    print("Step 0: Image Preprocessing")
    print("="*70)

    # Collect the paths of the original images
    original_image_paths = sorted([
        os.path.join(IMAGE_DIR, f)
        for f in os.listdir(IMAGE_DIR)
        if f.lower().endswith(('.jpg', '.jpeg', '.png'))
    ])

    # Apply the max_images limit to the original images
    if max_images and len(original_image_paths) > max_images:
        print(f"\n⚠️ Limiting to {max_images} original images (from {len(original_image_paths)})")
        original_image_paths = original_image_paths[:max_images]

    print(f"Processing {len(original_image_paths)} original images → ~{len(original_image_paths)*2} after biplet-square")

    # Copy the selected images to a temporary directory
    temp_dir = os.path.join(OUTPUT_DIR, "temp_originals")
    os.makedirs(temp_dir, exist_ok=True)

    import shutil
    for img_path in original_image_paths:
        shutil.copy(img_path, temp_dir)

    # Convert to square format (biplet: presumably two crops per image — see
    # the x2 estimate printed above; confirm against normalize_image_sizes_biplet)
    processed_image_dir = os.path.join(OUTPUT_DIR, "processed_images")
    normalize_image_sizes_biplet(
        input_dir=temp_dir,
        output_dir=processed_image_dir,
        size=square_size
    )

    # Clean up the temporary directory
    shutil.rmtree(temp_dir)

    # Collect the paths of the processed images
    image_paths = sorted([
        os.path.join(processed_image_dir, f)
        for f in os.listdir(processed_image_dir)
        if f.lower().endswith(('.jpg', '.jpeg', '.png'))
    ])

    print(f"\n📸 Processing {len(image_paths)} images (after biplet-square)")
    print(f"⚠️ Will use maximum {max_pairs} pairs to save memory")
    print(f"⚠️ Will extract maximum {max_points} 3D points")

    # Step 1: Generate image pairs (limited by max_pairs)
    print("\n" + "="*70)
    print("Step 1: Image Pair Selection")
    print("="*70)

    pairs, features = get_image_pairs(image_paths, max_pairs=max_pairs)

    print(f"✓ Selected {len(pairs)} pairs for matching")

    # Step 2: Feature matching with LightGlue
    print("\n" + "="*70)
    print("Step 2: LightGlue Feature Matching")
    print("="*70)

    match_pairs_lightglue(image_paths, pairs, features, feature_dir)

    ######### Step 3 and Step 4: MASt3R for reconstruction (not COLMAP) #########
    # Step 3 & 4: Estimate camera poses with MASt3R and save in COLMAP format
    print("\n" + "="*70)
    print("Step 3 & 4: MASt3R Reconstruction → COLMAP Format")
    print("="*70)

    model_dir = run_mast3r_mapper(
        database_path=database_path,
        image_dir=IMAGE_DIR,
        output_dir=colmap_dir,
        pairs=pairs,
        processed_image_dir=processed_image_dir,
        max_points=max_points  # pass the max_points limit through
    )
    ###############################################################################

    # Step 5: Verify readiness for Gaussian Splatting
    print("\n" + "="*70)
    print("Step 5: Convert to Gaussian Splatting Format")
    print("="*70)

    colmap_parent = convert_to_gs_format(model_dir, OUTPUT_DIR)

    # Step 6: Train the Gaussian Splatting model
    print("\n" + "="*70)
    print("Step 6: Training Gaussian Splatting")
    print("="*70)

    gs_output = train_gaussian_splatting(
        colmap_dir=colmap_parent,
        image_dir=processed_image_dir,
        output_dir=OUTPUT_DIR,
        iterations=iterations
    )

    print("\n" + "="*70)
    print("✅ Full Pipeline Successfully Completed!")
    print("="*70)
    print(f"\nGaussian Splatting model saved at: {gs_output}")
    print(f"\nPipeline Summary:")
    print(f"  - Original images: {len(original_image_paths)}")
    print(f"  - Processed images: {len(image_paths)}")
    print(f"  - Image pairs: {len(pairs)}")
    print(f"  - Max 3D points: {max_points}")

    return gs_output
# Example usage: run the full pipeline on a Kaggle dataset.
if __name__ == "__main__":
    # Recommended settings for a real dataset run.
    IMAGE_DIR = "/kaggle/input/two-dogs/fountain80/fountain80"
    OUTPUT_DIR = "/kaggle/working/output"

    # Production settings:
    # - square_size: 1024 (high quality) or 512 (balanced)
    # - iterations: 6000 (recommended) or 30000 (higher quality, slower)
    gs_output = main_pipeline(
        IMAGE_DIR,
        OUTPUT_DIR,
        square_size=1024,
        iterations=2000,
        max_images=None,    # None = use ALL input images
        max_pairs=2000,     # at most 2000 image pairs
        max_points=100000   # at most 100k 3D points
    )

# Release cached GPU memory after the run (no-op when CUDA is unavailable).
import torch
torch.cuda.empty_cache()

print('Congratulations! Successfully Completed!')