Delete biplet-asmk-mast3r-ps2-gs-kg-28.ipynb
Browse files
biplet-asmk-mast3r-ps2-gs-kg-28.ipynb
DELETED
|
@@ -1 +0,0 @@
|
|
| 1 |
-
{"metadata":{"kernelspec":{"name":"python3","display_name":"Python 3","language":"python"},"language_info":{"name":"python","version":"3.12.12","mimetype":"text/x-python","codemirror_mode":{"name":"ipython","version":3},"pygments_lexer":"ipython3","nbconvert_exporter":"python","file_extension":".py"},"colab":{"provenance":[],"gpuType":"T4"},"accelerator":"GPU","kaggle":{"accelerator":"none","dataSources":[{"sourceId":14571475,"sourceType":"datasetVersion","datasetId":1429416}],"dockerImageVersionId":31260,"isInternetEnabled":true,"language":"python","sourceType":"notebook","isGpuEnabled":false}},"nbformat_minor":4,"nbformat":4,"cells":[{"cell_type":"markdown","source":"# **biplet-asmk-mast3r-ps2-gs-kg** \n\n","metadata":{"id":"qDQLX3PArmh8"}},{"cell_type":"markdown","source":"https://www.kaggle.com/code/stpeteishii/dino-mast3r-gs-kg-34","metadata":{}},{"cell_type":"code","source":"!pip install roma einops timm huggingface_hub\n!pip install opencv-python pillow tqdm pyaml cython plyfile\n!pip install pycolmap trimesh\n!pip uninstall -y numpy scipy\n!pip install numpy==1.26.4 scipy==1.11.4\n\nbreak","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"# restart, then run after","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-01-22T05:38:17.401044Z","iopub.execute_input":"2026-01-22T05:38:17.401331Z","iopub.status.idle":"2026-01-22T05:38:17.406092Z","shell.execute_reply.started":"2026-01-22T05:38:17.401306Z","shell.execute_reply":"2026-01-22T05:38:17.405179Z"}},"outputs":[],"execution_count":1},{"cell_type":"code","source":"import numpy as np\nprint(f\"โ np: {np.__version__} - {np.__file__}\")\n!pip show numpy | grep 
Version","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-01-22T05:38:17.407573Z","iopub.execute_input":"2026-01-22T05:38:17.408089Z","iopub.status.idle":"2026-01-22T05:38:19.625430Z","shell.execute_reply.started":"2026-01-22T05:38:17.408056Z","shell.execute_reply":"2026-01-22T05:38:19.624754Z"}},"outputs":[{"name":"stdout","text":"โ np: 1.26.4 - /usr/local/lib/python3.12/dist-packages/numpy/__init__.py\nVersion: 1.26.4\nVersion 3.1, 31 March 2009\n Version 3, 29 June 2007\n 5. Conveying Modified Source Versions.\n 14. Revised Versions of this License.\n","output_type":"stream"}],"execution_count":2},{"cell_type":"code","source":"# romaใๅฟ
่ฆใชๆ็นใง็ขบ่ชใปใคใณในใใผใซ\ntry:\n import roma\n print(\"โ roma is installed\")\nexcept ModuleNotFoundError:\n print(\"โ ๏ธ roma not found, installing...\")\n !pip install roma\n import roma\n print(\"โ roma installed\")","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-01-22T05:38:19.626783Z","iopub.execute_input":"2026-01-22T05:38:19.627402Z","iopub.status.idle":"2026-01-22T05:38:21.246270Z","shell.execute_reply.started":"2026-01-22T05:38:19.627363Z","shell.execute_reply":"2026-01-22T05:38:21.245592Z"}},"outputs":[{"name":"stdout","text":"โ roma is installed\n","output_type":"stream"}],"execution_count":3},{"cell_type":"code","source":"import os\nimport sys\n\n# MASt3Rใใฏใญใผใณ\nif not os.path.exists('/kaggle/working/mast3r'):\n print(\"Cloning MASt3R repository...\")\n !git clone --recursive https://github.com/naver/mast3r.git /kaggle/working/mast3r\n print(\"โ MASt3R cloned\")\nelse:\n print(\"โ MASt3R already exists\")\n\n# DUSt3Rใใฏใญใผใณ๏ผMASt3Rๅ
ใซๅฟ
่ฆ๏ผ\nif not os.path.exists('/kaggle/working/mast3r/dust3r'):\n print(\"Cloning DUSt3R repository...\")\n !git clone --recursive https://github.com/naver/dust3r.git /kaggle/working/mast3r/dust3r\n print(\"โ DUSt3R cloned\")\nelse:\n print(\"โ DUSt3R already exists\")\n\n# ASMKใใฏใญใผใณ\nif not os.path.exists('/kaggle/working/asmk'):\n print(\"Cloning ASMK repository...\")\n !git clone https://github.com/jenicek/asmk.git /kaggle/working/asmk\n print(\"โ ASMK cloned\")\nelse:\n print(\"โ ASMK already exists\")\n\n# ใในใ่ฟฝๅ \nsys.path.insert(0, '/kaggle/working/mast3r')\nsys.path.insert(0, '/kaggle/working/mast3r/dust3r')\nsys.path.insert(0, '/kaggle/working/asmk')\n\n# ็ขบ่ช\ntry:\n from dust3r.model import AsymmetricCroCo3DStereo\n print(\"โ dust3r.model imported successfully\")\nexcept ImportError as e:\n print(f\"โ Import error: {e}\")\n\n# croco๏ผMASt3Rใฎไพๅญ้ขไฟ๏ผใใฏใญใผใณ\nif not os.path.exists('/kaggle/working/mast3r/croco'):\n print(\"Cloning CroCo repository...\")\n !git clone --recursive https://github.com/naver/croco.git /kaggle/working/mast3r/croco\n print(\"โ CroCo cloned\")\n\n# CroCo v2ใฎไพๅญ้ขไฟ\nif not os.path.exists('/kaggle/working/mast3r/croco/models/curope'):\n print(\"Cloning CuRoPe...\")\n !git clone --recursive https://github.com/naver/curope.git /kaggle/working/mast3r/croco/models/curope\n print(\"โ CuRoPe cloned\")","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-01-22T05:38:21.247153Z","iopub.execute_input":"2026-01-22T05:38:21.247513Z","iopub.status.idle":"2026-01-22T05:38:21.484180Z","shell.execute_reply.started":"2026-01-22T05:38:21.247479Z","shell.execute_reply":"2026-01-22T05:38:21.483597Z"}},"outputs":[{"name":"stdout","text":"โ MASt3R already exists\nโ DUSt3R already exists\nโ ASMK already exists\nWarning, cannot find cuda-compiled version of RoPE2D, using a slow pytorch version instead\nโ dust3r.model imported successfully\n","output_type":"stream"}],"execution_count":4},{"cell_type":"code","source":"# 
=====================================================================\n# STEP 2: Clone Gaussian Splatting\n# =====================================================================\nprint(\"\\n\" + \"=\"*70)\nprint(\"STEP 2: Clone Gaussian Splatting\")\nprint(\"=\"*70)\nWORK_DIR = \"/kaggle/working/gaussian-splatting\"\n\nimport subprocess\nif not os.path.exists(WORK_DIR):\n subprocess.run([\n \"git\", \"clone\", \"--recursive\",\n \"https://github.com/graphdeco-inria/gaussian-splatting.git\",\n WORK_DIR\n ], capture_output=True)\n print(\"โ Cloned\")\nelse:\n print(\"โ Already exists\")\n\n# ใคใณในใใผใซใๅฟ
่ฆใชใใฃใฌใฏใใช\nsubmodules = [\n \"/kaggle/working/gaussian-splatting/submodules/diff-gaussian-rasterization\",\n \"/kaggle/working/gaussian-splatting/submodules/simple-knn\"\n]\n\nfor path in submodules:\n print(f\"Installing {path}...\")\n # -e ใฏ็ทจ้ๅฏ่ฝใขใผใใไธ่ฆใชใๅคใใฆใOKใงใ\n subprocess.run([\"pip\", \"install\", path], check=True)\n\nprint(\"โ Custom CUDA modules installed.\")","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-01-22T05:38:21.486147Z","iopub.execute_input":"2026-01-22T05:38:21.486358Z","iopub.status.idle":"2026-01-22T05:38:43.880746Z","shell.execute_reply.started":"2026-01-22T05:38:21.486337Z","shell.execute_reply":"2026-01-22T05:38:43.879806Z"}},"outputs":[{"name":"stdout","text":"\n======================================================================\nSTEP 2: Clone Gaussian Splatting\n======================================================================\nโ Already exists\nInstalling /kaggle/working/gaussian-splatting/submodules/diff-gaussian-rasterization...\nProcessing ./gaussian-splatting/submodules/diff-gaussian-rasterization\n Preparing metadata (setup.py): started\n Preparing metadata (setup.py): finished with status 'done'\nBuilding wheels for collected packages: diff_gaussian_rasterization\n Building wheel for diff_gaussian_rasterization (setup.py): started\n Building wheel for diff_gaussian_rasterization (setup.py): finished with status 'done'\n Created wheel for diff_gaussian_rasterization: filename=diff_gaussian_rasterization-0.0.0-cp312-cp312-linux_x86_64.whl size=3455610 sha256=54aac8438171b3a7d65dd54f2be8ad8e9ccc7088351792f28f598ee60818c790\n Stored in directory: /root/.cache/pip/wheels/ba/99/d3/014520068aca8c2e8bdc358ca774581380cadb65788559b3ea\nSuccessfully built diff_gaussian_rasterization\nInstalling collected packages: diff_gaussian_rasterization\n Attempting uninstall: diff_gaussian_rasterization\n Found existing installation: diff_gaussian_rasterization 0.0.0\n Uninstalling 
diff_gaussian_rasterization-0.0.0:\n Successfully uninstalled diff_gaussian_rasterization-0.0.0\nSuccessfully installed diff_gaussian_rasterization-0.0.0\nInstalling /kaggle/working/gaussian-splatting/submodules/simple-knn...\nProcessing ./gaussian-splatting/submodules/simple-knn\n Preparing metadata (setup.py): started\n Preparing metadata (setup.py): finished with status 'done'\nBuilding wheels for collected packages: simple_knn\n Building wheel for simple_knn (setup.py): started\n Building wheel for simple_knn (setup.py): finished with status 'done'\n Created wheel for simple_knn: filename=simple_knn-0.0.0-cp312-cp312-linux_x86_64.whl size=3212333 sha256=20ac36edf7ba8e3c08a4d86175d1719e56fd158bee5f68a861738f35011df4a7\n Stored in directory: /root/.cache/pip/wheels/ca/30/df/7f4f362d12edead48c699acde5962cbb06ca05033b9d970934\nSuccessfully built simple_knn\nInstalling collected packages: simple_knn\n Attempting uninstall: simple_knn\n Found existing installation: simple_knn 0.0.0\n Uninstalling simple_knn-0.0.0:\n Successfully uninstalled simple_knn-0.0.0\nSuccessfully installed simple_knn-0.0.0\nโ Custom CUDA modules installed.\n","output_type":"stream"}],"execution_count":5},{"cell_type":"code","source":"import numpy as np\nprint(f\"โ np: {np.__version__} - {np.__file__}\")\n!pip show numpy | grep Version","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-01-22T05:38:43.882099Z","iopub.execute_input":"2026-01-22T05:38:43.882346Z","iopub.status.idle":"2026-01-22T05:38:46.029385Z","shell.execute_reply.started":"2026-01-22T05:38:43.882323Z","shell.execute_reply":"2026-01-22T05:38:46.028453Z"}},"outputs":[{"name":"stdout","text":"โ np: 1.26.4 - /usr/local/lib/python3.12/dist-packages/numpy/__init__.py\nVersion: 1.26.4\nVersion 3.1, 31 March 2009\n Version 3, 29 June 2007\n 5. Conveying Modified Source Versions.\n 14. 
Revised Versions of this License.\n","output_type":"stream"}],"execution_count":6},{"cell_type":"code","source":"import os\nimport sys\nimport gc\nimport torch\nimport numpy as np\nfrom pathlib import Path\nfrom tqdm import tqdm\nimport torch.nn.functional as F\n\n# ======================================================================\n# MEMORY MANAGEMENT\n# ======================================================================\n\nos.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'expandable_segments:True'\n\ndef clear_memory():\n \"\"\"ใกใขใชใฏใชใข้ขๆฐ\"\"\"\n gc.collect()\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n torch.cuda.synchronize()\n\n\n# ======================================================================\n# CONFIGURATION\n# ======================================================================\n\nclass Config:\n DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n # ๆญฃใใMASt3Rใขใใซๅ\n MAST3R_WEIGHTS = \"naver/MASt3R_ViTLarge_BaseDecoder_512_catmlpdpt_metric\"\n DUST3R_WEIGHTS = \"naver/DUSt3R_ViTLarge_BaseDecoder_512_dpt\" # ใใฉใผใซใใใฏ็จ\n RETRIEVAL_TOPK = 10\n IMAGE_SIZE = 224 # ใกใขใช็ฏ็ดใฎใใ224ใซ่จญๅฎ\n\n\n# ======================================================================\n# IMAGE PREPROCESSING\n# ======================================================================\n\nimport os\nimport shutil\nfrom PIL import Image\nfrom tqdm import tqdm","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-01-22T05:38:46.030863Z","iopub.execute_input":"2026-01-22T05:38:46.031236Z","iopub.status.idle":"2026-01-22T05:38:46.096145Z","shell.execute_reply.started":"2026-01-22T05:38:46.031206Z","shell.execute_reply":"2026-01-22T05:38:46.095449Z"}},"outputs":[],"execution_count":7},{"cell_type":"code","source":"# ======================================================================\n# IMAGE PREPROCESSING\n# ======================================================================\n\nimport os\nimport shutil\nfrom PIL import 
Image\nfrom tqdm import tqdm\n\n# ======================================================================\n# IMAGE PREPROCESSING\n# ======================================================================\n\ndef normalize_image_sizes_biplet(input_dir, output_dir=None, size=1024):\n \"\"\"\n Generates two square crops (Left & Right or Top & Bottom)\n from each image in a directory.\n Bipletใฏใญใใใฎใฟ็ๆ๏ผๅ
็ปๅใฏใณใใผใใชใ๏ผ\n \"\"\"\n if output_dir is None:\n output_dir = input_dir + \"_biplet\"\n \n os.makedirs(output_dir, exist_ok=True)\n \n print(f\"\\n=== Generating Biplet Crops ({size}x{size}) ===\")\n \n converted_count = 0\n size_stats = {}\n \n for img_file in tqdm(sorted(os.listdir(input_dir)), desc=\"Creating biplets\"):\n if not img_file.lower().endswith(('.jpg', '.jpeg', '.png')):\n continue\n \n input_path = os.path.join(input_dir, img_file)\n \n try:\n img = Image.open(input_path)\n original_size = img.size\n \n size_key = f\"{original_size[0]}x{original_size[1]}\"\n size_stats[size_key] = size_stats.get(size_key, 0) + 1\n \n # Generate 2 crops (ๅ
็ปๅใฎใณใใผใฏๅ้ค)\n crops = generate_two_crops(img, size)\n \n base_name, ext = os.path.splitext(img_file)\n for mode, cropped_img in crops.items():\n output_path = os.path.join(output_dir, f\"{base_name}_{mode}{ext}\")\n cropped_img.save(output_path, quality=95)\n \n converted_count += 1\n \n except Exception as e:\n print(f\" โ Error processing {img_file}: {e}\")\n \n print(f\"\\nโ Biplet generation complete:\")\n print(f\" Source images: {converted_count}\")\n print(f\" Biplet crops generated: {converted_count * 2}\")\n print(f\" Original size distribution: {size_stats}\")\n \n return output_dir\n\n\ndef generate_two_crops(img, size):\n \"\"\"\n Crops the image into a square and returns 2 variations\n (Left/Right for landscape, Top/Bottom for portrait).\n \"\"\"\n width, height = img.size\n crop_size = min(width, height)\n crops = {}\n \n if width > height:\n # Landscape โ Left & Right\n positions = {\n 'left': 0,\n 'right': width - crop_size\n }\n for mode, x_offset in positions.items():\n box = (x_offset, 0, x_offset + crop_size, crop_size)\n crops[mode] = img.crop(box).resize(\n (size, size),\n Image.Resampling.LANCZOS\n )\n \n else:\n # Portrait or Square โ Top & Bottom\n positions = {\n 'top': 0,\n 'bottom': height - crop_size\n }\n for mode, y_offset in positions.items():\n box = (0, y_offset, crop_size, y_offset + crop_size)\n crops[mode] = img.crop(box).resize(\n (size, size),\n Image.Resampling.LANCZOS\n )\n \n return crops\n","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-01-22T05:38:46.097253Z","iopub.execute_input":"2026-01-22T05:38:46.097905Z","iopub.status.idle":"2026-01-22T05:38:46.114125Z","shell.execute_reply.started":"2026-01-22T05:38:46.097878Z","shell.execute_reply":"2026-01-22T05:38:46.113318Z"}},"outputs":[],"execution_count":8},{"cell_type":"code","source":"# ======================================================================\n# IMAGE LOADING\n# 
======================================================================\n\ndef load_images_from_directory(image_dir, max_images=200):\n \"\"\"ใใฃใฌใฏใใชใใ็ปๅใใญใผใ\"\"\"\n print(f\"\\nLoading images from: {image_dir}\")\n \n valid_extensions = {'.jpg', '.jpeg', '.png', '.bmp'}\n image_paths = []\n \n for ext in valid_extensions:\n image_paths.extend(sorted(Path(image_dir).glob(f'*{ext}')))\n image_paths.extend(sorted(Path(image_dir).glob(f'*{ext.upper()}')))\n \n image_paths = sorted(set(str(p) for p in image_paths))\n \n if len(image_paths) > max_images:\n print(f\"โ ๏ธ Limiting from {len(image_paths)} to {max_images} images\")\n image_paths = image_paths[:max_images]\n \n print(f\"โ Found {len(image_paths)} images\")\n return image_paths\n\n\n# ======================================================================\n# MAST3R MODEL\n# ======================================================================\n\ndef load_mast3r_model(device):\n \"\"\"MASt3Rใขใใซใใญใผใ\"\"\"\n print(\"\\n=== Loading MASt3R Model ===\")\n \n # mast3rใฎใในใ่ฟฝๅ \n if '/kaggle/working/mast3r' not in sys.path:\n sys.path.insert(0, '/kaggle/working/mast3r')\n if '/kaggle/working/mast3r/dust3r' not in sys.path:\n sys.path.insert(0, '/kaggle/working/mast3r/dust3r')\n \n from dust3r.model import AsymmetricCroCo3DStereo\n \n try:\n # MASt3Rใขใใซใ่ฉฆใ\n print(f\"Attempting to load: {Config.MAST3R_WEIGHTS}\")\n model = AsymmetricCroCo3DStereo.from_pretrained(Config.MAST3R_WEIGHTS).to(device)\n print(\"โ Loaded MASt3R model\")\n except Exception as e:\n print(f\"โ ๏ธ Failed to load MASt3R: {e}\")\n print(f\"Trying DUSt3R instead: {Config.DUST3R_WEIGHTS}\")\n try:\n model = AsymmetricCroCo3DStereo.from_pretrained(Config.DUST3R_WEIGHTS).to(device)\n print(\"โ Loaded DUSt3R model as fallback\")\n except Exception as e2:\n print(f\"โ ๏ธ Failed to load DUSt3R: {e2}\")\n raise Exception(\"Could not load any model. 
Please check model names and internet connection.\")\n \n model.eval()\n \n print(f\"โ Model loaded on {device}\")\n return model\n\n\n# ======================================================================\n# FEATURE EXTRACTION & PAIR SELECTION\n# ======================================================================\n\ndef load_asmk_retrieval_model(device):\n \"\"\"ASMKใชใใชใผใใซใขใใซใใญใผใ\"\"\"\n print(\"\\n=== Loading ASMK Retrieval Model ===\")\n \n # mast3rใจasmkใฎใในใ่ฟฝๅ \n if '/kaggle/working/mast3r' not in sys.path:\n sys.path.insert(0, '/kaggle/working/mast3r')\n if '/kaggle/working/mast3r/dust3r' not in sys.path:\n sys.path.insert(0, '/kaggle/working/mast3r/dust3r')\n if '/kaggle/working/asmk' not in sys.path:\n sys.path.insert(0, '/kaggle/working/asmk')\n \n from dust3r.model import AsymmetricCroCo3DStereo\n \n try:\n # MASt3Rใขใใซใ่ฉฆใ\n model = AsymmetricCroCo3DStereo.from_pretrained(Config.MAST3R_WEIGHTS).to(device)\n print(\"โ Loaded MASt3R model for retrieval\")\n except Exception as e:\n print(f\"โ ๏ธ Failed to load MASt3R: {e}\")\n print(f\"Trying DUSt3R instead\")\n model = AsymmetricCroCo3DStereo.from_pretrained(Config.DUST3R_WEIGHTS).to(device)\n print(\"โ Loaded DUSt3R model for retrieval\")\n \n model.eval()\n \n # Codebookใฎๅๆๅ๏ผ็ฐกๆ็๏ผ\n codebook = np.random.randn(1024, 24).astype(np.float32)\n \n print(\"โ ASMK model loaded\")\n return model, codebook\n\n\ndef extract_mast3r_features(model, image_paths, device, batch_size=1):\n \"\"\"MASt3Rใขใใซใไฝฟ็จใใฆ็นๅพด้ใๆฝๅบ๏ผใใข็ปๅใจใใฆๅฆ็๏ผ\"\"\"\n print(\"\\n=== Extracting MASt3R Features ===\")\n from dust3r.utils.image import load_images\n from dust3r.inference import inference\n \n all_features = []\n \n # ๅ็ปๅใ่ชๅ่ช่บซใจใใขใซใใฆๅฆ็\n for i in tqdm(range(len(image_paths)), desc=\"Features\"):\n img_path = image_paths[i]\n \n # ๅใ็ปๅใ2ๅใญใผใ๏ผใใขใจใใฆ๏ผ\n images = load_images([img_path, img_path], size=Config.IMAGE_SIZE)\n \n # ใใขๅฝขๅผใงๆจ่ซ\n pairs = [(images[0], images[1])]\n \n with 
torch.no_grad():\n output = inference(pairs, model, device, batch_size=1)\n \n # outputใฎๆง้ ใ็ขบ่ชใใฆใใผใฟใๆฝๅบ\n try:\n # outputใ่พๆธใฎๅ ดๅ (DUSt3Rๅฝขๅผ)\n if isinstance(output, dict):\n # pred1ใใ3D็นใพใใฏ็นๅพด้ใๅๅพ\n if 'pred1' in output:\n pred1 = output['pred1']\n if isinstance(pred1, dict):\n # pts3dใพใใฏdescใๆขใ\n if 'pts3d' in pred1:\n desc = pred1['pts3d']\n elif 'desc' in pred1:\n desc = pred1['desc']\n else:\n # ๅฉ็จๅฏ่ฝใชๆๅใฎใใณใฝใซใไฝฟ็จ\n for key, val in pred1.items():\n if isinstance(val, torch.Tensor):\n desc = val\n break\n else:\n desc = pred1\n elif 'view1' in output:\n desc = output['view1']\n else:\n # ๆๅใฎๅคใไฝฟ็จ\n desc = list(output.values())[0]\n # outputใใฟใใซ (view1, view2) ใฎๅฝขๅผ\n elif isinstance(output, tuple) and len(output) == 2:\n view1, view2 = output\n # view1ใใ็นๅพด้ใๅๅพ\n if isinstance(view1, dict):\n if 'pts3d' in view1:\n desc = view1['pts3d']\n elif 'desc' in view1:\n desc = view1['desc']\n else:\n desc = list(view1.values())[0]\n else:\n desc = view1\n # outputใใชในใใฎๅ ดๅ\n elif isinstance(output, list):\n if len(output) > 0:\n item = output[0]\n if isinstance(item, dict):\n if 'pts3d' in item:\n desc = item['pts3d']\n elif 'desc' in item:\n desc = item['desc']\n else:\n desc = list(item.values())[0]\n else:\n desc = item\n else:\n raise ValueError(\"Empty output\")\n else:\n # ใใฎไปใฎๅฝขๅผ\n desc = output\n \n # ใใณใฝใซใฎๆฌกๅ
ใ่ชฟๆด\n if isinstance(desc, torch.Tensor):\n if desc.dim() == 4:\n desc = desc.squeeze(0) # [1, H, W, C] -> [H, W, C]\n elif desc.dim() == 2:\n # [H*W, C] ใฎๅ ดๅใ้ฉๅใชๅฝข็ถใซๅคๆ\n h = w = int(np.sqrt(desc.shape[0]))\n if h * w == desc.shape[0]:\n desc = desc.reshape(h, w, desc.shape[1])\n \n all_features.append(desc)\n \n except Exception as e:\n print(f\"โ ๏ธ Error extracting features for image {i}: {e}\")\n print(f\" Output type: {type(output)}\")\n if isinstance(output, (list, tuple)):\n print(f\" Output length: {len(output)}\")\n if len(output) > 0:\n print(f\" First item type: {type(output[0])}\")\n if isinstance(output[0], dict):\n print(f\" Keys: {output[0].keys()}\")\n # ใใใฉใซใใฎ็นๅพด้ใ่ฟฝๅ \n all_features.append(torch.zeros((Config.IMAGE_SIZE, Config.IMAGE_SIZE, 24)))\n \n # ใกใขใชใฏใชใข\n del output, images, pairs\n if i % 10 == 0:\n torch.cuda.empty_cache()\n \n print(f\"โ Extracted features for {len(all_features)} images\")\n if all_features:\n first_feat = all_features[0]\n if isinstance(first_feat, torch.Tensor):\n print(f\" Feature shape: {first_feat.shape}\")\n elif isinstance(first_feat, dict):\n print(f\" Feature type: dict with keys: {first_feat.keys()}\")\n elif isinstance(first_feat, np.ndarray):\n print(f\" Feature shape: {first_feat.shape}\")\n else:\n print(f\" Feature type: {type(first_feat)}\")\n \n return all_features\n\n\ndef compute_asmk_similarity(features, codebook):\n \"\"\"ASMKใไฝฟ็จใใฆ้กไผผๅบฆ่กๅใ่จ็ฎ\"\"\"\n print(\"\\n=== Computing ASMK Similarity ===\")\n \n n_images = len(features)\n similarity_matrix = np.zeros((n_images, n_images), dtype=np.float32)\n \n # ๅ็นๅพด้ใใฐใญใผใใซ่จ่ฟฐๅญใซๅคๆ\n global_features = []\n \n for feat in features:\n # featใ่พๆธใฎๅ ดๅใใใณใฝใซใๆฝๅบ\n if isinstance(feat, dict):\n if 'pts3d' in feat:\n feat = feat['pts3d']\n elif 'desc' in feat:\n feat = feat['desc']\n elif 'pred1' in feat:\n pred1 = feat['pred1']\n if isinstance(pred1, dict) and 'pts3d' in pred1:\n feat = pred1['pts3d']\n else:\n feat = 
pred1\n else:\n # ๆๅใฎใใณใฝใซๅคใไฝฟ็จ\n for val in feat.values():\n if isinstance(val, (torch.Tensor, np.ndarray)):\n feat = val\n break\n \n if isinstance(feat, torch.Tensor):\n feat = feat.cpu().numpy()\n \n # featใฎๅฝข็ถใ็ขบ่ช\n if isinstance(feat, np.ndarray):\n if feat.ndim == 3: # [H, W, C]\n h, w, c = feat.shape\n feat_flat = feat.reshape(-1, c)\n elif feat.ndim == 2: # [N, C]\n feat_flat = feat\n else:\n print(f\"โ ๏ธ Unexpected feature shape: {feat.shape}\")\n feat_flat = feat.reshape(-1, feat.shape[-1])\n \n global_desc = np.mean(feat_flat, axis=0)\n global_features.append(global_desc)\n else:\n print(f\"โ ๏ธ Unexpected feature type: {type(feat)}\")\n # ใใใผ็นๅพด้\n global_features.append(np.zeros(24))\n \n global_features = np.stack(global_features)\n \n if codebook is not None and len(codebook) > 0:\n try:\n print(f\"Using ASMK with codebook size: {len(codebook)}\")\n \n for i in range(n_images):\n feat_i = features[i]\n \n # ่พๆธใใใใณใฝใซใๆฝๅบ\n if isinstance(feat_i, dict):\n if 'pts3d' in feat_i:\n feat_i = feat_i['pts3d']\n elif 'pred1' in feat_i and isinstance(feat_i['pred1'], dict):\n feat_i = feat_i['pred1'].get('pts3d', feat_i['pred1'])\n \n if isinstance(feat_i, torch.Tensor):\n feat_i = feat_i.cpu().numpy()\n \n if feat_i.ndim == 3:\n feat_i = feat_i.reshape(-1, feat_i.shape[-1])\n \n for j in range(i+1, n_images):\n feat_j = features[j]\n \n # ่พๆธใใใใณใฝใซใๆฝๅบ\n if isinstance(feat_j, dict):\n if 'pts3d' in feat_j:\n feat_j = feat_j['pts3d']\n elif 'pred1' in feat_j and isinstance(feat_j['pred1'], dict):\n feat_j = feat_j['pred1'].get('pts3d', feat_j['pred1'])\n \n if isinstance(feat_j, torch.Tensor):\n feat_j = feat_j.cpu().numpy()\n \n if feat_j.ndim == 3:\n feat_j = feat_j.reshape(-1, feat_j.shape[-1])\n \n dist_i = np.linalg.norm(feat_i[:, None, :] - codebook[None, :, :], axis=2)\n dist_j = np.linalg.norm(feat_j[:, None, :] - codebook[None, :, :], axis=2)\n \n assign_i = np.argmin(dist_i, axis=1)\n assign_j = np.argmin(dist_j, axis=1)\n 
\n common = len(set(assign_i) & set(assign_j))\n sim = common / max(len(set(assign_i)), len(set(assign_j)))\n \n similarity_matrix[i, j] = sim\n similarity_matrix[j, i] = sim\n \n if (i + 1) % 10 == 0:\n print(f\"Processed {i+1}/{n_images} images\")\n \n print(\"โ ASMK similarity computation completed\")\n \n except Exception as e:\n print(f\"โ ๏ธ ASMK failed: {e}, using cosine similarity\")\n global_features_norm = global_features / (np.linalg.norm(global_features, axis=1, keepdims=True) + 1e-8)\n similarity_matrix = global_features_norm @ global_features_norm.T\n \n else:\n print(\"No codebook provided, using cosine similarity\")\n global_features_norm = global_features / (np.linalg.norm(global_features, axis=1, keepdims=True) + 1e-8)\n similarity_matrix = global_features_norm @ global_features_norm.T\n \n np.fill_diagonal(similarity_matrix, -1)\n \n print(f\"Similarity matrix shape: {similarity_matrix.shape}\")\n print(f\"Similarity range: [{similarity_matrix.min():.3f}, {similarity_matrix.max():.3f}]\")\n \n return similarity_matrix\n\n\ndef build_pairs_from_similarity(similarity_matrix, top_k=10):\n \"\"\"้กไผผๅบฆ่กๅใใใใขใๆง็ฏ\"\"\"\n n_images = similarity_matrix.shape[0]\n pairs = []\n \n for i in range(n_images):\n similarities = similarity_matrix[i]\n top_indices = np.argsort(similarities)[::-1][:top_k]\n \n for j in top_indices:\n if j > i:\n pairs.append((i, j))\n \n pairs = list(set(pairs))\n print(f\"โ Built {len(pairs)} unique pairs\")\n \n return pairs\n\n\ndef get_image_pairs_asmk(image_paths, max_pairs=100):\n \"\"\"ASMKใไฝฟ็จใใฆ็ปๅใใขใๅๅพ\"\"\"\n print(\"\\n=== Getting Image Pairs with ASMK ===\")\n \n device = Config.DEVICE\n model, codebook = load_asmk_retrieval_model(device)\n features = extract_mast3r_features(model, image_paths, device)\n similarity_matrix = compute_asmk_similarity(features, codebook)\n pairs = build_pairs_from_similarity(similarity_matrix, Config.RETRIEVAL_TOPK)\n \n # ใขใใซใ่งฃๆพ\n del model\n clear_memory()\n \n if 
len(pairs) > max_pairs:\n pairs = pairs[:max_pairs]\n print(f\"Limited to {max_pairs} pairs\")\n \n return pairs\n\n\n# ======================================================================\n# MAST3R RECONSTRUCTION\n# ======================================================================\n\ndef run_mast3r_pairs(model, image_paths, pairs, device, batch_size=1):\n \"\"\"MASt3Rใงใใข็ปๅใๅฆ็๏ผใกใขใชๆ้ฉๅ็๏ผ\"\"\"\n print(\"\\n=== Running MASt3R Reconstruction ===\")\n from dust3r.inference import inference\n from dust3r.cloud_opt import global_aligner, GlobalAlignerMode\n from dust3r.utils.image import load_images\n \n # ใใขใๅถ้๏ผใกใขใช็ฏ็ด๏ผ\n max_pairs_for_memory = 50\n if len(pairs) > max_pairs_for_memory:\n print(f\"โ ๏ธ Limiting pairs from {len(pairs)} to {max_pairs_for_memory} for memory\")\n pairs = pairs[:max_pairs_for_memory]\n \n # ใใขใใ็ปๅใคใณใใใฏในใๅๅพ\n pair_indices = []\n for i, j in pairs:\n pair_indices.extend([i, j])\n unique_indices = sorted(set(pair_indices))\n \n selected_paths = [image_paths[i] for i in unique_indices]\n print(f\"Selected {len(selected_paths)} unique images from {len(pairs)} pairs\")\n \n # ็ปๅใใญใผใ\n images = load_images(selected_paths, size=Config.IMAGE_SIZE)\n \n clear_memory()\n \n # ใคใณใใใฏในใใใใณใฐ๏ผๅ
ใฎใคใณใใใฏใน โ ๆฐใใใคใณใใใฏใน๏ผ\n index_map = {old_idx: new_idx for new_idx, old_idx in enumerate(unique_indices)}\n \n # ใใขใๆฐใใใคใณใใใฏในใซๅคๆใใฆใใข็ปๅใชในใใไฝๆ\n image_pairs = []\n for i, j in pairs:\n new_i = index_map[i]\n new_j = index_map[j]\n image_pairs.append((images[new_i], images[new_j]))\n \n print(f\"Created {len(image_pairs)} image pairs\")\n \n clear_memory()\n \n # ใใใใตใคใบใๅ็ใซ่ชฟๆด\n available_memory = torch.cuda.get_device_properties(device).total_memory\n used_memory = torch.cuda.memory_allocated(device)\n free_memory = available_memory - used_memory\n \n if free_memory < 2e9:\n batch_size = 1\n print(f\"โ ๏ธ Low memory, using batch_size=1\")\n \n # ๆจ่ซใๅฎ่ก\n print(f\"Running inference on {len(image_pairs)} pairs...\")\n with torch.no_grad():\n output = inference(image_pairs, model, device, batch_size=batch_size)\n \n print(f\"โ Processed {len(output)} predictions\")\n \n clear_memory()\n \n # Global alignmentใฎๆบๅ\n scene = global_aligner(\n dust3r_output=output,\n device=device,\n mode=GlobalAlignerMode.PointCloudOptimizer,\n verbose=True\n )\n \n clear_memory()\n \n # Global alignment\n print(\"Running global alignment...\")\n try:\n loss = scene.compute_global_alignment(\n init=\"mst\", \n niter=50,\n schedule='cosine', \n lr=0.01\n )\n print(f\"โ Alignment complete (loss: {loss:.6f})\")\n except RuntimeError as e:\n if \"out of memory\" in str(e).lower():\n print(\"โ ๏ธ OOM during alignment, trying with fewer iterations...\")\n clear_memory()\n \n loss = scene.compute_global_alignment(\n init=\"mst\", \n niter=20,\n schedule='cosine', \n lr=0.01\n )\n print(f\"โ Alignment complete with reduced iterations (loss: {loss:.6f})\")\n else:\n raise\n \n clear_memory()\n \n return scene, images\n\n\n# ======================================================================\n# CAMERA PARAMETER EXTRACTION (PROCESS2)\n# ======================================================================\n\ndef extract_camera_params_process2(scene, image_paths, 
conf_threshold=1.5):\n \"\"\"Process2: sceneใใ็ดๆฅใซใกใฉใใฉใกใผใฟใจ3D็นใๆฝๅบ\"\"\"\n print(\"\\n=== Extracting Camera Parameters (Process2) ===\")\n \n cameras_dict = {}\n all_pts3d = []\n all_confidence = []\n \n # sceneใใๅฎ้ใฎ็ปๅๆฐใๅๅพ\n try:\n if hasattr(scene, 'get_im_poses'):\n poses = scene.get_im_poses()\n n_images = len(poses)\n elif hasattr(scene, 'im_poses'):\n poses = scene.im_poses\n n_images = len(poses)\n else:\n n_images = len(image_paths)\n poses = None\n \n print(f\"Scene has {n_images} images, image_paths has {len(image_paths)} images\")\n \n # ๅฎ้ใฎscene็ปๅๆฐใจimage_pathsใฎๅฐใใๆนใไฝฟ็จ\n n_images = min(n_images, len(image_paths))\n \n if hasattr(scene, 'get_focals'):\n focals = scene.get_focals()\n elif hasattr(scene, 'im_focals'):\n focals = scene.im_focals\n else:\n focals = None\n \n if hasattr(scene, 'get_principal_points'):\n pps = scene.get_principal_points()\n elif hasattr(scene, 'im_pp'):\n pps = scene.im_pp\n else:\n pps = None\n \n except Exception as e:\n print(f\"โ ๏ธ Error getting camera parameters: {e}\")\n n_images = len(image_paths)\n poses = None\n focals = None\n pps = None\n \n for idx in range(n_images):\n #-------------------------------------------------------------------\n print(f\"\\n=== Image {idx}: {os.path.basename(image_paths[idx])} ===\")\n \n # Poseใๅๅพใฎๅพใซ่ฟฝๅ \n if poses is not None and idx < len(poses):\n pose = poses[idx]\n if isinstance(pose, torch.Tensor):\n pose = pose.detach().cpu().numpy()\n \n print(f\" Pose type: {type(pose)}\")\n print(f\" Pose shape: {pose.shape if hasattr(pose, 'shape') else 'N/A'}\")\n if hasattr(pose, 'shape') and pose.shape == (4, 4):\n print(f\" Rotation det: {np.linalg.det(pose[:3, :3]):.3f}\")\n #-------------------------------------------------------------------\n \n img_name = os.path.basename(image_paths[idx])\n \n try:\n # Poseใๅๅพ\n if poses is not None and idx < len(poses):\n pose = poses[idx]\n if isinstance(pose, torch.Tensor):\n pose = pose.detach().cpu().numpy()\n \n # 
poseใๆญฃใใๅฝข็ถใงใชใๅ ดๅใๅไฝ่กๅใไฝฟ็จ\n if not isinstance(pose, np.ndarray) or pose.shape != (4, 4):\n print(f\" โ Image {idx}: pose shape {pose.shape if hasattr(pose, 'shape') else 'N/A'}, using identity\")\n pose = np.eye(4)\n else:\n pose = np.eye(4)\n \n # Focalใๅๅพ\n if focals is not None and idx < len(focals):\n focal = focals[idx]\n if isinstance(focal, torch.Tensor):\n focal = focal.detach().cpu().item()\n else:\n focal = float(focal)\n else:\n focal = 1000.0\n \n # Principal pointใๅๅพ\n if pps is not None and idx < len(pps):\n pp = pps[idx]\n if isinstance(pp, torch.Tensor):\n pp = pp.detach().cpu().numpy()\n else:\n pp = np.array([112.0, 112.0])\n \n # ใซใกใฉใใฉใกใผใฟใไฟๅญ\n cameras_dict[img_name] = {\n 'focal': focal,\n 'pp': pp,\n 'pose': pose\n }\n \n # 3D็นใๅๅพ\n if hasattr(scene, 'im_pts3d') and idx < len(scene.im_pts3d):\n pts3d_img = scene.im_pts3d[idx]\n elif hasattr(scene, 'get_pts3d'):\n pts3d_all = scene.get_pts3d()\n if idx < len(pts3d_all):\n pts3d_img = pts3d_all[idx]\n else:\n pts3d_img = None\n else:\n pts3d_img = None\n \n # Confidenceใๅๅพ\n if hasattr(scene, 'im_conf') and idx < len(scene.im_conf):\n conf_img = scene.im_conf[idx]\n elif hasattr(scene, 'get_conf'):\n conf_all = scene.get_conf()\n if idx < len(conf_all):\n conf_img = conf_all[idx]\n else:\n conf_img = None\n else:\n conf_img = None\n \n # 3D็นใจconfidenceใๅฆ็\n if pts3d_img is not None:\n if isinstance(pts3d_img, torch.Tensor):\n pts3d_img = pts3d_img.detach().cpu().numpy()\n \n if pts3d_img.ndim == 3:\n pts3d_flat = pts3d_img.reshape(-1, 3)\n else:\n pts3d_flat = pts3d_img\n \n all_pts3d.append(pts3d_flat)\n \n # confidenceใๅฆ็\n if conf_img is not None:\n if isinstance(conf_img, list):\n conf_img = np.array(conf_img)\n elif isinstance(conf_img, torch.Tensor):\n conf_img = conf_img.detach().cpu().numpy()\n \n if conf_img.ndim > 1:\n conf_flat = conf_img.reshape(-1)\n else:\n conf_flat = conf_img\n \n if len(conf_flat) != len(pts3d_flat):\n conf_flat = 
np.ones(len(pts3d_flat))\n \n all_confidence.append(conf_flat)\n else:\n all_confidence.append(np.ones(len(pts3d_flat)))\n \n except Exception as e:\n print(f\"โ ๏ธ Error processing image {idx} ({img_name}): {e}\")\n cameras_dict[img_name] = {\n 'focal': 1000.0,\n 'pp': np.array([112.0, 112.0]),\n 'pose': np.eye(4)\n }\n continue\n \n # ๅ
# ======================================================================
# COLMAP EXPORT v.10
# ======================================================================
import os
import struct
import numpy as np
from scipy.spatial.transform import Rotation as R

def write_colmap_sparse(cameras_dict, pts3d, confidence, image_paths, output_dir):
    """Write a COLMAP sparse reconstruction (binary + text) for Gaussian Splatting.

    Emits cameras.bin / images.bin / points3D.bin in COLMAP's binary layout,
    plus text versions for debugging. A single shared PINHOLE camera is used.

    Args:
        cameras_dict: mapping image basename -> {'focal', 'pp', 'pose',
            optionally 'width'/'height'}; 'pose' is a 4x4 camera-to-world matrix.
        pts3d: (N, 3) array of world-space points.
        confidence: per-point confidences (unused here; kept for interface
            compatibility with the extraction step).
        image_paths: list of image file paths, in registration order.
        output_dir: destination directory (typically .../sparse/0).

    Returns:
        output_dir.

    Raises:
        ValueError: if cameras_dict is empty.
    """
    os.makedirs(output_dir, exist_ok=True)

    if not cameras_dict:
        raise ValueError("cameras_dict is empty")

    first_key = next(iter(cameras_dict))
    first_cam = cameras_dict[first_key]

    # --- intrinsics for the single shared PINHOLE camera ---
    # BUGFIX: the extraction step never stores 'width'/'height', so the old
    # 1920x1080 defaults always won and the stored principal point 'pp' was
    # ignored -> exported intrinsics never matched the actual images.
    # Prefer explicit width/height, else infer the image size from 'pp'
    # (assumed to sit at the image centre), else keep the old defaults.
    pp = first_cam.get('pp')
    if 'width' in first_cam and 'height' in first_cam:
        w = int(first_cam['width'])
        h = int(first_cam['height'])
    elif pp is not None:
        w = int(round(2.0 * float(pp[0])))
        h = int(round(2.0 * float(pp[1])))
    else:
        w, h = 1920, 1080
    focal = float(first_cam.get('focal', max(w, h) * 1.2))
    if pp is not None:
        cx, cy = float(pp[0]), float(pp[1])
    else:
        cx, cy = w / 2.0, h / 2.0

    # --- 1. cameras.bin ---
    cameras_file = os.path.join(output_dir, 'cameras.bin')
    with open(cameras_file, 'wb') as f:
        f.write(struct.pack('Q', 1))   # header: number of cameras
        f.write(struct.pack('i', 1))   # CAMERA_ID
        f.write(struct.pack('i', 1))   # MODEL_ID 1 = PINHOLE
        f.write(struct.pack('Q', w))
        f.write(struct.pack('Q', h))
        # PINHOLE params: fx, fy, cx, cy
        f.write(struct.pack('d', focal))
        f.write(struct.pack('d', focal))
        f.write(struct.pack('d', cx))
        f.write(struct.pack('d', cy))
    print(f"โ Written cameras.bin")

    # --- 2. images.bin ---
    images_file = os.path.join(output_dir, 'images.bin')
    with open(images_file, 'wb') as f:
        f.write(struct.pack('Q', len(image_paths)))  # header: number of images

        for i, img_path in enumerate(image_paths):
            img_name = os.path.basename(img_path)

            # cameras_dict is keyed by basename; the int/str lookups are kept
            # for backward compatibility with index-keyed dicts.
            cam_info = (cameras_dict.get(i) or cameras_dict.get(str(i))
                        or cameras_dict.get(img_name))
            if cam_info is None:
                print(f" โ ๏ธ No camera info for {img_name}, using identity pose")
                pose = np.eye(4)
            else:
                pose = cam_info['pose']

            # COLMAP stores world-to-camera; poses arrive camera-to-world.
            try:
                w2c = np.linalg.inv(pose)
            except np.linalg.LinAlgError:
                print(f" โ ๏ธ Singular matrix for {img_name}, using identity")
                w2c = np.eye(4)

            tvec = w2c[:3, 3]
            quat = R.from_matrix(w2c[:3, :3]).as_quat()  # scalar-last [x, y, z, w]
            qw, qx, qy, qz = quat[3], quat[0], quat[1], quat[2]

            f.write(struct.pack('i', i + 1))  # IMAGE_ID
            for value in (qw, qx, qy, qz, tvec[0], tvec[1], tvec[2]):
                f.write(struct.pack('d', value))
            f.write(struct.pack('i', 1))      # CAMERA_ID

            # image name, null-terminated
            f.write(img_name.encode('utf-8') + b'\x00')

            # number of 2D observations (none)
            f.write(struct.pack('Q', 0))

    print(f"โ Written images.bin ({len(image_paths)} images)")

    # --- 3. points3D.bin ---
    points_file = os.path.join(output_dir, 'points3D.bin')
    with open(points_file, 'wb') as f:
        f.write(struct.pack('Q', len(pts3d)))  # header: number of points

        for point_id, point in enumerate(pts3d, start=1):
            f.write(struct.pack('Q', point_id))
            f.write(struct.pack('d', point[0]))  # X
            f.write(struct.pack('d', point[1]))  # Y
            f.write(struct.pack('d', point[2]))  # Z
            f.write(struct.pack('B', 255))       # R
            f.write(struct.pack('B', 255))       # G
            f.write(struct.pack('B', 255))       # B
            f.write(struct.pack('d', 0.0))       # reprojection error
            f.write(struct.pack('Q', 0))         # empty track

    print(f"โ Written points3D.bin ({len(pts3d)} points)")

    # --- 4. text versions (for debugging) ---
    write_text_versions(cameras_dict, pts3d, image_paths, output_dir, w, h, focal, cx, cy)

    print(f"\nโ COLMAP sparse reconstruction saved (binary + text)")
    print(f" - Camera: PINHOLE")
    print(f" - Images: {len(image_paths)}")
    print(f" - Points: {len(pts3d)}")
    print(f" - Output: {output_dir}")

    return output_dir


def write_text_versions(cameras_dict, pts3d, image_paths, output_dir, w, h, focal, cx, cy):
    """Write cameras.txt / images.txt / points3D.txt (debug mirror of the .bin files)."""

    # cameras.txt
    with open(os.path.join(output_dir, 'cameras.txt'), 'w') as file:
        file.write("# Camera list with one line of data per camera:\n")
        file.write("# CAMERA_ID, MODEL, WIDTH, HEIGHT, PARAMS[]\n")
        file.write(f"1 PINHOLE {w} {h} {focal} {focal} {cx} {cy}\n")

    # images.txt
    with open(os.path.join(output_dir, 'images.txt'), 'w') as file:
        file.write("# Image list with two lines of data per image:\n")
        file.write("# IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME\n")
        file.write("# POINTS2D[] as (X, Y, POINT3D_ID)\n")

        for i, img_path in enumerate(image_paths):
            img_name = os.path.basename(img_path)
            cam_info = (cameras_dict.get(i) or cameras_dict.get(str(i))
                        or cameras_dict.get(img_name))

            pose = np.eye(4) if cam_info is None else cam_info['pose']

            try:
                w2c = np.linalg.inv(pose)
            except np.linalg.LinAlgError:
                w2c = np.eye(4)

            tvec = w2c[:3, 3]
            quat = R.from_matrix(w2c[:3, :3]).as_quat()  # scalar-last
            qw, qx, qy, qz = quat[3], quat[0], quat[1], quat[2]

            image_id = i + 1
            file.write(f"{image_id} {qw} {qx} {qy} {qz} {tvec[0]} {tvec[1]} {tvec[2]} 1 {img_name}\n")
            file.write("\n")  # empty POINTS2D line

    # points3D.txt
    with open(os.path.join(output_dir, 'points3D.txt'), 'w') as file:
        file.write("# 3D point list with one line of data per point:\n")
        file.write("# POINT3D_ID, X, Y, Z, R, G, B, ERROR, TRACK[]\n")

        for point_id, point in enumerate(pts3d, start=1):
            file.write(f"{point_id} {point[0]} {point[1]} {point[2]} 255 255 255 0.0\n")
import os
import subprocess

def run_gaussian_splatting(source_dir, output_dir, iterations=30000):
    """Launch Gaussian Splatting training as a subprocess.

    Args:
        source_dir: COLMAP data root (must contain sparse/0 and images).
        output_dir: directory where the trained model is written.
        iterations: number of training iterations.

    Returns:
        output_dir (regardless of training success; check the printed status).
    """
    print("\n=== Running Gaussian Splatting ===")

    os.makedirs(output_dir, exist_ok=True)

    cmd = [
        "python", "/kaggle/working/gaussian-splatting/train.py",
        "-s", source_dir,
        "-m", output_dir,
        "--iterations", str(iterations),
        "--eval",
    ]

    print(f"Command: {' '.join(cmd)}")
    print(f" Source: {source_dir}")
    print(f" Output: {output_dir}")

    proc = subprocess.run(cmd, capture_output=False, text=True)

    if proc.returncode != 0:
        print(f"\nโ Gaussian Splatting failed with return code {proc.returncode}")
        return output_dir

    print(f"\nโ Gaussian Splatting complete")

    # Sanity-check that training actually produced point clouds.
    point_cloud_dir = os.path.join(output_dir, "point_cloud")
    if not os.path.exists(point_cloud_dir):
        print(f"\nโ ๏ธ Warning: point_cloud directory not found")
        print(f" Expected: {point_cloud_dir}")
        print(f" Contents of {output_dir}:")
        if os.path.exists(output_dir):
            for entry in os.listdir(output_dir):
                print(f" - {entry}")
        return output_dir

    print(f"\nโ Point cloud directory found: {point_cloud_dir}")

    # Report every iteration_* snapshot that contains a PLY.
    for entry in sorted(os.listdir(point_cloud_dir)):
        entry_path = os.path.join(point_cloud_dir, entry)
        if not (os.path.isdir(entry_path) and entry.startswith("iteration_")):
            continue
        ply_file = os.path.join(entry_path, "point_cloud.ply")
        if os.path.exists(ply_file):
            file_size = os.path.getsize(ply_file) / (1024 * 1024)  # MB
            print(f" โ {entry}/point_cloud.ply ({file_size:.2f} MB)")

    return output_dir
def main_pipeline(image_dir, output_dir, square_size=1024, iterations=30000,
                  max_images=200, max_pairs=100, max_points=500000,
                  conf_threshold=1.5, preprocess_mode='none'):
    """End-to-end pipeline: preprocess -> pairs -> MASt3R -> COLMAP -> Gaussian Splatting.

    Args:
        image_dir: input image directory.
        output_dir: output directory (receives images/, sparse/0, gaussian_splatting/).
        square_size: square crop size for biplet preprocessing.
        iterations: Gaussian Splatting training iterations.
        max_images: cap on the number of images used.
        max_pairs: cap on the number of image pairs (further clamped to 50 below).
        max_points: cap on the exported 3D point count.
        conf_threshold: confidence threshold for point filtering.
        preprocess_mode: 'none' (use images as-is) or 'biplet'
            (generate two square crops per image; originals saved separately).

    Returns:
        Path of the Gaussian Splatting output directory.
    """

    # ======================================================================
    # STEP 0: Image Preprocessing (Optional)
    # ======================================================================
    if preprocess_mode == 'biplet':
        print("="*70)
        print("STEP 0: Image Preprocessing (Biplet Crops)")
        print("="*70)

        # Generate biplet crops into a temporary directory.
        temp_biplet_dir = os.path.join(output_dir, "temp_biplet")
        biplet_dir = normalize_image_sizes_biplet(
            image_dir,
            temp_biplet_dir,
            size=square_size
        )

        # Move only the biplet crops into the images folder.
        images_dir = os.path.join(output_dir, "images")
        os.makedirs(images_dir, exist_ok=True)

        # Copy only files carrying a crop suffix.
        biplet_suffixes = ['_left', '_right', '_top', '_bottom']
        copied_count = 0

        for img_file in os.listdir(temp_biplet_dir):
            if any(suffix in img_file for suffix in biplet_suffixes):
                src = os.path.join(temp_biplet_dir, img_file)
                dst = os.path.join(images_dir, img_file)
                shutil.copy2(src, dst)
                copied_count += 1

        print(f"โ Copied {copied_count} biplet images to {images_dir}")

        # Keep the originals in a separate folder (copied straight from input).
        original_images_dir = os.path.join(output_dir, "original_images")
        os.makedirs(original_images_dir, exist_ok=True)

        original_count = 0
        valid_extensions = ('.jpg', '.jpeg', '.png', '.bmp')
        for img_file in os.listdir(image_dir):
            if img_file.lower().endswith(valid_extensions):
                src = os.path.join(image_dir, img_file)
                dst = os.path.join(original_images_dir, img_file)
                shutil.copy2(src, dst)
                original_count += 1

        print(f"โ Saved {original_count} original images to {original_images_dir}")

        # Remove the temporary directory.
        shutil.rmtree(temp_biplet_dir)

        image_dir = images_dir
        clear_memory()
    else:
        # preprocess_mode == 'none': mirror the input images into output/images.
        images_dir = os.path.join(output_dir, "images")
        if not os.path.exists(images_dir):
            print("="*70)
            print("STEP 0: Copying images to output directory")
            print("="*70)
            shutil.copytree(image_dir, images_dir)
            print(f"โ Copied images to {images_dir}")
        # BUGFIX: always redirect image_dir to the mirrored folder. Previously
        # this assignment only ran when the copy happened, so on a re-run
        # (images_dir already present) reconstruction read from the input dir
        # while Gaussian Splatting trained from output_dir/images.
        image_dir = images_dir

    # ======================================================================
    # STEP 1: Loading and Preparing Images
    # ======================================================================
    print("\n" + "="*70)
    print("STEP 1: Loading and Preparing Images")
    print("="*70)

    image_paths = load_images_from_directory(image_dir, max_images=max_images)
    print(f"Loaded {len(image_paths)} images")
    clear_memory()

    # ======================================================================
    # STEP 2: Image Pair Selection
    # ======================================================================
    print("\n" + "="*70)
    print("STEP 2: Image Pair Selection")
    print("="*70)

    # NOTE: hard clamp to 50 pairs regardless of the caller's max_pairs.
    max_pairs = min(max_pairs, 50)
    pairs = get_image_pairs_asmk(image_paths, max_pairs=max_pairs)
    print(f"Selected {len(pairs)} image pairs")
    clear_memory()

    # ======================================================================
    # STEP 3: MASt3R 3D Reconstruction
    # ======================================================================
    print("\n" + "="*70)
    print("STEP 3: MASt3R 3D Reconstruction")
    print("="*70)

    device = Config.DEVICE
    model = load_mast3r_model(device)

    scene, mast3r_images = run_mast3r_pairs(model, image_paths, pairs, device)

    # Free the model before the memory-heavy conversion step.
    del model
    clear_memory()

    # ======================================================================
    # STEP 4: Converting to COLMAP (PINHOLE)
    # ======================================================================
    print("\n" + "="*70)
    print("STEP 4: Converting to COLMAP (PINHOLE)")
    print("="*70)

    cameras_dict, pts3d, confidence = extract_camera_params_process2(
        scene, image_paths, conf_threshold=conf_threshold
    )

    # Free the scene.
    del scene
    clear_memory()

    # Cap the point count (random subsample keeps spatial coverage).
    if len(pts3d) > max_points:
        print(f"โ ๏ธ Limiting points from {len(pts3d)} to {max_points}")
        indices = np.random.choice(len(pts3d), max_points, replace=False)
        pts3d = pts3d[indices]
        confidence = confidence[indices]

    print(f"Final point count: {len(pts3d)}")

    # COLMAP conversion (binary format).
    colmap_dir = os.path.join(output_dir, "sparse/0")
    os.makedirs(colmap_dir, exist_ok=True)

    write_colmap_sparse(cameras_dict, pts3d, confidence, image_paths, colmap_dir)

    clear_memory()

    # ======================================================================
    # STEP 5: Running Gaussian Splatting
    # ======================================================================
    print("\n" + "="*70)
    print("STEP 5: Running Gaussian Splatting")
    print("="*70)

    # Source directory (contains sparse/0 and images).
    source_dir = output_dir

    # Model output directory.
    model_output_dir = os.path.join(output_dir, "gaussian_splatting")

    gs_output = run_gaussian_splatting(
        source_dir=source_dir,
        output_dir=model_output_dir,
        iterations=iterations
    )

    # ======================================================================
    # STEP 6: Verify Output
    # ======================================================================
    print("\n" + "="*70)
    print("PIPELINE COMPLETE")
    print("="*70)

    # Check for the final PLY.
    ply_path = os.path.join(
        model_output_dir,
        "point_cloud",
        f"iteration_{iterations}",
        "point_cloud.ply"
    )

    if os.path.exists(ply_path):
        file_size = os.path.getsize(ply_path) / (1024 * 1024)
        print(f"โ Point cloud generated: {ply_path}")
        print(f" Size: {file_size:.2f} MB")
    else:
        print(f"โ ๏ธ Point cloud not found at: {ply_path}")
        print(f"\nChecking alternative locations...")

        # Look for other iteration_* snapshots.
        point_cloud_dir = os.path.join(model_output_dir, "point_cloud")
        if os.path.exists(point_cloud_dir):
            for item in sorted(os.listdir(point_cloud_dir)):
                item_path = os.path.join(point_cloud_dir, item)
                if os.path.isdir(item_path) and item.startswith("iteration_"):
                    ply_file = os.path.join(item_path, "point_cloud.ply")
                    if os.path.exists(ply_file):
                        file_size = os.path.getsize(ply_file) / (1024 * 1024)
                        print(f" Found: {item}/point_cloud.ply ({file_size:.2f} MB)")

    print(f"\nOutput directory structure:")
    print(f" {output_dir}/")
    print(f" โโโ images/ (processed images for training)")
    if preprocess_mode == 'biplet':
        print(f" โโโ original_images/ (original source images)")
    print(f" โโโ sparse/0/ (COLMAP data)")
    print(f" โโโ gaussian_splatting/ (GS output)")

    return gs_output
======================================================================\n # USAGE EXAMPLE\n # ======================================================================\n \n if __name__ == \"__main__\":\n IMAGE_DIR = \"/kaggle/input/your-dataset/images\"\n OUTPUT_DIR = \"/kaggle/working/output\"\n \n # ไฝฟ็จไพ1: ๅๅฆ็ใชใ๏ผๅ
็ปๅใใใฎใพใพไฝฟ็จ๏ผ\n gs_output = main_pipeline(\n image_dir=IMAGE_DIR,\n output_dir=OUTPUT_DIR,\n iterations=30000,\n max_images=100,\n max_pairs=50,\n max_points=300000,\n conf_threshold=1.5,\n preprocess_mode='none' # ๅๅฆ็ใชใ\n )\n \n # ไฝฟ็จไพ2: Bipletใฏใญใใ๏ผ็ปๅๆฐใ2ๅใซใชใ๏ผ\n gs_output = main_pipeline(\n image_dir=IMAGE_DIR,\n output_dir=OUTPUT_DIR,\n square_size=1024, # ใฏใญใใใตใคใบ\n iterations=30000,\n max_images=50, # Bipletใง2ๅใซใชใใฎใงๅฐใชใใซ\n max_pairs=50,\n max_points=300000,\n conf_threshold=1.5,\n preprocess_mode='biplet' # 2ใคใฎใฏใญใใ็ๆ\n )\n \n print(\"\\n\" + \"=\"*70)\n print(\"PIPELINE COMPLETE\")\n print(\"=\"*70)\n print(f\"Output directory: {gs_output}\")","metadata":{}},{"cell_type":"code","source":"print(f\"โ np: {np.__version__} - {np.__file__}\")\n!pip show numpy | grep Version","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-01-22T05:38:46.372751Z","iopub.execute_input":"2026-01-22T05:38:46.373019Z","iopub.status.idle":"2026-01-22T05:38:48.515382Z","shell.execute_reply.started":"2026-01-22T05:38:46.372990Z","shell.execute_reply":"2026-01-22T05:38:48.514538Z"}},"outputs":[{"name":"stdout","text":"โ np: 1.26.4 - /usr/local/lib/python3.12/dist-packages/numpy/__init__.py\nVersion: 1.26.4\nVersion 3.1, 31 March 2009\n Version 3, 29 June 2007\n 5. Conveying Modified Source Versions.\n 14. Revised Versions of this License.\n","output_type":"stream"}],"execution_count":13},{"cell_type":"code","source":"# romaใๅฟ
่ฆใชๆ็นใง็ขบ่ชใปใคใณในใใผใซ\ntry:\n import roma\n print(\"โ roma is installed\")\nexcept ModuleNotFoundError:\n print(\"โ ๏ธ roma not found, installing...\")\n !pip install roma\n import roma\n print(\"โ roma installed\")","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-01-22T05:38:48.519028Z","iopub.execute_input":"2026-01-22T05:38:48.519296Z","iopub.status.idle":"2026-01-22T05:38:48.524366Z","shell.execute_reply.started":"2026-01-22T05:38:48.519268Z","shell.execute_reply":"2026-01-22T05:38:48.523676Z"}},"outputs":[{"name":"stdout","text":"โ roma is installed\n","output_type":"stream"}],"execution_count":14},{"cell_type":"code","source":"# ======================================================================\n# USAGE EXAMPLE\n# ======================================================================\n\nif __name__ == \"__main__\":\n IMAGE_DIR = \"/kaggle/input/two-dogs/fountain80/fountain80\"\n OUTPUT_DIR = \"/kaggle/working/output\"\n\n gs_output = main_pipeline(\n image_dir=IMAGE_DIR,\n output_dir=OUTPUT_DIR,\n square_size=800,\n iterations=1000,\n max_images=25, #\n max_pairs=25,\n max_points=4000,\n conf_threshold=1.5,\n preprocess_mode='biplet' \n )\n \n print(\"\\n\" + \"=\"*70)\n print(\"PIPELINE COMPLETE\")\n print(\"=\"*70)\n print(f\"Output directory: {gs_output}\")","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-01-22T05:38:48.525206Z","iopub.execute_input":"2026-01-22T05:38:48.525610Z","execution_failed":"2026-01-22T05:41:49.309Z"}},"outputs":[{"name":"stdout","text":"======================================================================\nSTEP 0: Image Preprocessing (Biplet Crops)\n======================================================================\n\n=== Generating Biplet Crops (800x800) ===\n","output_type":"stream"},{"name":"stderr","text":"Creating biplets: 100%|โโโโโโโโโโ| 80/80 [00:08<00:00, 9.63it/s]\n","output_type":"stream"},{"name":"stdout","text":"\nโ Biplet generation complete:\n Source images: 80\n 
Biplet crops generated: 160\n Original size distribution: {'1440x1920': 80}\nโ Copied 160 biplet images to /kaggle/working/output/images\nโ Saved 80 original images to /kaggle/working/output/original_images\n\n======================================================================\nSTEP 1: Loading and Preparing Images\n======================================================================\n\nLoading images from: /kaggle/working/output/images\nโ ๏ธ Limiting from 160 to 30 images\nโ Found 30 images\nLoaded 30 images\n\n======================================================================\nSTEP 2: Image Pair Selection\n======================================================================\n\n=== Getting Image Pairs with ASMK ===\n\n=== Loading ASMK Retrieval Model ===\nโ ๏ธ Failed to load MASt3R: tried to load naver/MASt3R_ViTLarge_BaseDecoder_512_catmlpdpt_metric from huggingface, but failed\nTrying DUSt3R instead\nโ Loaded DUSt3R model for retrieval\nโ ASMK model loaded\n\n=== Extracting MASt3R Features ===\n","output_type":"stream"},{"name":"stderr","text":"Features: 0%| | 0/30 [00:00<?, ?it/s]","output_type":"stream"},{"name":"stdout","text":">> Loading a list of 2 images\n - adding /kaggle/working/output/images/image_001_bottom.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_001_bottom.jpeg with resolution 800x800 --> 224x224\n (Found 2 images)\n>> Inference with model on 1 image pairs\n","output_type":"stream"},{"name":"stderr","text":"\n 0%| | 0/1 [00:00<?, ?it/s]\u001b[A/kaggle/working/mast3r/dust3r/dust3r/inference.py:44: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n with torch.cuda.amp.autocast(enabled=bool(use_amp)):\n/kaggle/working/mast3r/dust3r/dust3r/model.py:206: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. 
Please use `torch.amp.autocast('cuda', args...)` instead.\n with torch.cuda.amp.autocast(enabled=False):\n/kaggle/working/mast3r/dust3r/dust3r/inference.py:48: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n with torch.cuda.amp.autocast(enabled=False):\n\n100%|โโโโโโโโโโ| 1/1 [00:00<00:00, 1.92it/s]\u001b[A\nFeatures: 3%|โ | 1/30 [00:00<00:16, 1.77it/s]","output_type":"stream"},{"name":"stdout","text":">> Loading a list of 2 images\n - adding /kaggle/working/output/images/image_001_top.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_001_top.jpeg with resolution 800x800 --> 224x224\n (Found 2 images)\n>> Inference with model on 1 image pairs\n","output_type":"stream"},{"name":"stderr","text":"\n 0%| | 0/1 [00:00<?, ?it/s]\u001b[A\n100%|โโโโโโโโโโ| 1/1 [00:00<00:00, 4.69it/s]\u001b[A\nFeatures: 7%|โ | 2/30 [00:00<00:10, 2.62it/s]","output_type":"stream"},{"name":"stdout","text":">> Loading a list of 2 images\n - adding /kaggle/working/output/images/image_002_bottom.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_002_bottom.jpeg with resolution 800x800 --> 224x224\n (Found 2 images)\n>> Inference with model on 1 image pairs\n","output_type":"stream"},{"name":"stderr","text":"\n 0%| | 0/1 [00:00<?, ?it/s]\u001b[A\n100%|โโโโโโโโโโ| 1/1 [00:00<00:00, 5.14it/s]\u001b[A\nFeatures: 10%|โ | 3/30 [00:01<00:08, 3.17it/s]","output_type":"stream"},{"name":"stdout","text":">> Loading a list of 2 images\n - adding /kaggle/working/output/images/image_002_top.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_002_top.jpeg with resolution 800x800 --> 224x224\n (Found 2 images)\n>> Inference with model on 1 image pairs\n","output_type":"stream"},{"name":"stderr","text":"\n 0%| | 0/1 [00:00<?, ?it/s]\u001b[A\n100%|โโโโโโโโโโ| 1/1 [00:00<00:00, 5.16it/s]\u001b[A\nFeatures: 13%|โโ | 4/30 
[00:01<00:07, 3.55it/s]","output_type":"stream"},{"name":"stdout","text":">> Loading a list of 2 images\n - adding /kaggle/working/output/images/image_003_bottom.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_003_bottom.jpeg with resolution 800x800 --> 224x224\n (Found 2 images)\n>> Inference with model on 1 image pairs\n","output_type":"stream"},{"name":"stderr","text":"\n 0%| | 0/1 [00:00<?, ?it/s]\u001b[A\n100%|โโโโโโโโโโ| 1/1 [00:00<00:00, 5.17it/s]\u001b[A\nFeatures: 17%|โโ | 5/30 [00:01<00:06, 3.79it/s]","output_type":"stream"},{"name":"stdout","text":">> Loading a list of 2 images\n - adding /kaggle/working/output/images/image_003_top.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_003_top.jpeg with resolution 800x800 --> 224x224\n (Found 2 images)\n>> Inference with model on 1 image pairs\n","output_type":"stream"},{"name":"stderr","text":"\n 0%| | 0/1 [00:00<?, ?it/s]\u001b[A\n100%|โโโโโโโโโโ| 1/1 [00:00<00:00, 5.09it/s]\u001b[A\nFeatures: 20%|โโ | 6/30 [00:01<00:06, 3.94it/s]","output_type":"stream"},{"name":"stdout","text":">> Loading a list of 2 images\n - adding /kaggle/working/output/images/image_004_bottom.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_004_bottom.jpeg with resolution 800x800 --> 224x224\n (Found 2 images)\n>> Inference with model on 1 image pairs\n","output_type":"stream"},{"name":"stderr","text":"\n 0%| | 0/1 [00:00<?, ?it/s]\u001b[A\n100%|โโโโโโโโโโ| 1/1 [00:00<00:00, 5.13it/s]\u001b[A\nFeatures: 23%|โโโ | 7/30 [00:01<00:05, 4.05it/s]","output_type":"stream"},{"name":"stdout","text":">> Loading a list of 2 images\n - adding /kaggle/working/output/images/image_004_top.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_004_top.jpeg with resolution 800x800 --> 224x224\n (Found 2 images)\n>> Inference with model on 1 image 
pairs\n","output_type":"stream"},{"name":"stderr","text":"\n 0%| | 0/1 [00:00<?, ?it/s]\u001b[A\n100%|โโโโโโโโโโ| 1/1 [00:00<00:00, 5.21it/s]\u001b[A\nFeatures: 27%|โโโ | 8/30 [00:02<00:05, 4.14it/s]","output_type":"stream"},{"name":"stdout","text":">> Loading a list of 2 images\n - adding /kaggle/working/output/images/image_005_bottom.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_005_bottom.jpeg with resolution 800x800 --> 224x224\n (Found 2 images)\n>> Inference with model on 1 image pairs\n","output_type":"stream"},{"name":"stderr","text":"\n 0%| | 0/1 [00:00<?, ?it/s]\u001b[A\n100%|โโโโโโโโโโ| 1/1 [00:00<00:00, 5.17it/s]\u001b[A\nFeatures: 30%|โโโ | 9/30 [00:02<00:05, 4.20it/s]","output_type":"stream"},{"name":"stdout","text":">> Loading a list of 2 images\n - adding /kaggle/working/output/images/image_005_top.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_005_top.jpeg with resolution 800x800 --> 224x224\n (Found 2 images)\n>> Inference with model on 1 image pairs\n","output_type":"stream"},{"name":"stderr","text":"\n 0%| | 0/1 [00:00<?, ?it/s]\u001b[A\n100%|โโโโโโโโโโ| 1/1 [00:00<00:00, 5.14it/s]\u001b[A\nFeatures: 33%|โโโโ | 10/30 [00:02<00:04, 4.23it/s]","output_type":"stream"},{"name":"stdout","text":">> Loading a list of 2 images\n - adding /kaggle/working/output/images/image_006_bottom.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_006_bottom.jpeg with resolution 800x800 --> 224x224\n (Found 2 images)\n>> Inference with model on 1 image pairs\n","output_type":"stream"},{"name":"stderr","text":"\n 0%| | 0/1 [00:00<?, ?it/s]\u001b[A\n100%|โโโโโโโโโโ| 1/1 [00:00<00:00, 5.07it/s]\u001b[A\nFeatures: 37%|โโโโ | 11/30 [00:02<00:04, 4.21it/s]","output_type":"stream"},{"name":"stdout","text":">> Loading a list of 2 images\n - adding /kaggle/working/output/images/image_006_top.jpeg with resolution 800x800 --> 224x224\n - adding 
/kaggle/working/output/images/image_006_top.jpeg with resolution 800x800 --> 224x224\n (Found 2 images)\n>> Inference with model on 1 image pairs\n","output_type":"stream"},{"name":"stderr","text":"\n 0%| | 0/1 [00:00<?, ?it/s]\u001b[A\n100%|โโโโโโโโโโ| 1/1 [00:00<00:00, 5.06it/s]\u001b[A\nFeatures: 40%|โโโโ | 12/30 [00:03<00:04, 4.21it/s]","output_type":"stream"},{"name":"stdout","text":">> Loading a list of 2 images\n - adding /kaggle/working/output/images/image_007_bottom.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_007_bottom.jpeg with resolution 800x800 --> 224x224\n (Found 2 images)\n>> Inference with model on 1 image pairs\n","output_type":"stream"},{"name":"stderr","text":"\n 0%| | 0/1 [00:00<?, ?it/s]\u001b[A\n100%|โโโโโโโโโโ| 1/1 [00:00<00:00, 5.17it/s]\u001b[A\nFeatures: 43%|โโโโโ | 13/30 [00:03<00:04, 4.22it/s]","output_type":"stream"},{"name":"stdout","text":">> Loading a list of 2 images\n - adding /kaggle/working/output/images/image_007_top.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_007_top.jpeg with resolution 800x800 --> 224x224\n (Found 2 images)\n>> Inference with model on 1 image pairs\n","output_type":"stream"},{"name":"stderr","text":"\n 0%| | 0/1 [00:00<?, ?it/s]\u001b[A\n100%|โโโโโโโโโโ| 1/1 [00:00<00:00, 5.14it/s]\u001b[A\nFeatures: 47%|โโโโโ | 14/30 [00:03<00:03, 4.23it/s]","output_type":"stream"},{"name":"stdout","text":">> Loading a list of 2 images\n - adding /kaggle/working/output/images/image_008_bottom.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_008_bottom.jpeg with resolution 800x800 --> 224x224\n (Found 2 images)\n>> Inference with model on 1 image pairs\n","output_type":"stream"},{"name":"stderr","text":"\n 0%| | 0/1 [00:00<?, ?it/s]\u001b[A\n100%|โโโโโโโโโโ| 1/1 [00:00<00:00, 5.15it/s]\u001b[A\nFeatures: 50%|โโโโโ | 15/30 [00:03<00:03, 4.23it/s]","output_type":"stream"},{"name":"stdout","text":">> 
Loading a list of 2 images\n - adding /kaggle/working/output/images/image_008_top.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_008_top.jpeg with resolution 800x800 --> 224x224\n (Found 2 images)\n>> Inference with model on 1 image pairs\n","output_type":"stream"},{"name":"stderr","text":"\n 0%| | 0/1 [00:00<?, ?it/s]\u001b[A\n100%|โโโโโโโโโโ| 1/1 [00:00<00:00, 5.20it/s]\u001b[A\nFeatures: 53%|โโโโโโ | 16/30 [00:04<00:03, 4.26it/s]","output_type":"stream"},{"name":"stdout","text":">> Loading a list of 2 images\n - adding /kaggle/working/output/images/image_009_bottom.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_009_bottom.jpeg with resolution 800x800 --> 224x224\n (Found 2 images)\n>> Inference with model on 1 image pairs\n","output_type":"stream"},{"name":"stderr","text":"\n 0%| | 0/1 [00:00<?, ?it/s]\u001b[A\n100%|โโโโโโโโโโ| 1/1 [00:00<00:00, 5.00it/s]\u001b[A\nFeatures: 57%|โโโโโโ | 17/30 [00:04<00:03, 4.19it/s]","output_type":"stream"},{"name":"stdout","text":">> Loading a list of 2 images\n - adding /kaggle/working/output/images/image_009_top.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_009_top.jpeg with resolution 800x800 --> 224x224\n (Found 2 images)\n>> Inference with model on 1 image pairs\n","output_type":"stream"},{"name":"stderr","text":"\n 0%| | 0/1 [00:00<?, ?it/s]\u001b[A\n100%|โโโโโโโโโโ| 1/1 [00:00<00:00, 4.93it/s]\u001b[A\nFeatures: 60%|โโโโโโ | 18/30 [00:04<00:02, 4.17it/s]","output_type":"stream"},{"name":"stdout","text":">> Loading a list of 2 images\n - adding /kaggle/working/output/images/image_010_bottom.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_010_bottom.jpeg with resolution 800x800 --> 224x224\n (Found 2 images)\n>> Inference with model on 1 image pairs\n","output_type":"stream"},{"name":"stderr","text":"\n 0%| | 0/1 [00:00<?, ?it/s]\u001b[A\n100%|โโโโโโโโโโ| 1/1 
[00:00<00:00, 5.12it/s]\u001b[A\nFeatures: 63%|โโโโโโโ | 19/30 [00:04<00:02, 4.19it/s]","output_type":"stream"},{"name":"stdout","text":">> Loading a list of 2 images\n - adding /kaggle/working/output/images/image_010_top.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_010_top.jpeg with resolution 800x800 --> 224x224\n (Found 2 images)\n>> Inference with model on 1 image pairs\n","output_type":"stream"},{"name":"stderr","text":"\n 0%| | 0/1 [00:00<?, ?it/s]\u001b[A\n100%|โโโโโโโโโโ| 1/1 [00:00<00:00, 5.15it/s]\u001b[A\nFeatures: 67%|โโโโโโโ | 20/30 [00:05<00:02, 4.23it/s]","output_type":"stream"},{"name":"stdout","text":">> Loading a list of 2 images\n - adding /kaggle/working/output/images/image_011_bottom.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_011_bottom.jpeg with resolution 800x800 --> 224x224\n (Found 2 images)\n>> Inference with model on 1 image pairs\n","output_type":"stream"},{"name":"stderr","text":"\n 0%| | 0/1 [00:00<?, ?it/s]\u001b[A\n100%|โโโโโโโโโโ| 1/1 [00:00<00:00, 5.17it/s]\u001b[A\nFeatures: 70%|โโโโโโโ | 21/30 [00:05<00:02, 4.22it/s]","output_type":"stream"},{"name":"stdout","text":">> Loading a list of 2 images\n - adding /kaggle/working/output/images/image_011_top.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_011_top.jpeg with resolution 800x800 --> 224x224\n (Found 2 images)\n>> Inference with model on 1 image pairs\n","output_type":"stream"},{"name":"stderr","text":"\n 0%| | 0/1 [00:00<?, ?it/s]\u001b[A\n100%|โโโโโโโโโโ| 1/1 [00:00<00:00, 4.81it/s]\u001b[A\nFeatures: 73%|โโโโโโโโ | 22/30 [00:05<00:01, 4.16it/s]","output_type":"stream"},{"name":"stdout","text":">> Loading a list of 2 images\n - adding /kaggle/working/output/images/image_012_bottom.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_012_bottom.jpeg with resolution 800x800 --> 224x224\n (Found 2 images)\n>> 
Inference with model on 1 image pairs\n","output_type":"stream"},{"name":"stderr","text":"\n 0%| | 0/1 [00:00<?, ?it/s]\u001b[A\n100%|โโโโโโโโโโ| 1/1 [00:00<00:00, 5.15it/s]\u001b[A\nFeatures: 77%|โโโโโโโโ | 23/30 [00:05<00:01, 4.20it/s]","output_type":"stream"},{"name":"stdout","text":">> Loading a list of 2 images\n - adding /kaggle/working/output/images/image_012_top.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_012_top.jpeg with resolution 800x800 --> 224x224\n (Found 2 images)\n>> Inference with model on 1 image pairs\n","output_type":"stream"},{"name":"stderr","text":"\n 0%| | 0/1 [00:00<?, ?it/s]\u001b[A\n100%|โโโโโโโโโโ| 1/1 [00:00<00:00, 5.11it/s]\u001b[A\nFeatures: 80%|โโโโโโโโ | 24/30 [00:06<00:01, 4.23it/s]","output_type":"stream"},{"name":"stdout","text":">> Loading a list of 2 images\n - adding /kaggle/working/output/images/image_013_bottom.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_013_bottom.jpeg with resolution 800x800 --> 224x224\n (Found 2 images)\n>> Inference with model on 1 image pairs\n","output_type":"stream"},{"name":"stderr","text":"\n 0%| | 0/1 [00:00<?, ?it/s]\u001b[A\n100%|โโโโโโโโโโ| 1/1 [00:00<00:00, 5.13it/s]\u001b[A\nFeatures: 83%|โโโโโโโโโ | 25/30 [00:06<00:01, 4.24it/s]","output_type":"stream"},{"name":"stdout","text":">> Loading a list of 2 images\n - adding /kaggle/working/output/images/image_013_top.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_013_top.jpeg with resolution 800x800 --> 224x224\n (Found 2 images)\n>> Inference with model on 1 image pairs\n","output_type":"stream"},{"name":"stderr","text":"\n 0%| | 0/1 [00:00<?, ?it/s]\u001b[A\n100%|โโโโโโโโโโ| 1/1 [00:00<00:00, 5.14it/s]\u001b[A\nFeatures: 87%|โโโโโโโโโ | 26/30 [00:06<00:00, 4.25it/s]","output_type":"stream"},{"name":"stdout","text":">> Loading a list of 2 images\n - adding /kaggle/working/output/images/image_014_bottom.jpeg with 
resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_014_bottom.jpeg with resolution 800x800 --> 224x224\n (Found 2 images)\n>> Inference with model on 1 image pairs\n","output_type":"stream"},{"name":"stderr","text":"\n 0%| | 0/1 [00:00<?, ?it/s]\u001b[A\n100%|โโโโโโโโโโ| 1/1 [00:00<00:00, 5.15it/s]\u001b[A\nFeatures: 90%|โโโโโโโโโ | 27/30 [00:06<00:00, 4.27it/s]","output_type":"stream"},{"name":"stdout","text":">> Loading a list of 2 images\n - adding /kaggle/working/output/images/image_014_top.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_014_top.jpeg with resolution 800x800 --> 224x224\n (Found 2 images)\n>> Inference with model on 1 image pairs\n","output_type":"stream"},{"name":"stderr","text":"\n 0%| | 0/1 [00:00<?, ?it/s]\u001b[A\n100%|โโโโโโโโโโ| 1/1 [00:00<00:00, 5.08it/s]\u001b[A\nFeatures: 93%|โโโโโโโโโโ| 28/30 [00:06<00:00, 4.25it/s]","output_type":"stream"},{"name":"stdout","text":">> Loading a list of 2 images\n - adding /kaggle/working/output/images/image_015_bottom.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_015_bottom.jpeg with resolution 800x800 --> 224x224\n (Found 2 images)\n>> Inference with model on 1 image pairs\n","output_type":"stream"},{"name":"stderr","text":"\n 0%| | 0/1 [00:00<?, ?it/s]\u001b[A\n100%|โโโโโโโโโโ| 1/1 [00:00<00:00, 5.15it/s]\u001b[A\nFeatures: 97%|โโโโโโโโโโ| 29/30 [00:07<00:00, 4.27it/s]","output_type":"stream"},{"name":"stdout","text":">> Loading a list of 2 images\n - adding /kaggle/working/output/images/image_015_top.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_015_top.jpeg with resolution 800x800 --> 224x224\n (Found 2 images)\n>> Inference with model on 1 image pairs\n","output_type":"stream"},{"name":"stderr","text":"\n 0%| | 0/1 [00:00<?, ?it/s]\u001b[A\n100%|โโโโโโโโโโ| 1/1 [00:00<00:00, 5.09it/s]\u001b[A\nFeatures: 100%|โโโโโโโโโโ| 30/30 [00:07<00:00, 
4.05it/s]\n","output_type":"stream"},{"name":"stdout","text":"โ Extracted features for 30 images\n Feature shape: torch.Size([224, 224, 3])\n\n=== Computing ASMK Similarity ===\nUsing ASMK with codebook size: 1024\nโ ๏ธ ASMK failed: operands could not be broadcast together with shapes (50176,1,3) (1,1024,24) , using cosine similarity\nSimilarity matrix shape: (30, 30)\nSimilarity range: [-1.000, 1.000]\nโ Built 149 unique pairs\nLimited to 30 pairs\nSelected 30 image pairs\n\n======================================================================\nSTEP 3: MASt3R 3D Reconstruction\n======================================================================\n\n=== Loading MASt3R Model ===\nAttempting to load: naver/MASt3R_ViTLarge_BaseDecoder_512_catmlpdpt_metric\nโ ๏ธ Failed to load MASt3R: tried to load naver/MASt3R_ViTLarge_BaseDecoder_512_catmlpdpt_metric from huggingface, but failed\nTrying DUSt3R instead: naver/DUSt3R_ViTLarge_BaseDecoder_512_dpt\nโ Loaded DUSt3R model as fallback\nโ Model loaded on cuda\n\n=== Running MASt3R Reconstruction ===\n","output_type":"stream"},{"name":"stderr","text":"/kaggle/working/mast3r/dust3r/dust3r/cloud_opt/base_opt.py:275: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. 
Please use `torch.amp.autocast('cuda', args...)` instead.\n @torch.cuda.amp.autocast(enabled=False)\n","output_type":"stream"},{"name":"stdout","text":"Selected 26 unique images from 30 pairs\n>> Loading a list of 26 images\n - adding /kaggle/working/output/images/image_001_bottom.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_001_top.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_002_top.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_003_top.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_004_bottom.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_004_top.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_005_bottom.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_005_top.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_006_bottom.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_006_top.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_007_top.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_008_bottom.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_008_top.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_009_bottom.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_009_top.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_010_bottom.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_010_top.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_011_bottom.jpeg with resolution 800x800 --> 224x224\n - adding 
/kaggle/working/output/images/image_012_bottom.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_012_top.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_013_bottom.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_013_top.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_014_bottom.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_014_top.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_015_bottom.jpeg with resolution 800x800 --> 224x224\n - adding /kaggle/working/output/images/image_015_top.jpeg with resolution 800x800 --> 224x224\n (Found 26 images)\nCreated 30 image pairs\nRunning inference on 30 pairs...\n>> Inference with model on 30 image pairs\n","output_type":"stream"},{"name":"stderr","text":" 0%| | 0/30 [00:00<?, ?it/s]/kaggle/working/mast3r/dust3r/dust3r/inference.py:44: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n with torch.cuda.amp.autocast(enabled=bool(use_amp)):\n/kaggle/working/mast3r/dust3r/dust3r/model.py:206: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n with torch.cuda.amp.autocast(enabled=False):\n/kaggle/working/mast3r/dust3r/dust3r/inference.py:48: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. 
Please use `torch.amp.autocast('cuda', args...)` instead.\n with torch.cuda.amp.autocast(enabled=False):\n100%|โโโโโโโโโโ| 30/30 [00:06<00:00, 4.94it/s]\n","output_type":"stream"},{"name":"stdout","text":"โ Processed 5 predictions\nRunning global alignment...\n init edge (0*,11*) score=32.269466400146484\n init edge (11,22*) score=24.828502655029297\n init edge (18*,22) score=20.115999221801758\n init edge (9*,11) score=19.908491134643555\n init edge (11,20*) score=19.53492546081543\n init edge (2*,18) score=13.676949501037598\n init edge (2,14*) score=13.364959716796875\n init edge (3*,11) score=11.979717254638672\n init edge (4*,9) score=5.618801593780518\n init edge (11,12*) score=5.558835506439209\n init edge (6*,9) score=4.645031929016113\n init edge (10*,20) score=4.091282367706299\n init edge (8*,18) score=21.392444610595703\n init edge (1*,14) score=14.89332389831543\n init edge (12,13*) score=14.514020919799805\n init edge (13,17*) score=13.715021133422852\n init edge (10,24*) score=6.7983717918396\n init edge (15*,24) score=26.918066024780273\n init edge (1,16*) score=17.835407257080078\n init edge (7*,24) score=11.495546340942383\n init edge (5*,24) score=6.8192853927612305\n","output_type":"stream"}],"execution_count":null},{"cell_type":"code","source":"print(f\"โ np: {np.__version__} - {np.__file__}\")\n!pip show numpy | grep Version","metadata":{"trusted":true,"execution":{"execution_failed":"2026-01-22T05:41:49.309Z"}},"outputs":[],"execution_count":null},{"cell_type":"code","source":"","metadata":{"trusted":true},"outputs":[],"execution_count":null}]}
|
|
|
|
|
|