Upload 3 files
Browse files- ex12-protocol.ipynb +1 -0
- ex13-protocol.ipynb +1 -0
- ex14-protocol.ipynb +1 -0
ex12-protocol.ipynb
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
{"metadata":{"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.11.13"},"kaggle":{"accelerator":"nvidiaTeslaT4","dataSources":[{"sourceId":49349,"databundleVersionId":5447706,"sourceType":"competition"},{"sourceId":14451718,"sourceType":"datasetVersion","datasetId":1429416}],"isInternetEnabled":true,"language":"python","sourceType":"notebook","isGpuEnabled":true},"papermill":{"default_parameters":{},"duration":20573.990788,"end_time":"2026-01-11T00:00:22.081506","environment_variables":{},"exception":null,"input_path":"__notebook__.ipynb","output_path":"__notebook__.ipynb","parameters":{},"start_time":"2026-01-10T18:17:28.090718","version":"2.6.0"},"colab":{"provenance":[],"gpuType":"T4"},"accelerator":"GPU"},"nbformat_minor":4,"nbformat":4,"cells":[{"cell_type":"markdown","source":"# **biplet-dino-colmap-gs**","metadata":{"papermill":{"duration":0.002985,"end_time":"2026-01-10T18:17:32.170524","exception":false,"start_time":"2026-01-10T18:17:32.167539","status":"completed"},"tags":[],"id":"fb1f1fdc"}},{"cell_type":"code","source":"#サイズの異なる画像を扱う\nfrom google.colab import drive\ndrive.mount('/content/drive')","metadata":{"id":"JON4rYSEOzCg","outputId":"802a32ed-8ecf-40c9-ddf8-3d5ce7706299"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"import os\nimport sys\nimport subprocess\nimport shutil\nfrom pathlib import Path\nimport cv2\nfrom PIL import Image\nimport glob\n\nIMAGE_PATH=\"/content/drive/MyDrive/your_folder/fountain100\"\nWORK_DIR = '/content/gaussian-splatting'\nOUTPUT_DIR = '/content/output'\nCOLMAP_DIR = 
'/content/colmap_data'","metadata":{"execution":{"iopub.execute_input":"2026-01-10T18:17:32.181455Z","iopub.status.busy":"2026-01-10T18:17:32.180969Z","iopub.status.idle":"2026-01-10T18:17:32.355942Z","shell.execute_reply":"2026-01-10T18:17:32.355229Z"},"papermill":{"duration":0.179454,"end_time":"2026-01-10T18:17:32.357275","exception":false,"start_time":"2026-01-10T18:17:32.177821","status":"completed"},"tags":[],"id":"22353010"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"def run_cmd(cmd, check=True, capture=False):\n \"\"\"Run command with better error handling\"\"\"\n print(f\"Running: {' '.join(cmd)}\")\n result = subprocess.run(\n cmd,\n capture_output=capture,\n text=True,\n check=False\n )\n if check and result.returncode != 0:\n print(f\"❌ Command failed with code {result.returncode}\")\n if capture:\n print(f\"STDOUT: {result.stdout}\")\n print(f\"STDERR: {result.stderr}\")\n return result\n\n\ndef setup_environment():\n \"\"\"\n Colab environment setup for Gaussian Splatting + LightGlue + pycolmap\n Python 3.12 compatible version (v8)\n \"\"\"\n\n print(\"🚀 Setting up COLAB environment (v8 - Python 3.12 compatible)\")\n\n WORK_DIR = \"gaussian-splatting\"\n\n # =====================================================================\n # STEP 0: NumPy FIX (Python 3.12 compatible)\n # =====================================================================\n print(\"\\n\" + \"=\"*70)\n print(\"STEP 0: Fix NumPy (Python 3.12 compatible)\")\n print(\"=\"*70)\n\n # Python 3.12 requires numpy >= 1.26\n run_cmd([sys.executable, \"-m\", \"pip\", \"uninstall\", \"-y\", \"numpy\"])\n run_cmd([sys.executable, \"-m\", \"pip\", \"install\", \"numpy==1.26.4\"])\n\n # sanity check\n run_cmd([sys.executable, \"-c\", \"import numpy; print('NumPy:', numpy.__version__)\"])\n\n # =====================================================================\n # STEP 1: System packages (Colab)\n # 
=====================================================================\n print(\"\\n\" + \"=\"*70)\n print(\"STEP 1: System packages\")\n print(\"=\"*70)\n\n run_cmd([\"apt-get\", \"update\", \"-qq\"])\n run_cmd([\n \"apt-get\", \"install\", \"-y\", \"-qq\",\n \"colmap\",\n \"build-essential\",\n \"cmake\",\n \"git\",\n \"libopenblas-dev\",\n \"xvfb\"\n ])\n\n # virtual display (COLMAP / OpenCV safety)\n os.environ[\"QT_QPA_PLATFORM\"] = \"offscreen\"\n os.environ[\"DISPLAY\"] = \":99\"\n subprocess.Popen(\n [\"Xvfb\", \":99\", \"-screen\", \"0\", \"1024x768x24\"],\n stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL\n )\n\n # =====================================================================\n # STEP 2: Clone Gaussian Splatting\n # =====================================================================\n print(\"\\n\" + \"=\"*70)\n print(\"STEP 2: Clone Gaussian Splatting\")\n print(\"=\"*70)\n\n if not os.path.exists(WORK_DIR):\n run_cmd([\n \"git\", \"clone\", \"--recursive\",\n \"https://github.com/graphdeco-inria/gaussian-splatting.git\",\n WORK_DIR\n ])\n else:\n print(\"✓ Repository already exists\")\n\n # =====================================================================\n # STEP 3: Python packages (FIXED ORDER & VERSIONS)\n # =====================================================================\n print(\"\\n\" + \"=\"*70)\n print(\"STEP 3: Python packages (VERBOSE MODE)\")\n print(\"=\"*70)\n\n # ---- PyTorch (Colab CUDA対応) ----\n print(\"\\n📦 Installing PyTorch...\")\n run_cmd([\n sys.executable, \"-m\", \"pip\", \"install\",\n \"torch\", \"torchvision\", \"torchaudio\"\n ])\n\n # ---- Core utils ----\n print(\"\\n📦 Installing core utilities...\")\n run_cmd([\n sys.executable, \"-m\", \"pip\", \"install\",\n \"opencv-python\",\n \"pillow\",\n \"imageio\",\n \"imageio-ffmpeg\",\n \"plyfile\",\n \"tqdm\",\n \"tensorboard\"\n ])\n\n # ---- transformers (NumPy 1.26 compatible) ----\n print(\"\\n📦 Installing transformers (NumPy 1.26 compatible)...\")\n 
# Install transformers with proper dependencies\n run_cmd([\n sys.executable, \"-m\", \"pip\", \"install\",\n \"transformers==4.40.0\"\n ])\n\n # ---- LightGlue stack (GITHUB INSTALL) ----\n print(\"\\n📦 Installing LightGlue stack...\")\n\n # Install kornia first\n run_cmd([sys.executable, \"-m\", \"pip\", \"install\", \"kornia\"])\n\n # Install h5py (sometimes needed)\n run_cmd([sys.executable, \"-m\", \"pip\", \"install\", \"h5py\"])\n\n # Install matplotlib (LightGlue dependency)\n run_cmd([sys.executable, \"-m\", \"pip\", \"install\", \"matplotlib\"])\n\n '''\n # Install LightGlue directly from GitHub (more reliable)\n print(\" Installing LightGlue from GitHub...\")\n run_cmd([sys.executable, \"-m\", \"pip\", \"install\",\n \"git+https://github.com/cvg/LightGlue.git\"])\n '''\n\n # Install pycolmap\n run_cmd([sys.executable, \"-m\", \"pip\", \"install\", \"pycolmap\"])\n\n # =====================================================================\n # STEP 4: Build GS submodules\n # =====================================================================\n print(\"\\n\" + \"=\"*70)\n print(\"STEP 4: Build Gaussian Splatting submodules\")\n print(\"=\"*70)\n\n submodules = {\n \"diff-gaussian-rasterization\":\n \"https://github.com/graphdeco-inria/diff-gaussian-rasterization.git\",\n \"simple-knn\":\n \"https://github.com/camenduru/simple-knn.git\"\n }\n\n for name, repo in submodules.items():\n print(f\"\\n📦 Installing {name}...\")\n path = os.path.join(WORK_DIR, \"submodules\", name)\n if not os.path.exists(path):\n run_cmd([\"git\", \"clone\", repo, path])\n run_cmd([sys.executable, \"-m\", \"pip\", \"install\", path])\n\n # =====================================================================\n # STEP 5: Detailed Verification\n # =====================================================================\n print(\"\\n\" + \"=\"*70)\n print(\"STEP 5: Detailed Verification\")\n print(\"=\"*70)\n\n # NumPy (verify version first)\n print(\"\\n🔍 Testing NumPy...\")\n try:\n 
import numpy as np\n print(f\" ✓ NumPy: {np.__version__}\")\n except Exception as e:\n print(f\" ❌ NumPy failed: {e}\")\n\n # PyTorch\n print(\"\\n🔍 Testing PyTorch...\")\n try:\n import torch\n print(f\" ✓ PyTorch: {torch.__version__}\")\n print(f\" ✓ CUDA available: {torch.cuda.is_available()}\")\n if torch.cuda.is_available():\n print(f\" ✓ CUDA version: {torch.version.cuda}\")\n except Exception as e:\n print(f\" ❌ PyTorch failed: {e}\")\n\n # transformers\n print(\"\\n🔍 Testing transformers...\")\n try:\n import transformers\n print(f\" ✓ transformers version: {transformers.__version__}\")\n from transformers import AutoModel\n print(f\" ✓ AutoModel import: OK\")\n except Exception as e:\n print(f\" ❌ transformers failed: {e}\")\n print(f\" Attempting detailed diagnosis...\")\n result = run_cmd([\n sys.executable, \"-c\",\n \"import transformers; print(transformers.__version__)\"\n ], capture=True)\n print(f\" Output: {result.stdout}\")\n print(f\" Error: {result.stderr}\")\n\n '''\n # LightGlue\n print(\"\\n🔍 Testing LightGlue...\")\n try:\n from lightglue import LightGlue, ALIKED\n print(f\" ✓ LightGlue: OK\")\n print(f\" ✓ ALIKED: OK\")\n except Exception as e:\n print(f\" ❌ LightGlue failed: {e}\")\n print(f\" Attempting detailed diagnosis...\")\n result = run_cmd([\n sys.executable, \"-c\",\n \"from lightglue import LightGlue\"\n ], capture=True)\n print(f\" Output: {result.stdout}\")\n print(f\" Error: {result.stderr}\")\n '''\n\n # pycolmap\n print(\"\\n🔍 Testing pycolmap...\")\n try:\n import pycolmap\n print(f\" ✓ pycolmap: OK\")\n except Exception as e:\n print(f\" ❌ pycolmap failed: {e}\")\n\n # kornia\n print(\"\\n🔍 Testing kornia...\")\n try:\n import kornia\n print(f\" ✓ kornia: {kornia.__version__}\")\n except Exception as e:\n print(f\" ❌ kornia failed: {e}\")\n\n print(\"\\n\" + \"=\"*70)\n print(\"✅ SETUP COMPLETE\")\n print(\"=\"*70)\n print(f\"Working dir: {WORK_DIR}\")\n\n return WORK_DIR\n\n\nif __name__ == \"__main__\":\n 
setup_environment()","metadata":{"execution":{"iopub.execute_input":"2026-01-10T18:17:32.363444Z","iopub.status.busy":"2026-01-10T18:17:32.363175Z","iopub.status.idle":"2026-01-10T18:22:43.720241Z","shell.execute_reply":"2026-01-10T18:22:43.71938Z"},"papermill":{"duration":311.361656,"end_time":"2026-01-10T18:22:43.72161","exception":false,"start_time":"2026-01-10T18:17:32.359954","status":"completed"},"tags":[],"id":"be6df249","outputId":"dd5c68e7-6606-4e7d-c4d3-02fac5253a0f"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"import os\nimport glob\nimport cv2\nimport numpy as np\nfrom PIL import Image\n\n# =========================================================\n# Utility: aspect ratio preserved + black padding\n# =========================================================\n\ndef normalize_image_sizes_biplet(input_dir, output_dir=None, size=1024, max_images=None):\n \"\"\"\n Generates two square crops (Left & Right or Top & Bottom)\n from each image in a directory and returns the output directory\n and the list of generated file paths.\n\n Args:\n input_dir: Input directory containing source images\n output_dir: Output directory for processed images\n size: Target square size (default: 1024)\n max_images: Maximum number of SOURCE images to process (default: None = all images)\n \"\"\"\n if output_dir is None:\n output_dir = 'output/images_biplet'\n os.makedirs(output_dir, exist_ok=True)\n\n print(f\"--- Step 1: Biplet-Square Normalization ---\")\n print(f\"Generating 2 cropped squares (Left/Right or Top/Bottom) for each image...\")\n print()\n\n generated_paths = []\n converted_count = 0\n size_stats = {}\n\n # Sort for consistent processing order\n image_files = sorted([f for f in os.listdir(input_dir)\n if f.lower().endswith(('.jpg', '.jpeg', '.png'))])\n\n # ★ max_images で元画像数を制限\n if max_images is not None:\n image_files = image_files[:max_images]\n print(f\"Processing limited to {max_images} source images (will generate {max_images * 2} 
cropped images)\")\n\n for img_file in image_files:\n input_path = os.path.join(input_dir, img_file)\n try:\n img = Image.open(input_path)\n original_size = img.size\n\n # Tracking original aspect ratios\n size_key = f\"{original_size[0]}x{original_size[1]}\"\n size_stats[size_key] = size_stats.get(size_key, 0) + 1\n\n # Generate 2 crops using the helper function\n crops = generate_two_crops(img, size)\n base_name, ext = os.path.splitext(img_file)\n\n for mode, cropped_img in crops.items():\n output_path = os.path.join(output_dir, f\"{base_name}_{mode}{ext}\")\n cropped_img.save(output_path, quality=95)\n generated_paths.append(output_path)\n\n converted_count += 1\n print(f\" ✓ {img_file}: {original_size} → 2 square images generated\")\n\n except Exception as e:\n print(f\" ✗ Error processing {img_file}: {e}\")\n\n print(f\"\\nProcessing complete: {converted_count} source images processed\")\n print(f\"Total output images: {len(generated_paths)}\")\n print(f\"Original size distribution: {size_stats}\")\n\n return output_dir, generated_paths\n\n\ndef generate_two_crops(img, size):\n \"\"\"\n Crops the image into a square and returns 2 variations\n (Left/Right for landscape, Top/Bottom for portrait).\n \"\"\"\n width, height = img.size\n crop_size = min(width, height)\n crops = {}\n\n if width > height:\n # Landscape → Left & Right\n positions = {\n 'left': 0,\n 'right': width - crop_size\n }\n for mode, x_offset in positions.items():\n box = (x_offset, 0, x_offset + crop_size, crop_size)\n crops[mode] = img.crop(box).resize(\n (size, size),\n Image.Resampling.LANCZOS\n )\n\n else:\n # Portrait or Square → Top & Bottom\n positions = {\n 'top': 0,\n 'bottom': height - crop_size\n }\n for mode, y_offset in positions.items():\n box = (0, y_offset, crop_size, y_offset + crop_size)\n crops[mode] = img.crop(box).resize(\n (size, size),\n Image.Resampling.LANCZOS\n )\n\n return 
crops\n","metadata":{"execution":{"iopub.execute_input":"2026-01-10T18:22:43.739411Z","iopub.status.busy":"2026-01-10T18:22:43.738855Z","iopub.status.idle":"2026-01-10T18:22:43.755664Z","shell.execute_reply":"2026-01-10T18:22:43.754865Z"},"papermill":{"duration":0.027297,"end_time":"2026-01-10T18:22:43.756758","exception":false,"start_time":"2026-01-10T18:22:43.729461","status":"completed"},"tags":[],"id":"b8690389"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"def run_colmap_reconstruction(image_dir, colmap_dir):\n \"\"\"Estimate camera poses and 3D point cloud with COLMAP\"\"\"\n print(\"Running SfM reconstruction with COLMAP...\")\n\n database_path = os.path.join(colmap_dir, \"database.db\")\n sparse_dir = os.path.join(colmap_dir, \"sparse\")\n os.makedirs(sparse_dir, exist_ok=True)\n\n # Set environment variable\n env = os.environ.copy()\n env['QT_QPA_PLATFORM'] = 'offscreen'\n\n # Feature extraction\n print(\"1/4: Extracting features...\")\n subprocess.run([\n 'colmap', 'feature_extractor',\n '--database_path', database_path,\n '--image_path', image_dir,\n '--ImageReader.single_camera', '1',\n '--ImageReader.camera_model', 'OPENCV',\n '--SiftExtraction.use_gpu', '0' # Use CPU\n ], check=True, env=env)\n\n # Feature matching\n print(\"2/4: Matching features...\")\n subprocess.run([\n 'colmap', 'exhaustive_matcher', # Use sequential_matcher instead of exhaustive_matcher\n '--database_path', database_path,\n '--SiftMatching.use_gpu', '0' # Use CPU\n ], check=True, env=env)\n\n # Sparse reconstruction\n print(\"3/4: Sparse reconstruction...\")\n subprocess.run([\n 'colmap', 'mapper',\n '--database_path', database_path,\n '--image_path', image_dir,\n '--output_path', sparse_dir,\n '--Mapper.ba_global_max_num_iterations', '20', # Speed up\n '--Mapper.ba_local_max_num_iterations', '10'\n ], check=True, env=env)\n\n # Export to text format\n print(\"4/4: Exporting to text format...\")\n model_dir = os.path.join(sparse_dir, '0')\n if not 
os.path.exists(model_dir):\n # Use the first model found\n subdirs = [d for d in os.listdir(sparse_dir) if os.path.isdir(os.path.join(sparse_dir, d))]\n if subdirs:\n model_dir = os.path.join(sparse_dir, subdirs[0])\n else:\n raise FileNotFoundError(\"COLMAP reconstruction failed\")\n\n subprocess.run([\n 'colmap', 'model_converter',\n '--input_path', model_dir,\n '--output_path', model_dir,\n '--output_type', 'TXT'\n ], check=True, env=env)\n\n print(f\"COLMAP reconstruction complete: {model_dir}\")\n return model_dir\n\n\ndef convert_cameras_to_pinhole(input_file, output_file):\n \"\"\"Convert camera model to PINHOLE format\"\"\"\n print(f\"Reading camera file: {input_file}\")\n\n with open(input_file, 'r') as f:\n lines = f.readlines()\n\n converted_count = 0\n with open(output_file, 'w') as f:\n for line in lines:\n if line.startswith('#') or line.strip() == '':\n f.write(line)\n else:\n parts = line.strip().split()\n if len(parts) >= 4:\n cam_id = parts[0]\n model = parts[1]\n width = parts[2]\n height = parts[3]\n params = parts[4:]\n\n # Convert to PINHOLE format\n if model == \"PINHOLE\":\n f.write(line)\n elif model == \"OPENCV\":\n # OPENCV: fx, fy, cx, cy, k1, k2, p1, p2\n fx = params[0]\n fy = params[1]\n cx = params[2]\n cy = params[3]\n f.write(f\"{cam_id} PINHOLE {width} {height} {fx} {fy} {cx} {cy}\\n\")\n converted_count += 1\n else:\n # Convert other models too\n fx = fy = max(float(width), float(height))\n cx = float(width) / 2\n cy = float(height) / 2\n f.write(f\"{cam_id} PINHOLE {width} {height} {fx} {fy} {cx} {cy}\\n\")\n converted_count += 1\n else:\n f.write(line)\n\n print(f\"Converted {converted_count} cameras to PINHOLE format\")\n\n\ndef prepare_gaussian_splatting_data(image_dir, colmap_model_dir):\n \"\"\"Prepare data for Gaussian Splatting\"\"\"\n print(\"Preparing data for Gaussian Splatting...\")\n\n data_dir = f\"{WORK_DIR}/data/video\"\n os.makedirs(f\"{data_dir}/sparse/0\", exist_ok=True)\n os.makedirs(f\"{data_dir}/images\", 
exist_ok=True)\n\n    # Copy images\n    print(\"Copying images...\")\n    img_count = 0\n    for img_file in os.listdir(image_dir):\n        if img_file.lower().endswith(('.jpg', '.jpeg', '.png')):\n            shutil.copy(\n                os.path.join(image_dir, img_file),\n                f\"{data_dir}/images/{img_file}\"\n            )\n            img_count += 1\n    print(f\"Copied {img_count} images\")\n\n    # Convert and copy camera file to PINHOLE format\n    print(\"Converting camera model to PINHOLE format...\")\n    convert_cameras_to_pinhole(\n        os.path.join(colmap_model_dir, 'cameras.txt'),\n        f\"{data_dir}/sparse/0/cameras.txt\"\n    )\n\n    # Copy other files, preserving each source filename (images.txt / points3D.txt)\n    for filename in ['images.txt', 'points3D.txt']:\n        src = os.path.join(colmap_model_dir, filename)\n        dst = f\"{data_dir}/sparse/0/{filename}\"\n        if os.path.exists(src):\n            shutil.copy(src, dst)\n            print(f\"Copied {filename}\")\n        else:\n            print(f\"Warning: {filename} not found\")\n\n    print(f\"Data preparation complete: {data_dir}\")\n    return data_dir\n\n\ndef train_gaussian_splatting(data_dir, iterations=3000):\n    \"\"\"Train the Gaussian Splatting model\"\"\"\n    print(f\"Training Gaussian Splatting model for {iterations} iterations...\")\n\n    model_path = f\"{WORK_DIR}/output/video\"\n\n    cmd = [\n        sys.executable, 'train.py',\n        '-s', data_dir,\n        '-m', model_path,\n        '--iterations', str(iterations),\n        '--eval'\n    ]\n\n    subprocess.run(cmd, cwd=WORK_DIR, check=True)\n\n    return model_path\n\n\ndef render_video(model_path, output_video_path, iteration=3000):\n    \"\"\"Generate video from the trained model\"\"\"\n    print(\"Rendering video...\")\n\n    # Execute rendering\n    cmd = [\n        sys.executable, 'render.py',\n        '-m', model_path,\n        '--iteration', str(iteration)\n    ]\n\n    subprocess.run(cmd, cwd=WORK_DIR, check=True)\n\n    # Find the rendering directory\n    possible_dirs = [\n        f\"{model_path}/test/ours_{iteration}/renders\",\n        f\"{model_path}/train/ours_{iteration}/renders\",\n    ]\n\n    render_dir = None\n    for test_dir in possible_dirs:\n        if os.path.exists(test_dir):\n            render_dir = test_dir\n            print(f\"Rendering directory 
found: {render_dir}\")\n break\n\n if render_dir and os.path.exists(render_dir):\n render_imgs = sorted([f for f in os.listdir(render_dir) if f.endswith('.png')])\n\n if render_imgs:\n print(f\"Found {len(render_imgs)} rendered images\")\n\n # Create video with ffmpeg\n subprocess.run([\n 'ffmpeg', '-y',\n '-framerate', '30',\n '-pattern_type', 'glob',\n '-i', f\"{render_dir}/*.png\",\n '-c:v', 'libx264',\n '-pix_fmt', 'yuv420p',\n '-crf', '18',\n output_video_path\n ], check=True)\n\n print(f\"Video saved: {output_video_path}\")\n return True\n\n print(\"Error: Rendering directory not found\")\n return False\n\n\ndef create_gif(video_path, gif_path):\n \"\"\"Create GIF from MP4\"\"\"\n print(\"Creating animated GIF...\")\n\n subprocess.run([\n 'ffmpeg', '-y',\n '-i', video_path,\n '-vf', 'setpts=8*PTS,fps=10,scale=720:-1:flags=lanczos',\n '-loop', '0',\n gif_path\n ], check=True)\n\n if os.path.exists(gif_path):\n size_mb = os.path.getsize(gif_path) / (1024 * 1024)\n print(f\"GIF creation complete: {gif_path} ({size_mb:.2f} MB)\")\n return True\n\n return False","metadata":{"execution":{"iopub.execute_input":"2026-01-10T18:22:43.772525Z","iopub.status.busy":"2026-01-10T18:22:43.772303Z","iopub.status.idle":"2026-01-10T18:22:43.790574Z","shell.execute_reply":"2026-01-10T18:22:43.789515Z"},"papermill":{"duration":0.027612,"end_time":"2026-01-10T18:22:43.791681","exception":false,"start_time":"2026-01-10T18:22:43.764069","status":"completed"},"tags":[],"id":"7acc20b6"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"def main_pipeline(image_dir, output_dir, square_size=1024, max_images=100):\n \"\"\"Main execution function\"\"\"\n try:\n # Step 1: 画像の正規化と前処理\n print(\"=\"*60)\n print(\"Step 1: Normalizing and preprocessing images\")\n print(\"=\"*60)\n\n frame_dir = os.path.join(COLMAP_DIR, \"images\")\n os.makedirs(frame_dir, exist_ok=True)\n\n # 画像を正規化して直接COLMAPのディレクトリに保存\n num_processed = normalize_image_sizes_biplet(\n input_dir=image_dir,\n 
output_dir=frame_dir,  # save directly into colmap/images\n            size=square_size,\n            max_images=max_images\n        )\n\n        # normalize_image_sizes_biplet returns (output_dir, generated_paths);\n        # report the number of generated images, not the tuple itself\n        print(f\"Processed {len(num_processed[1])} images\")\n\n        # Step 2: Estimate Camera Info with COLMAP\n        print(\"=\"*60)\n        print(\"Step 2: Running COLMAP reconstruction\")\n        print(\"=\"*60)\n        colmap_model_dir = run_colmap_reconstruction(frame_dir, COLMAP_DIR)\n\n        # Step 3: Prepare Data for Gaussian Splatting\n        print(\"=\"*60)\n        print(\"Step 3: Preparing Gaussian Splatting data\")\n        print(\"=\"*60)\n        data_dir = prepare_gaussian_splatting_data(frame_dir, colmap_model_dir)\n\n        # Step 4: Train Model\n        print(\"=\"*60)\n        print(\"Step 4: Training Gaussian Splatting model\")\n        print(\"=\"*60)\n        model_path = train_gaussian_splatting(data_dir, iterations=3000)\n\n        # Step 5: Render Video\n        print(\"=\"*60)\n        print(\"Step 5: Rendering video\")\n        print(\"=\"*60)\n        os.makedirs(OUTPUT_DIR, exist_ok=True)\n        output_video = os.path.join(OUTPUT_DIR, \"gaussian_splatting_video.mp4\")\n\n        success = render_video(model_path, output_video, iteration=3000)\n\n        if success:\n            print(\"=\"*60)\n            print(f\"Success! 
Video generation complete: {output_video}\")\n print(\"=\"*60)\n\n # Create GIF\n output_gif = os.path.join(OUTPUT_DIR, \"gaussian_splatting_video.gif\")\n create_gif(output_video, output_gif)\n\n # Display result\n from IPython.display import Image, display\n display(Image(open(output_gif, 'rb').read()))\n\n return output_video, output_gif\n else:\n print(\"Warning: Rendering complete, but video was not generated\")\n return None, None\n\n except Exception as e:\n print(f\"Error: {str(e)}\")\n import traceback\n traceback.print_exc()\n return None, None\n\n\nif __name__ == \"__main__\":\n IMAGE_DIR = \"/content/drive/MyDrive/your_folder/fountain100\"\n OUTPUT_DIR = \"/content/output\"\n COLMAP_DIR = \"/content/colmap_workspace\"\n\n video_path, gif_path = main_pipeline(\n image_dir=IMAGE_DIR,\n output_dir=OUTPUT_DIR,\n square_size=1024,\n max_images=30\n )\n\n\n","metadata":{"execution":{"iopub.execute_input":"2026-01-10T18:22:43.807508Z","iopub.status.busy":"2026-01-10T18:22:43.807294Z","iopub.status.idle":"2026-01-11T00:00:17.03089Z","shell.execute_reply":"2026-01-11T00:00:17.029927Z"},"papermill":{"duration":20253.434865,"end_time":"2026-01-11T00:00:17.234174","exception":false,"start_time":"2026-01-10T18:22:43.799309","status":"completed"},"tags":[],"id":"f75233a8","outputId":"d86dfee5-6f09-403d-c947-5fcd689d067b"},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"","metadata":{"papermill":{"duration":0.49801,"end_time":"2026-01-11T00:00:18.165833","exception":false,"start_time":"2026-01-11T00:00:17.667823","status":"completed"},"tags":[],"id":"e17ec719"}},{"cell_type":"markdown","source":"","metadata":{"papermill":{"duration":0.427583,"end_time":"2026-01-11T00:00:19.008387","exception":false,"start_time":"2026-01-11T00:00:18.580804","status":"completed"},"tags":[],"id":"38b3974c"}}]}
|
ex13-protocol.ipynb
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
{"metadata":{"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"name":"python"},"colab":{"provenance":[],"gpuType":"T4"},"accelerator":"GPU","widgets":{"application/vnd.jupyter.widget-state+json":{"d54a3ca373d748c3b6063f7944d320f5":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_bef87068a4d94bef9dfce33a9d23e089","IPY_MODEL_8f3ae5baabbf44f3bf925d8cd3d6a664","IPY_MODEL_ab45b6e07fa74763abee8739860ef802"],"layout":"IPY_MODEL_91cf3d40894944d2a6e5731c38ebc6d8"}},"bef87068a4d94bef9dfce33a9d23e089":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_3ff85810885447dd916ab049d2f71bc1","placeholder":"","style":"IPY_MODEL_d7cf74fecf3c4688a72ce6553c022b47","value":"preprocessor_config.json: 
100%"}},"8f3ae5baabbf44f3bf925d8cd3d6a664":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_d7c893483ed64098af4bf9fb3d52511f","max":436,"min":0,"orientation":"horizontal","style":"IPY_MODEL_b4372bf488d842dd9d41633d838e97fe","value":436}},"ab45b6e07fa74763abee8739860ef802":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_5501ecdc0c3e4920a04879ffc8335449","placeholder":"","style":"IPY_MODEL_7b082beb8b304620b20cca2b117d3d19","value":" 436/436 [00:00<00:00, 
21.4kB/s]"}},"91cf3d40894944d2a6e5731c38ebc6d8":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"3ff85810885447dd916ab049d2f71bc1":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"
overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"d7cf74fecf3c4688a72ce6553c022b47":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"d7c893483ed64098af4bf9fb3d52511f":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"b4372bf488d842dd9d41633d838e97fe":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"5501ecdc0c3e4920a04879ffc8335449":{"model_m
odule":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"7b082beb8b304620b20cca2b117d3d19":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"3caf6ce3be86419eab4b3a5cc92c0f3e":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_f84dd43b90ff4f5badcb411ef170713a","IPY_MODEL_def430e09227485d854321e94fb17549","IPY_MODEL_f94ec42f81084a399ee425790cb3cf8a"],"layout":"IPY_MODEL_d8d50fd791fd4098ab1691b3a3a23c50"
}},"f84dd43b90ff4f5badcb411ef170713a":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_e479418ea8144971873f7e2c195d6002","placeholder":"","style":"IPY_MODEL_ae4a9989d81b446299751cb1f1d68f97","value":"config.json: 100%"}},"def430e09227485d854321e94fb17549":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_6f7ac7e4c90645dfae815d4a3c54459b","max":548,"min":0,"orientation":"horizontal","style":"IPY_MODEL_0a82e9b8ab35464bbdcae65f2bf35d5a","value":548}},"f94ec42f81084a399ee425790cb3cf8a":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_a61c105a4ed546fd907041026dc87beb","placeholder":"","style":"IPY_MODEL_9e55321fa5cb4ef8a170e7ec6a9aaa9f","value":" 548/548 [00:00<00:00, 
17.7kB/s]"}},"d8d50fd791fd4098ab1691b3a3a23c50":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"e479418ea8144971873f7e2c195d6002":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"
overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"ae4a9989d81b446299751cb1f1d68f97":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"6f7ac7e4c90645dfae815d4a3c54459b":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"0a82e9b8ab35464bbdcae65f2bf35d5a":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"a61c105a4ed546fd907041026dc87beb":{"model_m
odule":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"9e55321fa5cb4ef8a170e7ec6a9aaa9f":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"884613eba51d462b89ee37461de1af2d":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_80d18b82487548818e2844f1708ff058","IPY_MODEL_f3a4ec7390064c848a377a85d8aa09de","IPY_MODEL_783b7fa88d30476ca3b75e43fb33dbf7"],"layout":"IPY_MODEL_6f5989c42d6746519c6bde617ed0f8e7"
}},"80d18b82487548818e2844f1708ff058":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_87341d9784a34784b9a7209189bc2581","placeholder":"","style":"IPY_MODEL_46eae6702f3c4614a1e444d3b3f3da18","value":"model.safetensors: 100%"}},"f3a4ec7390064c848a377a85d8aa09de":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_dbf5c8875e9f46ccb647b4204fa429a5","max":346345912,"min":0,"orientation":"horizontal","style":"IPY_MODEL_22523fb1c2964a049a65b0e2b0f3b5ec","value":346345912}},"783b7fa88d30476ca3b75e43fb33dbf7":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_8dfdb681aad14507b2709740f2e58362","placeholder":"","style":"IPY_MODEL_0412c710886045a6a9bbc752582202f3","value":" 346M/346M [00:03<00:00, 
143MB/s]"}},"6f5989c42d6746519c6bde617ed0f8e7":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"87341d9784a34784b9a7209189bc2581":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"o
verflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"46eae6702f3c4614a1e444d3b3f3da18":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"dbf5c8875e9f46ccb647b4204fa429a5":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"22523fb1c2964a049a65b0e2b0f3b5ec":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"8dfdb681aad14507b2709740f2e58362":{"model_mo
dule":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"0412c710886045a6a9bbc752582202f3":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"f788806a652747f7a20dbcdd51b09f6a":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_f29a9fccd0fc434baa33de09787f6a23","IPY_MODEL_2e1519a3c5604327b0dba766b4991e6c","IPY_MODEL_db7c7b60e2934671a2fab26567da72f7"],"layout":"IPY_MODEL_60de62d02d6742f8ade69790686576e5"}
},"f29a9fccd0fc434baa33de09787f6a23":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_894372b0803243448de8c28636d6a699","placeholder":"","style":"IPY_MODEL_70c0f25009a442a7a7d391ddbac4a69e","value":"Loading weights: 100%"}},"2e1519a3c5604327b0dba766b4991e6c":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_5eed7b16dfe2443cac3a0601e438be41","max":223,"min":0,"orientation":"horizontal","style":"IPY_MODEL_248829e8205e47f1a2f316055002e0d4","value":223}},"db7c7b60e2934671a2fab26567da72f7":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_fa00b8691718403ca9a2c7d23d06bab6","placeholder":"","style":"IPY_MODEL_ea24828291cd41e7b6fa5946b55d426e","value":" 223/223 [00:00<00:00, 443.22it/s, Materializing 
param=layernorm.weight]"}},"60de62d02d6742f8ade69790686576e5":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"894372b0803243448de8c28636d6a699":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"over
flow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"70c0f25009a442a7a7d391ddbac4a69e":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"5eed7b16dfe2443cac3a0601e438be41":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"248829e8205e47f1a2f316055002e0d4":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"fa00b8691718403ca9a2c7d23d06b
ab6":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"ea24828291cd41e7b6fa5946b55d426e":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}}}},"kaggle":{"accelerator":"none","dataSources":[],"isInternetEnabled":true,"language":"python","sourceType":"notebook","isGpuEnabled":false}},"nbformat_minor":4,"nbformat":4,"cells":[{"cell_type":"markdown","source":"# **biplet-dino-lightglue-colmap-gs**\n","metadata":{"id":"qDQLX3PArmh8"}},{"cell_type":"markdown","source":"# **setup**","metadata":{"id":"vXt8y7QyyRn9"}},{"cell_type":"code","source":"from google.colab import 
drive\ndrive.mount('/content/drive')","metadata":{"id":"t9kAhlZHTIqC","outputId":"875640ed-fd53-4ab2-e6cb-d623898f925d"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"import os\nimport sys\nimport subprocess\nfrom PIL import Image\n\ndef run_cmd(cmd, check=True, capture=False):\n \"\"\"Run command with better error handling\"\"\"\n print(f\"Running: {' '.join(cmd)}\")\n result = subprocess.run(\n cmd,\n capture_output=capture,\n text=True,\n check=False\n )\n if check and result.returncode != 0:\n print(f\"❌ Command failed with code {result.returncode}\")\n if capture:\n print(f\"STDOUT: {result.stdout}\")\n print(f\"STDERR: {result.stderr}\")\n return result\n\ndef setup_environment():\n \"\"\"\n Colab environment setup for Gaussian Splatting + LightGlue + pycolmap\n Python 3.12 compatible version (v8)\n \"\"\"\n\n print(\"🚀 Setting up COLAB environment (v8 - Python 3.12 compatible)\")\n\n WORK_DIR = \"/content/gaussian-splatting\"\n\n # =====================================================================\n # STEP 0: NumPy FIX (Python 3.12 compatible)\n # =====================================================================\n print(\"\\n\" + \"=\"*70)\n print(\"STEP 0: Fix NumPy (Python 3.12 compatible)\")\n print(\"=\"*70)\n\n # Python 3.12 requires numpy >= 1.26\n #run_cmd([sys.executable, \"-m\", \"pip\", \"uninstall\", \"-y\", \"numpy\"])\n #run_cmd([sys.executable, \"-m\", \"pip\", \"install\", \"numpy\"]) #no need to be 1.26.4\n\n # sanity check\n #run_cmd([sys.executable, \"-c\", \"import numpy; print('NumPy:', numpy.__version__)\"])\n\n # =====================================================================\n # STEP 1: System packages (Colab)\n # =====================================================================\n print(\"\\n\" + \"=\"*70)\n print(\"STEP 1: System packages\")\n print(\"=\"*70)\n\n run_cmd([\"apt-get\", \"update\", \"-qq\"])\n run_cmd([\n \"apt-get\", \"install\", \"-y\", \"-qq\",\n \"colmap\",\n 
\"build-essential\",\n \"cmake\",\n \"git\",\n \"libopenblas-dev\",\n \"xvfb\"\n ])\n\n # virtual display (COLMAP / OpenCV safety)\n os.environ[\"QT_QPA_PLATFORM\"] = \"offscreen\"\n os.environ[\"DISPLAY\"] = \":99\"\n subprocess.Popen(\n [\"Xvfb\", \":99\", \"-screen\", \"0\", \"1024x768x24\"],\n stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL\n )\n\n # =====================================================================\n # STEP 2: Clone Gaussian Splatting\n # =====================================================================\n print(\"\\n\" + \"=\"*70)\n print(\"STEP 2: Clone Gaussian Splatting\")\n print(\"=\"*70)\n\n if not os.path.exists(WORK_DIR):\n run_cmd([\n \"git\", \"clone\", \"--recursive\",\n \"https://github.com/graphdeco-inria/gaussian-splatting.git\",\n WORK_DIR\n ])\n else:\n print(\"✓ Repository already exists\")\n\n # =====================================================================\n # STEP 3: Python packages (FIXED ORDER & VERSIONS)\n # =====================================================================\n print(\"\\n\" + \"=\"*70)\n print(\"STEP 3: Python packages (VERBOSE MODE)\")\n print(\"=\"*70)\n\n # ---- PyTorch (Colab CUDA対応) ----\n print(\"\\n📦 Installing PyTorch...\")\n run_cmd([\n sys.executable, \"-m\", \"pip\", \"install\",\n \"torch\", \"torchvision\", \"torchaudio\"\n ])\n\n # ---- Core utils ----\n print(\"\\n📦 Installing core utilities...\")\n run_cmd([\n sys.executable, \"-m\", \"pip\", \"install\",\n \"opencv-python\",\n \"pillow\",\n \"imageio\",\n \"imageio-ffmpeg\",\n \"plyfile\",\n \"tqdm\",\n \"tensorboard\"\n ])\n\n # ---- transformers (NumPy 1.26 compatible) ----\n print(\"\\n📦 Installing transformers...\")\n # Install transformers with proper dependencies\n run_cmd([\n sys.executable, \"-m\", \"pip\", \"install\",\n \"transformers>=4.45.0\"\n ])\n\n # ---- LightGlue stack (GITHUB INSTALL) ----\n print(\"\\n📦 Installing LightGlue stack...\")\n\n # Install kornia first\n run_cmd([sys.executable, \"-m\", 
\"pip\", \"install\", \"kornia\"])\n\n # Install h5py (sometimes needed)\n run_cmd([sys.executable, \"-m\", \"pip\", \"install\", \"h5py\"])\n\n # Install matplotlib (LightGlue dependency)\n run_cmd([sys.executable, \"-m\", \"pip\", \"install\", \"matplotlib\"])\n\n # Install LightGlue directly from GitHub (more reliable)\n print(\" Installing LightGlue from GitHub...\")\n run_cmd([sys.executable, \"-m\", \"pip\", \"install\",\n \"git+https://github.com/cvg/LightGlue.git\"])\n\n # Install pycolmap\n run_cmd([sys.executable, \"-m\", \"pip\", \"install\", \"pycolmap\"])\n\n # =====================================================================\n # STEP 4: Build GS submodules\n # =====================================================================\n print(\"\\n\" + \"=\"*70)\n print(\"STEP 4: Build Gaussian Splatting submodules\")\n print(\"=\"*70)\n\n submodules = {\n \"diff-gaussian-rasterization\":\n \"https://github.com/graphdeco-inria/diff-gaussian-rasterization.git\",\n \"simple-knn\":\n \"https://github.com/camenduru/simple-knn.git\"\n }\n\n for name, repo in submodules.items():\n print(f\"\\n📦 Installing {name}...\")\n path = os.path.join(WORK_DIR, \"submodules\", name)\n if not os.path.exists(path):\n run_cmd([\"git\", \"clone\", repo, path])\n run_cmd([sys.executable, \"-m\", \"pip\", \"install\", path])\n\n # =====================================================================\n # STEP 5: Detailed Verification\n # =====================================================================\n print(\"\\n\" + \"=\"*70)\n print(\"STEP 5: Detailed Verification\")\n print(\"=\"*70)\n\n # NumPy (verify version first)\n print(\"\\n🔍 Testing NumPy...\")\n try:\n import numpy as np\n print(f\" ✓ NumPy: {np.__version__}\")\n except Exception as e:\n print(f\" ❌ NumPy failed: {e}\")\n\n # PyTorch\n print(\"\\n🔍 Testing PyTorch...\")\n try:\n import torch\n print(f\" ✓ PyTorch: {torch.__version__}\")\n print(f\" ✓ CUDA available: {torch.cuda.is_available()}\")\n if 
torch.cuda.is_available():\n print(f\" ✓ CUDA version: {torch.version.cuda}\")\n except Exception as e:\n print(f\" ❌ PyTorch failed: {e}\")\n\n # transformers\n print(\"\\n🔍 Testing transformers...\")\n try:\n import transformers\n print(f\" ✓ transformers version: {transformers.__version__}\")\n from transformers import AutoModel\n print(f\" ✓ AutoModel import: OK\")\n except Exception as e:\n print(f\" ❌ transformers failed: {e}\")\n print(f\" Attempting detailed diagnosis...\")\n result = run_cmd([\n sys.executable, \"-c\",\n \"import transformers; print(transformers.__version__)\"\n ], capture=True)\n print(f\" Output: {result.stdout}\")\n print(f\" Error: {result.stderr}\")\n\n # LightGlue\n print(\"\\n🔍 Testing LightGlue...\")\n try:\n from lightglue import LightGlue, ALIKED\n print(f\" ✓ LightGlue: OK\")\n print(f\" ✓ ALIKED: OK\")\n except Exception as e:\n print(f\" ❌ LightGlue failed: {e}\")\n print(f\" Attempting detailed diagnosis...\")\n result = run_cmd([\n sys.executable, \"-c\",\n \"from lightglue import LightGlue\"\n ], capture=True)\n print(f\" Output: {result.stdout}\")\n print(f\" Error: {result.stderr}\")\n\n # pycolmap\n print(\"\\n🔍 Testing pycolmap...\")\n try:\n import pycolmap\n print(f\" ✓ pycolmap: OK\")\n except Exception as e:\n print(f\" ❌ pycolmap failed: {e}\")\n\n # kornia\n print(\"\\n🔍 Testing kornia...\")\n try:\n import kornia\n print(f\" ✓ kornia: {kornia.__version__}\")\n except Exception as e:\n print(f\" ❌ kornia failed: {e}\")\n\n print(\"\\n\" + \"=\"*70)\n print(\"✅ SETUP COMPLETE\")\n print(\"=\"*70)\n print(f\"Working dir: {WORK_DIR}\")\n\n\n\n print()\n print(\"-------!pip show numpy | grep Version--------\")\n !pip show numpy | grep Version\n\n\n return WORK_DIR\n\n\nif __name__ == \"__main__\":\n setup_environment()","metadata":{"id":"z6cBHbABzZ0F","outputId":"17806e8d-1dad-4882-f56d-7c3276e0c170"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"import os\n\n%cd 
/content/gaussian-splatting\n\nfiles = ['database.py', 'h5_to_db.py', 'metric.py']\nbase_url = 'https://huggingface.co/stpete2/imc25_utils/resolve/main/'\n\nfor file in files:\n if not os.path.exists(file):\n !wget -q {base_url + file}\n print(f\"✓ {file} download complete\")\n else:\n print(f\"✓ {file} already exists\")\n","metadata":{"id":"eJrkKiCLzt1G","outputId":"850f6c00-2873-4d47-e341-b4f20dfb902a"},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"# **install libraries**","metadata":{"id":"DwyCRLt4yYfx"}},{"cell_type":"code","source":"","metadata":{"id":"uznATYrilO-N"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"from database import COLMAPDatabase, image_ids_to_pair_id\nfrom h5_to_db import add_keypoints, add_matches\nfrom metric import *","metadata":{"id":"WVr8ggyVuq6q"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"\"\"\"\nGaussian Splatting Pipeline\nSimple and robust pipeline: LightGlue → COLMAP → Gaussian Splatting\n\"\"\"\n\nimport os\nimport sys\nimport gc\nimport h5py\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom tqdm import tqdm\nfrom pathlib import Path\nimport subprocess\n\n# LightGlue\nfrom lightglue import ALIKED, LightGlue\nfrom lightglue.utils import load_image\n\n# Transformers for DINO\nfrom transformers import AutoImageProcessor, AutoModel\n\n\n# ============================================================================\n# Configuration\n# ============================================================================\nclass Config:\n # Feature extraction\n N_KEYPOINTS = 8192\n IMAGE_SIZE = 1024\n\n # Pair selection\n GLOBAL_TOPK = 200\n MIN_MATCHES = 10\n RATIO_THR = 1.2\n\n # Paths\n DINO_MODEL = \"facebook/dinov2-base\" # Change if using local path\n\n # Device\n DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')","metadata":{"id":"7NfrJdMvrPZn"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"\ndef 
normalize_image_sizes_biplet(input_dir, output_dir=None, size=1024, max_images=None):\n \"\"\"\n Generates two square crops (Left & Right or Top & Bottom)\n from each image in a directory and returns the output directory\n and the list of generated file paths.\n\n Args:\n input_dir: Input directory containing source images\n output_dir: Output directory for processed images\n size: Target square size (default: 1024)\n max_images: Maximum number of SOURCE images to process (default: None = all images)\n \"\"\"\n if output_dir is None:\n output_dir = 'output/images_biplet'\n os.makedirs(output_dir, exist_ok=True)\n\n print(f\"--- Step 1: Biplet-Square Normalization ---\")\n print(f\"Generating 2 cropped squares (Left/Right or Top/Bottom) for each image...\")\n print()\n\n generated_paths = []\n converted_count = 0\n size_stats = {}\n\n # Sort for consistent processing order\n image_files = sorted([f for f in os.listdir(input_dir)\n if f.lower().endswith(('.jpg', '.jpeg', '.png'))])\n\n # ★ max_images で元画像数を制限\n if max_images is not None:\n image_files = image_files[:max_images]\n print(f\"Processing limited to {max_images} source images (will generate {max_images * 2} cropped images)\")\n\n for img_file in image_files:\n input_path = os.path.join(input_dir, img_file)\n try:\n img = Image.open(input_path)\n original_size = img.size\n\n # Tracking original aspect ratios\n size_key = f\"{original_size[0]}x{original_size[1]}\"\n size_stats[size_key] = size_stats.get(size_key, 0) + 1\n\n # Generate 2 crops using the helper function\n crops = generate_two_crops(img, size)\n base_name, ext = os.path.splitext(img_file)\n\n for mode, cropped_img in crops.items():\n output_path = os.path.join(output_dir, f\"{base_name}_{mode}{ext}\")\n cropped_img.save(output_path, quality=95)\n generated_paths.append(output_path)\n\n converted_count += 1\n print(f\" ✓ {img_file}: {original_size} → 2 square images generated\")\n\n except Exception as e:\n print(f\" ✗ Error processing 
{img_file}: {e}\")\n\n print(f\"\\nProcessing complete: {converted_count} source images processed\")\n print(f\"Total output images: {len(generated_paths)}\")\n print(f\"Original size distribution: {size_stats}\")\n\n return output_dir, generated_paths\n\n\n\ndef generate_two_crops(img, size):\n \"\"\"\n Crops the image into a square and returns 2 variations\n (Left/Right for landscape, Top/Bottom for portrait).\n \"\"\"\n width, height = img.size\n crop_size = min(width, height)\n crops = {}\n\n if width > height:\n # Landscape → Left & Right\n positions = {\n 'left': 0,\n 'right': width - crop_size\n }\n for mode, x_offset in positions.items():\n box = (x_offset, 0, x_offset + crop_size, crop_size)\n crops[mode] = img.crop(box).resize(\n (size, size),\n Image.Resampling.LANCZOS\n )\n\n else:\n # Portrait or Square → Top & Bottom\n positions = {\n 'top': 0,\n 'bottom': height - crop_size\n }\n for mode, y_offset in positions.items():\n box = (0, y_offset, crop_size, y_offset + crop_size)\n crops[mode] = img.crop(box).resize(\n (size, size),\n Image.Resampling.LANCZOS\n )\n\n return crops","metadata":{"id":"A6smO9X0el3d"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"# ============================================================================\n# Step 1: Image Pair Selection (DINO + ALIKED local verify)\n# ============================================================================\n\ndef load_torch_image(fname, device):\n \"\"\"Load image as torch tensor\"\"\"\n from PIL import Image\n import torchvision.transforms as T\n\n img = Image.open(fname).convert('RGB')\n transform = T.Compose([\n T.ToTensor(),\n ])\n return transform(img).unsqueeze(0).to(device)\n\ndef extract_dino_global(image_paths, model_path, device):\n \"\"\"Extract DINO global descriptors\"\"\"\n print(\"\\n=== Extracting DINO Global Features ===\")\n\n processor = AutoImageProcessor.from_pretrained(model_path)\n model = 
AutoModel.from_pretrained(model_path).eval().to(device)\n\n global_descs = []\n for img_path in tqdm(image_paths):\n img = load_torch_image(img_path, device)\n with torch.no_grad():\n inputs = processor(images=img, return_tensors=\"pt\", do_rescale=False).to(device)\n outputs = model(**inputs)\n desc = F.normalize(outputs.last_hidden_state[:, 1:].max(dim=1)[0], dim=1, p=2)\n global_descs.append(desc.cpu())\n\n global_descs = torch.cat(global_descs, dim=0)\n\n del model\n torch.cuda.empty_cache()\n gc.collect()\n\n return global_descs\n\ndef build_topk_pairs(global_feats, k, device):\n \"\"\"Build top-k similar pairs from global features\"\"\"\n g = global_feats.to(device)\n sim = g @ g.T\n sim.fill_diagonal_(-1)\n\n N = sim.size(0)\n k = min(k, N - 1)\n\n topk_indices = torch.topk(sim, k, dim=1).indices.cpu()\n\n pairs = []\n for i in range(N):\n for j in topk_indices[i]:\n j = j.item()\n if i < j:\n pairs.append((i, j))\n\n return list(set(pairs))\n\ndef extract_aliked_features(image_paths, device):\n \"\"\"Extract ALIKED local features\"\"\"\n print(\"\\n=== Extracting ALIKED Local Features ===\")\n\n extractor = ALIKED(\n model_name=\"aliked-n16\",\n max_num_keypoints=Config.N_KEYPOINTS,\n detection_threshold=0.01,\n resize=Config.IMAGE_SIZE\n ).eval().to(device)\n\n features = []\n for img_path in tqdm(image_paths):\n img = load_torch_image(img_path, device)\n with torch.no_grad():\n feats = extractor.extract(img)\n kpts = feats['keypoints'].reshape(-1, 2).cpu()\n descs = feats['descriptors'].reshape(len(kpts), -1).cpu()\n features.append({'keypoints': kpts, 'descriptors': descs})\n\n del extractor\n torch.cuda.empty_cache()\n gc.collect()\n\n return features\n\ndef verify_pairs_locally(pairs, features, device, threshold=Config.MIN_MATCHES):\n \"\"\"Verify pairs using local descriptor matching\"\"\"\n print(\"\\n=== Verifying Pairs with Local Features ===\")\n\n verified = []\n for i, j in tqdm(pairs):\n desc1 = features[i]['descriptors'].to(device)\n desc2 = 
features[j]['descriptors'].to(device)\n\n if len(desc1) == 0 or len(desc2) == 0:\n continue\n\n # Simple mutual nearest neighbor\n dist = torch.cdist(desc1, desc2, p=2)\n min_dist = dist.min(dim=1)[0]\n n_matches = (min_dist < Config.RATIO_THR).sum().item()\n\n if n_matches >= threshold:\n verified.append((i, j))\n\n return verified\n\ndef get_image_pairs(image_paths):\n \"\"\"Main pair selection pipeline\"\"\"\n device = Config.DEVICE\n\n # 1. DINO global\n global_feats = extract_dino_global(image_paths, Config.DINO_MODEL, device)\n pairs = build_topk_pairs(global_feats, Config.GLOBAL_TOPK, device)\n\n print(f\"Initial pairs from global features: {len(pairs)}\")\n\n # 2. ALIKED local\n features = extract_aliked_features(image_paths, device)\n\n # 3. Local verification\n verified_pairs = verify_pairs_locally(pairs, features, device)\n\n print(f\"Verified pairs: {len(verified_pairs)}\")\n\n return verified_pairs, features","metadata":{"id":"FNjFURfYmVcL"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"# ============================================================================\n# Step 2: Feature Matching (ALIKED + LightGlue)\n# ============================================================================\n\ndef match_pairs_lightglue(image_paths, pairs, features, output_dir):\n \"\"\"\n Match image pairs using LightGlue\n \"\"\"\n print(\"\\n=== Matching with LightGlue ===\")\n\n os.makedirs(output_dir, exist_ok=True)\n keypoints_path = os.path.join(output_dir, 'keypoints.h5')\n matches_path = os.path.join(output_dir, 'matches.h5')\n\n if os.path.exists(keypoints_path):\n os.remove(keypoints_path)\n if os.path.exists(matches_path):\n os.remove(matches_path)\n\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n extractor = ALIKED(max_num_keypoints=4096, detection_threshold=0.2, nms_radius=2).eval().to(device)\n matcher = LightGlue(features='aliked').eval().to(device)\n\n if isinstance(features, dict):\n all_keypoints = 
features['keypoints']\n all_descriptors = features['descriptors']\n elif isinstance(features, list):\n all_keypoints = [f['keypoints'] for f in features]\n all_descriptors = [f['descriptors'] for f in features]\n else:\n raise ValueError(f\"Unsupported features type: {type(features)}\")\n\n with h5py.File(keypoints_path, 'w') as f_kp:\n for idx, img_path in enumerate(tqdm(image_paths, desc=\"Saving keypoints\")):\n img_name = os.path.splitext(os.path.basename(img_path))[0]\n\n kp = all_keypoints[idx]\n if torch.is_tensor(kp):\n kp = kp.cpu().numpy()\n f_kp.create_dataset(img_name, data=kp)\n\n # Match pairs\n with h5py.File(matches_path, 'w') as f_match:\n for idx1, idx2 in tqdm(pairs, desc=\"Matching\"):\n with torch.no_grad():\n kp0 = all_keypoints[idx1]\n kp1 = all_keypoints[idx2]\n desc0 = all_descriptors[idx1]\n desc1 = all_descriptors[idx2]\n\n if isinstance(kp0, np.ndarray):\n kp0 = torch.from_numpy(kp0).float().to(device)\n kp1 = torch.from_numpy(kp1).float().to(device)\n desc0 = torch.from_numpy(desc0).float().to(device)\n desc1 = torch.from_numpy(desc1).float().to(device)\n else:\n kp0 = kp0.float().to(device)\n kp1 = kp1.float().to(device)\n desc0 = desc0.float().to(device)\n desc1 = desc1.float().to(device)\n\n feats0 = {\n 'keypoints': kp0.unsqueeze(0) if kp0.dim() == 2 else kp0,\n 'descriptors': desc0.unsqueeze(0) if desc0.dim() == 2 else desc0,\n }\n feats1 = {\n 'keypoints': kp1.unsqueeze(0) if kp1.dim() == 2 else kp1,\n 'descriptors': desc1.unsqueeze(0) if desc1.dim() == 2 else desc1,\n }\n\n matches01 = matcher({'image0': feats0, 'image1': feats1})\n\n if 'matches0' in matches01:\n matches0 = matches01['matches0'].cpu().numpy()\n if matches0.ndim > 1:\n matches0 = matches0[0]\n valid = matches0 > -1\n matches = np.stack([np.where(valid)[0], matches0[valid]], axis=1)\n elif 'matches' in matches01:\n m = matches01['matches']\n if isinstance(m, list):\n matches = np.array(m)\n elif hasattr(m, 'cpu'):\n matches = m.cpu().numpy()\n else:\n matches = 
np.array(m)\n else:\n continue\n\n if len(matches) > 0:\n img_name1 = os.path.splitext(os.path.basename(image_paths[idx1]))[0]\n img_name2 = os.path.splitext(os.path.basename(image_paths[idx2]))[0]\n pair_key = f\"{img_name1}_{img_name2}\"\n f_match.create_dataset(pair_key, data=matches)\n\n print(f\"✓ Matches saved to {matches_path}\")\n","metadata":{"id":"X-PKgmdwmVcL"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"# ============================================================================\n# Step 3: Import to COLMAP\n# ============================================================================\n\ndef import_to_colmap(image_dir, feature_dir, database_path, single_camera=True):\n \"\"\"\n Import features and matches to COLMAP database\n\n Args:\n image_dir (str): Directory containing the images.\n feature_dir (str): Directory to save/load extracted features.\n database_path (str): Path to the database file.\n single_camera (bool): Set to True if all images have the same dimensions (e.g., pre-resized).\n \"\"\"\n print(\"\\n=== Creating COLMAP Database ===\")\n\n if os.path.exists(database_path):\n os.remove(database_path)\n print(f\"✓ Removed existing database\")\n\n db = COLMAPDatabase.connect(database_path)\n db.create_tables()\n\n print(f\"Single camera mode: {single_camera}\")\n\n image_files = [f for f in os.listdir(image_dir)\n if f.lower().endswith(('.jpg', '.jpeg', '.png'))]\n if not image_files:\n raise ValueError(f\"No images found in {image_dir}\")\n\n first_image = sorted(image_files)[0]\n img_ext = os.path.splitext(first_image)[1]\n print(f\"Detected image extension: '{img_ext}'\")\n\n fname_to_id = add_keypoints(\n db,\n feature_dir,\n image_dir,\n img_ext,\n 'PINHOLE',\n single_camera=single_camera\n )\n\n add_matches(db, feature_dir, fname_to_id)\n db.commit()\n db.close()\n\n print(f\"✓ Database created: {database_path}\")\n\n# ============================================================================\n# Step 4: Run 
COLMAP Mapper\n# ============================================================================\n\ndef run_colmap_mapper(database_path, image_dir, output_dir):\n \"\"\"\n Run COLMAP mapper with verbose output\n \"\"\"\n print(\"\\n=== Running COLMAP Reconstruction ===\")\n os.makedirs(output_dir, exist_ok=True)\n cmd = [\n 'colmap', 'mapper',\n '--database_path', database_path,\n '--image_path', image_dir,\n '--output_path', output_dir,\n '--Mapper.ba_refine_focal_length', '0',\n '--Mapper.ba_refine_principal_point', '0',\n '--Mapper.ba_refine_extra_params', '0',\n '--Mapper.min_num_matches', '15',\n '--Mapper.init_min_num_inliers', '50',\n '--Mapper.max_num_models', '1',\n '--Mapper.num_threads', '16',\n ]\n print(f\"Command: {' '.join(cmd)}\\n\")\n\n process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)\n for line in process.stdout:\n print(line, end='')\n process.wait()\n if process.returncode == 0:\n model_dir = os.path.join(output_dir, '0')\n if os.path.exists(model_dir):\n print(f\"\\n✓ COLMAP reconstruction complete: {model_dir}\")\n return model_dir\n raise RuntimeError(\"COLMAP reconstruction failed\")","metadata":{"id":"NJedFruCmVcL"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"# ============================================================================\n# Step 5: Convert to Gaussian Splatting Format (if needed)\n# ============================================================================\n\ndef convert_to_gs_format(colmap_model_dir, output_dir):\n \"\"\"\n Verify COLMAP output and prepare paths for Gaussian Splatting.\n\n Args:\n colmap_model_dir (str): Path to the COLMAP sparse/0 directory.\n Example: /content/output/colmap/sparse/0\n output_dir (str): Base output directory.\n\n Returns:\n colmap_parent_dir (str): The path to be passed to Gaussian Splatting.\n Example: /content/output/colmap (Parent directory containing 'sparse/')\n \"\"\"\n print(\"\\n=== Verifying COLMAP Model for 
Gaussian Splatting ===\")\n\n import pycolmap\n reconstruction = pycolmap.Reconstruction(colmap_model_dir)\n\n print(f\"Registered images: {len(reconstruction.images)}\")\n print(f\"3D points: {len(reconstruction.points3D)}\")\n\n # Check for files required by Gaussian Splatting\n required_files = ['cameras.bin', 'images.bin', 'points3D.bin']\n for file in required_files:\n file_path = os.path.join(colmap_model_dir, file)\n if not os.path.exists(file_path):\n raise FileNotFoundError(f\"Required file not found: {file}\")\n print(f\" ✓ {file}\")\n\n # Return the grandparent directory of sparse/0\n # /content/output/colmap/sparse/0 -> /content/output/colmap\n colmap_parent_dir = os.path.dirname(os.path.dirname(colmap_model_dir))\n\n print(f\"\\n✓ COLMAP model ready for Gaussian Splatting\")\n print(f\" Source path: {colmap_parent_dir}\")\n\n return colmap_parent_dir","metadata":{"id":"4IioqnC1mVcM"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"def train_gaussian_splatting(colmap_dir, image_dir, output_dir, iterations=30000):\n \"\"\"\n Train a Gaussian Splatting model.\n\n Args:\n colmap_dir (str): COLMAP parent directory (the directory containing 'sparse/').\n Example: /content/output/colmap\n image_dir (str): Directory containing training images.\n Example: /content/output/processed_images\n output_dir (str): Base directory for Gaussian Splatting output.\n iterations (int): Total number of training iterations.\n\n Returns:\n gs_output_dir (str): Path to the generated Gaussian Splatting output.\n \"\"\"\n print(\"\\n=== Training Gaussian Splatting ===\")\n\n gs_output_dir = os.path.join(output_dir, 'gs_output')\n os.makedirs(gs_output_dir, exist_ok=True)\n\n # Verify the Gaussian Splatting directory structure\n sparse_dir = os.path.join(colmap_dir, 'sparse', '0')\n if not os.path.exists(sparse_dir):\n raise FileNotFoundError(f\"COLMAP sparse directory not found: {sparse_dir}\")\n\n print(f\"COLMAP sparse model: {sparse_dir}\")\n 
print(f\"Training images: {image_dir}\")\n print(f\"Output: {gs_output_dir}\")\n print(f\"Iterations: {iterations}\")\n\n # Gaussian Splatting command\n cmd = [\n 'python', 'train.py',\n '-s', colmap_dir, # Source directory (must contain 'sparse/')\n '--images', image_dir, # Explicitly specify the images directory\n '-m', gs_output_dir, # Model output directory\n '--iterations', str(iterations),\n '--test_iterations', str(iterations//2), str(iterations),\n '--save_iterations', str(iterations//2), str(iterations),\n ]\n\n print(f\"\\nCommand: {' '.join(cmd)}\\n\")\n\n result = subprocess.run(cmd, capture_output=True, text=True)\n\n print(result.stdout)\n if result.stderr:\n print(\"STDERR:\", result.stderr)\n\n if result.returncode != 0:\n raise RuntimeError(\"Gaussian Splatting training failed\")\n\n # Check for the existence of the generated PLY file\n ply_path = os.path.join(gs_output_dir, 'point_cloud', f'iteration_{iterations}', 'point_cloud.ply')\n if os.path.exists(ply_path):\n size_mb = os.path.getsize(ply_path) / (1024 * 1024)\n print(f\"\\n✓ Training complete!\")\n print(f\" PLY file: {ply_path}\")\n print(f\" Size: {size_mb:.2f} MB\")\n else:\n print(f\"⚠️ Warning: PLY file not found at the expected location\")\n\n return gs_output_dir","metadata":{"id":"EiHoRSfzQ01b"},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"# **main**","metadata":{"id":"IqNcsheVywit"}},{"cell_type":"code","source":"# NumPy互換性の問題を修正\nimport numpy as np\n\nprint()\nprint(\"-------!pip show numpy | grep Version--------\")\n!pip show numpy | grep Version\n\n# array_to_blob関数をモンキーパッチ\ndef array_to_blob_fixed(array):\n return array.tobytes()\n\n# databaseモジュール内の関数を置き換え\nimport sys\nif 'database' in sys.modules:\n sys.modules['database'].array_to_blob = array_to_blob_fixed","metadata":{"id":"oKi5O7rTvdea","outputId":"41728e81-c9a9-4076-a18e-05ab7b5803ab"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"def main_pipeline(image_dir, output_dir, 
square_size=1024,max_images=None):\n \"\"\"\n Complete pipeline: Images → Square Processing → COLMAP → Gaussian Splatting\n \"\"\"\n print(\"=\"*70)\n print(\"Gaussian Splatting Preparation Pipeline\")\n print(\"=\"*70)\n\n # Step 0: Standardize images to square format\n #processed_dir = os.path.join(output_dir, 'processed_images')\n #processed_image_dir = preprocess_images_square(image_dir, processed_dir, size=square_size)\n\n processed_image_dir = os.path.join(output_dir, \"processed_images\")\n\n normalize_image_sizes_biplet(\n input_dir=image_dir,\n output_dir=processed_image_dir,\n size=square_size,\n max_images=max_images\n)\n\n # Setup paths\n feature_dir = os.path.join(output_dir, 'features')\n colmap_dir = os.path.join(output_dir, 'colmap')\n database_path = os.path.join(colmap_dir, 'database.db')\n sparse_dir = os.path.join(colmap_dir, 'sparse')\n\n os.makedirs(output_dir, exist_ok=True)\n os.makedirs(colmap_dir, exist_ok=True)\n\n # Get image paths\n image_paths = sorted([\n os.path.join(processed_image_dir, f)\n for f in os.listdir(processed_image_dir)\n if f.lower().endswith(('.jpg', '.jpeg', '.png'))\n ])\n\n print(f\"\\n📸 Found {len(image_paths)} images\")\n\n # Step 1: Generate image pairs\n pairs, features = get_image_pairs(image_paths)\n\n # Step 2: Feature matching with LightGlue\n match_pairs_lightglue(image_paths, pairs, features, feature_dir)\n\n # Step 3: Import data into COLMAP\n # (single_camera=True assumes uniform image dimensions)\n import_to_colmap(processed_image_dir, feature_dir, database_path, single_camera=True)\n\n # Step 4: Run COLMAP Sparse Reconstruction\n model_dir = run_colmap_mapper(database_path, processed_image_dir, sparse_dir)\n\n # Step 5: Verify and prepare for Gaussian Splatting\n colmap_parent = convert_to_gs_format(model_dir, output_dir)\n\n # Step 6: Train Gaussian Splatting model\n gs_output = train_gaussian_splatting(\n colmap_dir=colmap_parent,\n image_dir=processed_image_dir,\n output_dir=output_dir,\n 
iterations=3000\n )\n\n print(\"\\n\" + \"=\"*70)\n print(\"✅ Full Pipeline Successfully Completed!\")\n print(\"=\"*70)\n print(f\"\\nGaussian Splatting model saved at: {gs_output}\")\n\n return gs_output\n\n\n# Example usage\nif __name__ == \"__main__\":\n # Example: Tourist photos with varying resolutions/aspect ratios\n IMAGE_DIR = \"/content/drive/MyDrive/your_folder/fountain100\"\n OUTPUT_DIR = \"/content/output\"\n\n gs_output = main_pipeline(IMAGE_DIR, OUTPUT_DIR, square_size=1024,max_images=30)","metadata":{"id":"5-_UvgTtRiC_","outputId":"0f6a7562-16b8-4d99-b4e2-fcc616000067"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"import torch\ntorch.cuda.empty_cache()","metadata":{"id":"SZzD2-K6islN"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"print('Congratulation! Successsfully Completed!')","metadata":{"id":"8jhKKtTqwv7O","outputId":"ff3285b8-a065-46fe-cd8a-7fc73c29ffb4"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"","metadata":{"id":"VQsLeKY8Rl8Y"},"outputs":[],"execution_count":null}]}
|
ex14-protocol.ipynb
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
{"metadata":{"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.12.12"},"accelerator":"GPU","colab":{"gpuType":"T4","provenance":[],"machine_shape":"hm"},"kaggle":{"accelerator":"none","dataSources":[{"sourceId":14705005,"sourceType":"datasetVersion","datasetId":1429416}],"isInternetEnabled":true,"language":"python","sourceType":"notebook","isGpuEnabled":false},"papermill":{"default_parameters":{},"duration":967.270978,"end_time":"2026-01-20T01:22:34.649213","environment_variables":{},"exception":null,"input_path":"__notebook__.ipynb","output_path":"__notebook__.ipynb","parameters":{},"start_time":"2026-01-20T01:06:27.378235","version":"2.6.0"},"widgets":{"application/vnd.jupyter.widget-state+json":{"71659c5eb8704c428eb984e9dd6fca41":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_157b8fcb5f564a1f83feb30af2412dbc","IPY_MODEL_ab27d947283b4dda80bc0267ac0950d1","IPY_MODEL_dfb970745845470892fcef2a793fa722"],"layout":"IPY_MODEL_f2bb371554334d4bbee1839c7b3c5b6e"}},"157b8fcb5f564a1f83feb30af2412dbc":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_b9643bbd15964e4a90b496fab872f754","p
laceholder":"","style":"IPY_MODEL_817e2eaa5eb341f29c86a73c715c476a","value":"preprocessor_config.json: 100%"}},"ab27d947283b4dda80bc0267ac0950d1":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_335528a65ea74708bee26d778aac70b5","max":436,"min":0,"orientation":"horizontal","style":"IPY_MODEL_2912e19431b845a988f6617668ceecf8","value":436}},"dfb970745845470892fcef2a793fa722":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_5db5e4aeb52848728293e9d082e4940c","placeholder":"","style":"IPY_MODEL_d78328ca525d4bceab3890d57fa34ae5","value":" 436/436 [00:00<00:00, 
54.5kB/s]"}},"f2bb371554334d4bbee1839c7b3c5b6e":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"b9643bbd15964e4a90b496fab872f754":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"
overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"817e2eaa5eb341f29c86a73c715c476a":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"335528a65ea74708bee26d778aac70b5":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"2912e19431b845a988f6617668ceecf8":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"5db5e4aeb52848728293e9d082e4940c":{"model_m
odule":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"d78328ca525d4bceab3890d57fa34ae5":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"627c1695bcb04598a36dd72c8a69e7f5":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_c6ffcf6df8764f3faf59420f3373939d","IPY_MODEL_00cf6f11d4314130a64df95396b892ec","IPY_MODEL_ea5e39d79d7f4acfb8f3063d72311462"],"layout":"IPY_MODEL_4fce934bd4bf46baa7be4fb33e74d16f"
}},"c6ffcf6df8764f3faf59420f3373939d":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_bd527001905b4329bbe4f58446b72d37","placeholder":"","style":"IPY_MODEL_b807318a18134af7ae1ae18a4c9f8a13","value":"config.json: 100%"}},"00cf6f11d4314130a64df95396b892ec":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_3781299ab1244523931232163602cb44","max":548,"min":0,"orientation":"horizontal","style":"IPY_MODEL_932b36d8d6324e13ad1bfad07f93e57e","value":548}},"ea5e39d79d7f4acfb8f3063d72311462":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_37cb90e7e8d7424fb02e171ad3350c7c","placeholder":"","style":"IPY_MODEL_9732ccda34774836a11d81da1520ac99","value":" 548/548 [00:00<00:00, 
68.8kB/s]"}},"4fce934bd4bf46baa7be4fb33e74d16f":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"bd527001905b4329bbe4f58446b72d37":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"
overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"b807318a18134af7ae1ae18a4c9f8a13":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"3781299ab1244523931232163602cb44":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"932b36d8d6324e13ad1bfad07f93e57e":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"37cb90e7e8d7424fb02e171ad3350c7c":{"model_m
odule":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"9732ccda34774836a11d81da1520ac99":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"69af90d36da248978f28892084155d27":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_789dc5db84b84e7299d7cd6a513500a3","IPY_MODEL_acc6ca7ddbba4ca6bf2a6a2c217d566b","IPY_MODEL_b7e774e19f264f738e8b365d93423b3d"],"layout":"IPY_MODEL_d1cb7996322e4b1e8124857839e305db"
}},"789dc5db84b84e7299d7cd6a513500a3":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_90a22edc704c4768ac3b1511d2e629ea","placeholder":"","style":"IPY_MODEL_392c7092c9214557b0f26209ad32fcf1","value":"model.safetensors: 100%"}},"acc6ca7ddbba4ca6bf2a6a2c217d566b":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_74c7eb4966a14d65916259589ff1dc2e","max":346345912,"min":0,"orientation":"horizontal","style":"IPY_MODEL_45e0866962b0460e87c8ce180a96115f","value":346345912}},"b7e774e19f264f738e8b365d93423b3d":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_4b9c40e6d61d4b3aaaa6df96e695e831","placeholder":"","style":"IPY_MODEL_732b1fca8bab4e3e98e5b80317fa9198","value":" 346M/346M [00:01<00:00, 
362MB/s]"}},"d1cb7996322e4b1e8124857839e305db":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"90a22edc704c4768ac3b1511d2e629ea":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"o
verflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"392c7092c9214557b0f26209ad32fcf1":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"74c7eb4966a14d65916259589ff1dc2e":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"45e0866962b0460e87c8ce180a96115f":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"4b9c40e6d61d4b3aaaa6df96e695e831":{"model_mo
dule":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"732b1fca8bab4e3e98e5b80317fa9198":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"fc6e0d42af74464da586a89397b74752":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_f00b37c9901a4dde85ca44bf93aad7c4","IPY_MODEL_777478352a84439eb393cb5b6ea3fa1a","IPY_MODEL_81fd0c78a76244b48c99983a9cf66660"],"layout":"IPY_MODEL_fe164865bb9d49ecb4caf9389baa7975"}
},"f00b37c9901a4dde85ca44bf93aad7c4":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_b3ba20d7739847a8ba8a7a19150cab5f","placeholder":"","style":"IPY_MODEL_f22284b9edc04fb8937ae8c434f98cd7","value":"Loading weights: 100%"}},"777478352a84439eb393cb5b6ea3fa1a":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_dff1208989d34ad88b517c4ca788c6b2","max":223,"min":0,"orientation":"horizontal","style":"IPY_MODEL_bfdd2325fc234667980928f3871d04ca","value":223}},"81fd0c78a76244b48c99983a9cf66660":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_bccce2558d5a4e01840a388a39e0104e","placeholder":"","style":"IPY_MODEL_e867a871e17c4f46a4959900d066f540","value":" 223/223 [00:00<00:00, 1154.74it/s, Materializing 
param=layernorm.weight]"}},"fe164865bb9d49ecb4caf9389baa7975":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"b3ba20d7739847a8ba8a7a19150cab5f":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"over
flow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"f22284b9edc04fb8937ae8c434f98cd7":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"dff1208989d34ad88b517c4ca788c6b2":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"bfdd2325fc234667980928f3871d04ca":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"bccce2558d5a4e01840a388a39e01
04e":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"e867a871e17c4f46a4959900d066f540":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}}}}},"nbformat_minor":4,"nbformat":4,"cells":[{"cell_type":"code","source":"","metadata":{"id":"ukeu2UQAGpBV"},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"# **biplet_dino_mast3r_ps3_gs_kg**\n\n","metadata":{"id":"qDQLX3PArmh8","papermill":{"duration":0.003504,"end_time":"2026-01-20T01:06:31.022336","exception":false,"start_time":"2026-01-20T01:06:31.018832","status":"completed"},"tags":[]}},{"cell_type":"markdown","source":"# setup","metadata":{"id":"M4iuU0WnGpBY"}},{"cell_type":"code","source":"from google.colab import 
drive\ndrive.mount('/content/drive')","metadata":{"id":"vfqsbwoqlO7r","outputId":"da254904-082f-472d-8bf6-2117530cdd7f"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"# MASt3R-based Gaussian Splatting Pipeline\n# Preserves: DINO pair selection + Biplet-Square Normalization\n# Replaces: ALIKED/LightGlue/COLMAP with MASt3R\n\nimport os\nimport sys\nimport gc\nimport h5py\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom tqdm import tqdm\nfrom pathlib import Path\nimport subprocess\nfrom PIL import Image, ImageFilter\nimport struct\n\n# Transformers for DINO\nfrom transformers import AutoImageProcessor, AutoModel\n\n# ============================================================================\n# Configuration\n# ============================================================================\nclass Config:\n # Feature extraction\n N_KEYPOINTS = 8192\n IMAGE_SIZE = 1024\n\n # Pair selection - CRITICAL for memory\n GLOBAL_TOPK = 20 # Reduced from 50 - each image pairs with top 20\n MIN_MATCHES = 10\n RATIO_THR = 1.2\n\n # Paths\n DINO_MODEL = \"facebook/dinov2-base\"\n\n # MASt3R - Reduced size for memory\n MAST3R_MODEL = \"/content/mast3r/checkpoints/MASt3R_ViTLarge_BaseDecoder_512_catmlpdpt_metric.pth\"\n MAST3R_IMAGE_SIZE = 224 # Small size to save memory\n\n # Device\n DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n# ============================================================================\n# Memory Management Utilities\n# ============================================================================\n\ndef clear_memory():\n \"\"\"Aggressively clear GPU and CPU memory\"\"\"\n gc.collect()\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n torch.cuda.synchronize()\n\ndef get_memory_info():\n \"\"\"Get current memory usage\"\"\"\n if torch.cuda.is_available():\n allocated = torch.cuda.memory_allocated() / 1024**3\n reserved = torch.cuda.memory_reserved() / 1024**3\n print(f\"GPU Memory - 
Allocated: {allocated:.2f}GB, Reserved: {reserved:.2f}GB\")\n\n import psutil\n cpu_mem = psutil.virtual_memory().percent\n print(f\"CPU Memory Usage: {cpu_mem:.1f}%\")\n\n# ============================================================================\n# Environment Setup\n# ============================================================================\n\ndef run_cmd(cmd, check=True, capture=False):\n \"\"\"Run command with better error handling\"\"\"\n print(f\"Running: {' '.join(cmd)}\")\n result = subprocess.run(\n cmd,\n capture_output=capture,\n text=True,\n check=False\n )\n if check and result.returncode != 0:\n print(f\"❌ Command failed with code {result.returncode}\")\n if capture:\n print(f\"STDOUT: {result.stdout}\")\n print(f\"STDERR: {result.stderr}\")\n return result\n\n\ndef setup_base_environment():\n \"\"\"Setup base Python environment\"\"\"\n print(\"\\n=== Setting up Base Environment ===\")\n\n # NumPy fix for Python 3.12\n print(\"\\n📦 Fixing NumPy...\")\n run_cmd([sys.executable, \"-m\", \"pip\", \"uninstall\", \"-y\", \"numpy\"])\n run_cmd([sys.executable, \"-m\", \"pip\", \"install\", \"numpy==1.26.4\"])\n\n # PyTorch\n print(\"\\n📦 Installing PyTorch...\")\n run_cmd([\n sys.executable, \"-m\", \"pip\", \"install\",\n \"torch\", \"torchvision\", \"torchaudio\"\n ])\n\n # Core utilities\n print(\"\\n📦 Installing core utilities...\")\n run_cmd([\n sys.executable, \"-m\", \"pip\", \"install\",\n \"opencv-python\",\n \"pillow\",\n \"imageio\",\n \"imageio-ffmpeg\",\n \"plyfile\",\n \"tqdm\",\n \"tensorboard\",\n \"scipy\", # for rotation conversions and image resizing\n \"psutil\" # for memory monitoring\n ])\n\n # Transformers for DINO\n print(\"\\n📦 Installing transformers...\")\n run_cmd([\n sys.executable, \"-m\", \"pip\", \"install\",\n \"transformers>=4.45.0\"\n ])\n\n # pycolmap for COLMAP format\n print(\"\\n📦 Installing pycolmap...\")\n run_cmd([sys.executable, \"-m\", \"pip\", \"install\", \"pycolmap\"])\n\n print(\"✓ Base environment 
setup complete!\")\n\n\ndef setup_mast3r():\n \"\"\"Install and setup MASt3R\"\"\"\n print(\"\\n=== Setting up MASt3R ===\")\n\n os.chdir('/content')\n\n # Remove existing installation\n if os.path.exists('mast3r'):\n print(\"Removing existing MASt3R installation...\")\n os.system('rm -rf mast3r')\n\n # Clone repository\n print(\"Cloning MASt3R repository...\")\n os.system('git clone --recursive https://github.com/naver/mast3r')\n os.chdir('/content/mast3r')\n\n # Check dust3r directory\n print(\"Checking dust3r structure...\")\n os.system('ls -la dust3r/')\n\n # Install dust3r\n print(\"Installing dust3r...\")\n os.system('cd dust3r && python -m pip install -e .')\n\n # Install croco\n print(\"Installing croco...\")\n os.system('cd dust3r/croco && python -m pip install -e .')\n\n # Install requirements\n print(\"Installing MASt3R requirements...\")\n os.system('pip install -r requirements.txt')\n\n # Download model weights\n print(\"Downloading model weights...\")\n os.system('mkdir -p checkpoints')\n os.system('wget -P checkpoints/ https://download.europe.naverlabs.com/ComputerVision/MASt3R/MASt3R_ViTLarge_BaseDecoder_512_catmlpdpt_metric.pth')\n\n # Install additional dependencies\n print(\"Installing additional dependencies...\")\n os.system('pip install trimesh matplotlib roma')\n\n # Add to path\n sys.path.insert(0, '/content/mast3r')\n sys.path.insert(0, '/content/mast3r/dust3r')\n\n # Verification\n print(\"\\n🔍 Verifying MASt3R installation...\")\n try:\n from mast3r.model import AsymmetricMASt3R\n print(\" ✓ MASt3R import: OK\")\n except Exception as e:\n print(f\" ❌ MASt3R import failed: {e}\")\n raise\n\n print(\"✓ MASt3R setup complete!\")\n\ndef setup_gaussian_splatting():\n \"\"\"Setup Gaussian Splatting\"\"\"\n print(\"\\n=== Setting up Gaussian Splatting ===\")\n\n os.chdir('/content')\n\n WORK_DIR = \"gaussian-splatting\"\n\n if not os.path.exists(WORK_DIR):\n print(\"Cloning Gaussian Splatting repository...\")\n run_cmd([\n \"git\", \"clone\", 
\"--recursive\",\n \"https://github.com/graphdeco-inria/gaussian-splatting.git\",\n WORK_DIR\n ])\n else:\n print(\"✓ Repository already exists\")\n\n os.chdir(WORK_DIR)\n\n # Install requirements\n print(\"Installing Gaussian Splatting requirements...\")\n run_cmd([sys.executable, \"-m\", \"pip\", \"install\", \"-r\", \"requirements.txt\"])\n\n # Build submodules\n print(\"\\n📦 Building Gaussian Splatting submodules...\")\n\n submodules = {\n \"diff-gaussian-rasterization\":\n \"https://github.com/graphdeco-inria/diff-gaussian-rasterization.git\",\n \"simple-knn\":\n \"https://github.com/camenduru/simple-knn.git\"\n }\n\n for name, repo in submodules.items():\n print(f\"\\n📦 Installing {name}...\")\n path = os.path.join(\"submodules\", name)\n if not os.path.exists(path):\n run_cmd([\"git\", \"clone\", repo, path])\n run_cmd([sys.executable, \"-m\", \"pip\", \"install\", path])\n\n print(\"✓ Gaussian Splatting setup complete!\")\n","metadata":{"execution":{"iopub.status.busy":"2026-02-02T08:53:09.950151Z","iopub.execute_input":"2026-02-02T08:53:09.950445Z","iopub.status.idle":"2026-02-02T08:53:09.967045Z","shell.execute_reply.started":"2026-02-02T08:53:09.95042Z","shell.execute_reply":"2026-02-02T08:53:09.966479Z"},"papermill":{"duration":46.280727,"end_time":"2026-01-20T01:07:23.641872","exception":false,"start_time":"2026-01-20T01:06:37.361145","status":"completed"},"tags":[],"trusted":true,"_kg_hide-output":true,"id":"hGX7IYJ6GpBZ"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"\nsetup_base_environment()\nclear_memory()\n\nsetup_mast3r()\nclear_memory()\n\nsetup_gaussian_splatting()\nclear_memory()","metadata":{"trusted":true,"_kg_hide-output":true,"execution":{"iopub.status.busy":"2026-02-02T08:53:09.968021Z","iopub.execute_input":"2026-02-02T08:53:09.968253Z","iopub.status.idle":"2026-02-02T08:56:35.635976Z","shell.execute_reply.started":"2026-02-02T08:53:09.968233Z","shell.execute_reply":"2026-02-02T08:56:35.635328Z"},"id":"sIf3UgDZGpBa",
"outputId":"0a1eb888-aba1-4990-ab52-28584e9104ec"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"","metadata":{"trusted":true,"id":"38nn_QqcGpBa"},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"# dino & mast3r","metadata":{"id":"L6OBEO0zGpBa"}},{"cell_type":"code","source":"# ============================================================================\n# Step 0: Biplet-Square Normalization (PRESERVED FROM ORIGINAL)\n# ============================================================================\ndef normalize_image_sizes_biplet(input_dir, output_dir=None, size=1024, max_images=None):\n \"\"\"\n Generates two square crops (Left & Right or Top & Bottom)\n from each image in a directory and returns the output directory\n and the list of generated file paths.\n\n Args:\n input_dir: Input directory containing source images\n output_dir: Output directory for processed images\n size: Target square size (default: 1024)\n max_images: Maximum number of SOURCE images to process (default: None = all images)\n \"\"\"\n if output_dir is None:\n output_dir = 'output/images_biplet'\n os.makedirs(output_dir, exist_ok=True)\n\n print(f\"--- Step 1: Biplet-Square Normalization ---\")\n print(f\"Generating 2 cropped squares (Left/Right or Top/Bottom) for each image...\")\n print()\n\n generated_paths = []\n converted_count = 0\n size_stats = {}\n\n # Sort for consistent processing order\n image_files = sorted([f for f in os.listdir(input_dir)\n if f.lower().endswith(('.jpg', '.jpeg', '.png'))])\n\n # ★ max_images で元画像数を制限\n if max_images is not None:\n image_files = image_files[:max_images]\n print(f\"Processing limited to {max_images} source images (will generate {max_images * 2} cropped images)\")\n\n for img_file in image_files:\n input_path = os.path.join(input_dir, img_file)\n try:\n img = Image.open(input_path)\n original_size = img.size\n\n # Tracking original aspect ratios\n size_key = f\"{original_size[0]}x{original_size[1]}\"\n 
size_stats[size_key] = size_stats.get(size_key, 0) + 1\n\n # Generate 2 crops using the helper function\n crops = generate_two_crops(img, size)\n base_name, ext = os.path.splitext(img_file)\n\n for mode, cropped_img in crops.items():\n output_path = os.path.join(output_dir, f\"{base_name}_{mode}{ext}\")\n cropped_img.save(output_path, quality=95)\n generated_paths.append(output_path)\n\n converted_count += 1\n print(f\" ✓ {img_file}: {original_size} → 2 square images generated\")\n\n except Exception as e:\n print(f\" ✗ Error processing {img_file}: {e}\")\n\n print(f\"\\nProcessing complete: {converted_count} source images processed\")\n print(f\"Total output images: {len(generated_paths)}\")\n print(f\"Original size distribution: {size_stats}\")\n\n return output_dir, generated_paths\n\n\ndef generate_two_crops(img, size):\n \"\"\"\n Crops the image into a square and returns 2 variations\n (Left/Right for landscape, Top/Bottom for portrait).\n \"\"\"\n width, height = img.size\n crop_size = min(width, height)\n crops = {}\n\n if width > height:\n # Landscape → Left & Right\n positions = {\n 'left': 0,\n 'right': width - crop_size\n }\n for mode, x_offset in positions.items():\n box = (x_offset, 0, x_offset + crop_size, crop_size)\n crops[mode] = img.crop(box).resize(\n (size, size),\n Image.Resampling.LANCZOS\n )\n\n else:\n # Portrait or Square → Top & Bottom\n positions = {\n 'top': 0,\n 'bottom': height - crop_size\n }\n for mode, y_offset in positions.items():\n box = (0, y_offset, crop_size, y_offset + crop_size)\n crops[mode] = img.crop(box).resize(\n (size, size),\n Image.Resampling.LANCZOS\n )\n\n return crops\n\n# ============================================================================\n# Step 1: DINO-based Pair Selection (PRESERVED FROM ORIGINAL)\n# ============================================================================\n\ndef load_torch_image(fname, device):\n \"\"\"Load image as torch tensor\"\"\"\n import torchvision.transforms as T\n\n img = 
def extract_dino_global(image_paths, model_path, device):
    """
    Compute one L2-normalized global descriptor per image with a DINO backbone.

    Images are processed one at a time to bound peak GPU memory; the descriptor
    is the per-channel max over patch tokens (CLS token at index 0 excluded).

    Args:
        image_paths: list of image file paths.
        model_path: HuggingFace model id/path for the DINO model.
        device: torch device for inference.

    Returns:
        (N, D) float tensor of descriptors, on the CPU.
    """
    print("\n=== Extracting DINO Global Features ===")
    print("Initial memory state:")
    get_memory_info()

    processor = AutoImageProcessor.from_pretrained(model_path)
    model = AutoModel.from_pretrained(model_path).eval().to(device)

    descriptors = []
    batch_size = 1  # single-image batches keep peak memory low

    for start in tqdm(range(0, len(image_paths), batch_size)):
        chunk = image_paths[start:start + batch_size]
        tensors = [load_torch_image(p, device) for p in chunk]
        batch = torch.cat(tensors, dim=0)

        with torch.no_grad():
            inputs = processor(images=batch, return_tensors="pt", do_rescale=False).to(device)
            outputs = model(**inputs)
            # Max-pool the patch tokens (drop CLS), then L2-normalize.
            pooled = outputs.last_hidden_state[:, 1:].max(dim=1)[0]
            normalized = F.normalize(pooled, dim=1, p=2)
            descriptors.append(normalized.cpu())

        # Release per-batch tensors before the next iteration.
        del batch, inputs, outputs, pooled, normalized
        clear_memory()

    global_descs = torch.cat(descriptors, dim=0)

    del model, processor
    clear_memory()

    print("After DINO extraction:")
    get_memory_info()

    return global_descs

def build_topk_pairs(global_feats, k, device):
    """
    Build candidate image pairs from global descriptors.

    Computes the full similarity matrix (dot product; cosine similarity if the
    features are normalized), takes each image's top-k neighbors, and returns
    the de-duplicated list of (i, j) index pairs with i < j.

    Args:
        global_feats: (N, D) tensor of per-image descriptors.
        k: neighbors per image (clamped to N - 1).
        device: torch device used for the similarity computation.

    Returns:
        list of unique (i, j) tuples with i < j.
    """
    feats = global_feats.to(device)
    sim = feats @ feats.T
    sim.fill_diagonal_(-1)  # never pair an image with itself

    n = sim.size(0)
    neighbors = torch.topk(sim, min(k, n - 1), dim=1).indices.cpu()

    unique_pairs = {
        (i, j.item())
        for i in range(n)
        for j in neighbors[i]
        if i < j.item()
    }
    return list(unique_pairs)
def select_diverse_pairs(pairs, max_pairs, num_images):
    """
    Select up to ``max_pairs`` pairs while maximizing image coverage.

    Phase 1 greedily keeps pairs that introduce at least one not-yet-covered
    image, preferring pairs whose images occur in few candidate pairs.
    Phase 2 fills any remaining budget with a seeded random shuffle of the
    leftover pairs.

    Args:
        pairs: list of (i, j) image-index tuples.
        max_pairs: maximum number of pairs to return.
        num_images: total number of images (indices 0 .. num_images - 1).

    Returns:
        list of at most ``max_pairs`` pairs (the input list itself when it is
        already within budget).
    """
    import random
    random.seed(42)  # deterministic selection across runs

    if len(pairs) <= max_pairs:
        return pairs

    print(f"Selecting {max_pairs} diverse pairs from {len(pairs)} candidates...")

    # How often each image occurs across all candidate pairs.
    image_counts = {i: 0 for i in range(num_images)}
    for i, j in pairs:
        image_counts[i] += 1
        image_counts[j] += 1

    def pair_score(pair):
        # Lower score = both images appear in fewer pairs = more diverse.
        i, j = pair
        return image_counts[i] + image_counts[j]

    pairs_scored = [(pair, pair_score(pair)) for pair in pairs]
    pairs_scored.sort(key=lambda x: x[1])  # stable: ties keep input order

    selected = []
    selected_images = set()

    # Phase 1: greedily take pairs that add at least one new image.
    for pair, score in pairs_scored:
        if len(selected) >= max_pairs:
            break
        i, j = pair
        if i not in selected_images or j not in selected_images:
            selected.append(pair)
            selected_images.add(i)
            selected_images.add(j)

    # Phase 2: fill remaining slots from the leftover pairs.
    if len(selected) < max_pairs:
        # Set membership instead of `p not in selected` list scans (was O(n^2)).
        chosen = set(selected)
        remaining = [p for p, s in pairs_scored if p not in chosen]
        random.shuffle(remaining)
        selected.extend(remaining[:max_pairs - len(selected)])

    print(f"Selected pairs cover {len(selected_images)} / {num_images} images ({100*len(selected_images)/num_images:.1f}%)")

    return selected
def load_mast3r_model(device='cuda'):
    """Load the pretrained MASt3R model named by Config.MAST3R_MODEL.

    Args:
        device: device string the model is moved to ('cuda' or 'cpu').

    Returns:
        The model in eval mode, on ``device``.
    """
    # Imported lazily so the notebook can run earlier cells without MASt3R installed.
    from mast3r.model import AsymmetricMASt3R

    model = AsymmetricMASt3R.from_pretrained(Config.MAST3R_MODEL).to(device)
    model.eval()

    print(f"✓ MASt3R model loaded on {device}")
    return model

def load_images_for_mast3r(image_paths, size=224):
    """Load images via DUSt3R's loader, resized for MASt3R inference.

    Args:
        image_paths: list of image file paths.
        size: target size passed to DUSt3R's loader (smaller = less memory).

    Returns:
        Whatever ``dust3r.utils.image.load_images`` returns — presumably a
        list of per-image records consumable by MASt3R inference (TODO confirm).
    """
    print(f"\n=== Loading images for MASt3R (size={size}) ===")

    from dust3r.utils.image import load_images

    # Load images using DUSt3R's loader with reduced size
    images = load_images(image_paths, size=size, verbose=True)

    return images

def run_mast3r_pairs(model, image_paths, pairs, device='cuda', batch_size=1, max_pairs=None):
    """Run MASt3R pairwise inference and global alignment.

    Args:
        model: loaded MASt3R model (see load_mast3r_model).
        image_paths: list of image file paths.
        pairs: list of (i, j) index pairs into ``image_paths``.
        device: inference/alignment device.
        batch_size: inference batch size (1 keeps memory low).
        max_pairs: optional cap; pairs are subsampled evenly when exceeded.

    Returns:
        (scene, images): the globally aligned scene object and the loaded
        image records.
    """
    print("\n=== Running MASt3R Reconstruction ===")
    print("Initial memory state:")
    get_memory_info()

    from dust3r.inference import inference
    from dust3r.cloud_opt import global_aligner, GlobalAlignerMode

    # Limit number of pairs if specified.
    if max_pairs and len(pairs) > max_pairs:
        print(f"Limiting pairs from {len(pairs)} to {max_pairs}")
        # Strided subsampling keeps the retained pairs spread across the list.
        step = max(1, len(pairs) // max_pairs)
        pairs = pairs[::step][:max_pairs]

    print(f"Processing {len(pairs)} pairs...")

    # Load images at a reduced resolution to bound memory.
    print(f"Loading {len(image_paths)} images at {Config.MAST3R_IMAGE_SIZE}x{Config.MAST3R_IMAGE_SIZE}...")
    images = load_images_for_mast3r(image_paths, size=Config.MAST3R_IMAGE_SIZE)

    print(f"Loaded {len(images)} images")
    print("After loading images:")
    get_memory_info()

    # Materialize the (img_i, img_j) tuples MASt3R inference expects.
    print(f"Creating {len(pairs)} image pairs...")
    mast3r_pairs = []
    for idx1, idx2 in tqdm(pairs, desc="Preparing pairs"):
        mast3r_pairs.append((images[idx1], images[idx2]))

    print(f"Running MASt3R inference on {len(mast3r_pairs)} pairs...")

    # Run inference (this returns the dict format global_aligner consumes).
    output = inference(mast3r_pairs, model, device, batch_size=batch_size, verbose=True)

    # Drop the pair list before alignment to free memory.
    del mast3r_pairs
    clear_memory()

    print("✓ MASt3R inference complete")
    print("After inference:")
    get_memory_info()

    # Fuse all pairwise predictions into one consistent scene.
    print("Running global alignment...")
    scene = global_aligner(
        output,
        device=device,
        mode=GlobalAlignerMode.PointCloudOptimizer
    )

    # The raw inference output is no longer needed once the scene holds it.
    del output
    clear_memory()

    print("Computing global alignment...")
    loss = scene.compute_global_alignment(
        init="mst",
        niter=150,  # Reduced from 300 to save time/memory
        schedule='cosine',
        lr=0.01
    )

    print(f"✓ Global alignment complete (final loss: {loss:.6f})")
    print("Final memory state:")
    get_memory_info()

    return scene, images
def write_next_bytes(fid, data, format_str):
    """
    Pack ``data`` little-endian with ``format_str`` and append it to ``fid``.

    Sequences (list/tuple/ndarray) are unpacked into the struct fields; a
    scalar fills a single field.
    """
    values = data if isinstance(data, (list, tuple, np.ndarray)) else (data,)
    fid.write(struct.pack("<" + format_str, *values))


def matrix_to_quaternion_translation(matrix: np.ndarray):
    """
    Split a 4x4 rigid transform into a COLMAP-style quaternion and translation.

    Args:
        matrix: 4x4 homogeneous transformation matrix.

    Returns:
        (qvec, tvec): rotation quaternion in COLMAP order [w, x, y, z] and
        the 3-vector translation.
    """
    rotation_part = matrix[:3, :3]
    translation = matrix[:3, 3]

    # scipy returns quaternions as [x, y, z, w]; COLMAP wants [w, x, y, z].
    x, y, z, w = Rotation.from_matrix(rotation_part).as_quat()
    return np.array([w, x, y, z]), translation


def write_cameras_binary(cameras, path_to_model_file):
    """Write a COLMAP cameras.bin file (every camera uses the PINHOLE model)."""
    PINHOLE_MODEL_ID = 1
    with open(path_to_model_file, "wb") as fid:
        write_next_bytes(fid, len(cameras), "Q")
        for camera_id, cam in cameras.items():
            write_next_bytes(fid, camera_id, "I")
            write_next_bytes(fid, PINHOLE_MODEL_ID, "I")
            write_next_bytes(fid, cam['width'], "Q")
            write_next_bytes(fid, cam['height'], "Q")
            # PINHOLE params: fx, fy, cx, cy — all doubles.
            for param in cam['params']:
                write_next_bytes(fid, float(param), "d")


def write_images_binary(images, path_to_model_file):
    """Write a COLMAP images.bin file (pose, name, 2D observations per image)."""
    with open(path_to_model_file, "wb") as fid:
        write_next_bytes(fid, len(images), "Q")
        for image_id, img in images.items():
            write_next_bytes(fid, image_id, "I")
            write_next_bytes(fid, img['qvec'], "dddd")
            write_next_bytes(fid, img['tvec'], "ddd")
            write_next_bytes(fid, img['camera_id'], "I")

            # Null-terminated image name, written one byte at a time.
            for ch in img['name']:
                write_next_bytes(fid, ch.encode("utf-8"), "c")
            write_next_bytes(fid, b"\x00", "c")

            # 2D keypoints and their associated 3D point ids.
            write_next_bytes(fid, len(img['xys']), "Q")
            for xy, point3D_id in zip(img['xys'], img['point3D_ids']):
                write_next_bytes(fid, xy, "dd")
                write_next_bytes(fid, point3D_id, "Q")


def write_points3d_binary(points3D, path_to_model_file):
    """
    Write a COLMAP points3D.bin file.

    Args:
        points3D: dict (id -> record) or list of records. Each record holds
            'xyz', 'rgb', 'error', 'image_ids' and 'point2D_idxs'; an optional
            'id' key overrides the sequential point id.
        path_to_model_file: destination path for points3D.bin.
    """
    records = list(points3D.values()) if isinstance(points3D, dict) else points3D

    with open(path_to_model_file, "wb") as fid:
        # Header: total number of points.
        write_next_bytes(fid, len(records), "Q")

        for fallback_id, point in enumerate(records):
            # Prefer an explicit id when the record carries one.
            explicit = isinstance(point, dict) and 'id' in point
            pid = point['id'] if explicit else fallback_id

            write_next_bytes(fid, pid, "Q")
            write_next_bytes(fid, point['xyz'], "ddd")
            write_next_bytes(fid, point['rgb'], "BBB")
            write_next_bytes(fid, point['error'], "d")

            # Track: (image_id, point2D_idx) observations of this point.
            write_next_bytes(fid, len(point['image_ids']), "Q")
            for image_id, point2D_idx in zip(point['image_ids'], point['point2D_idxs']):
                write_next_bytes(fid, int(image_id), "I")
                write_next_bytes(fid, int(point2D_idx), "I")
len(scene.views)\n imgs = scene.views\n else:\n if verbose:\n print(\" Warning: Cannot access views\")\n return\n\n # Use processed images if provided\n if processed_image_paths is not None and len(processed_image_paths) > 0:\n if verbose:\n print(f\" Using {len(processed_image_paths)} processed images\")\n\n import shutil\n for idx, src_path in enumerate(processed_image_paths):\n if idx >= num_views:\n break\n\n try:\n # Copy processed images\n dst_path = images_dir / f'image_{idx:04d}.jpg'\n shutil.copy2(src_path, dst_path)\n\n if verbose and idx < 3:\n print(f\" Copied image {idx}: {Path(src_path).name}\")\n except Exception as e:\n if verbose:\n print(f\" Error copying image {idx}: {e}\")\n else:\n # If no processed images, extract images from the scene\n if verbose:\n print(\" No processed images provided, extracting from scene...\")\n\n for idx in range(num_views):\n try:\n # Save RGB images\n img_path = images_dir / f'image_{idx:04d}.jpg'\n\n # Retrieve image data\n if hasattr(imgs[idx], 'img'):\n img = imgs[idx].img\n elif hasattr(imgs[idx], 'image'):\n img = imgs[idx].image\n else:\n img = imgs[idx]\n\n # Convert tensor to numpy array\n if isinstance(img, torch.Tensor):\n img = img.detach().cpu().numpy()\n\n # Convert image to correct format\n if isinstance(img, np.ndarray):\n # Convert (C, H, W) -> (H, W, C)\n if img.ndim == 3 and img.shape[0] in [1, 3, 4]:\n img = np.transpose(img, (1, 2, 0))\n\n # Normalize values to [0, 255] range\n if img.max() <= 1.0:\n img = (img * 255).astype(np.uint8)\n else:\n img = img.astype(np.uint8)\n\n # Convert grayscale to RGB\n if img.ndim == 2:\n img = np.stack([img, img, img], axis=-1)\n elif img.shape[-1] == 1:\n img = np.repeat(img, 3, axis=-1)\n\n # Save the image\n Image.fromarray(img).save(img_path)\n\n if verbose and idx < 3:\n print(f\" Saved image {idx}: {img_path}\")\n except Exception as e:\n if verbose:\n print(f\" Error saving image {idx}: {e}\")\n\n # Save depth maps\n try:\n if hasattr(scene, 
def extract_scene_data(scene, min_conf_thr, verbose):
    """Extract cameras, images, and colored 3D points from a MASt3R/DUSt3R scene.

    The scene object is duck-typed: `imgs`/`views`, `imshapes`,
    `get_intrinsics`, `get_im_poses`, `get_pts3d` and `get_conf` are used
    when present, with hardcoded fallbacks otherwise.

    Args:
        scene: optimized scene from global alignment.
        min_conf_thr: minimum per-point confidence; lower-confidence points
            are dropped (only when the scene exposes confidences).
        verbose: print progress information.

    Returns:
        (cameras, images_data, points3D): COLMAP-style structures —
        cameras maps camera_id -> PINHOLE intrinsics dict, images_data maps
        1-based image_id -> pose record, points3D is a list of point dicts.
    """
    cameras = {}
    images_data = {}
    points3D = []

    if verbose:
        print("\nExtracting scene data...")

    # Check scene structure: prefer `imgs`, fall back to `views`.
    if hasattr(scene, 'imgs'):
        num_views = len(scene.imgs)
        imgs = scene.imgs
    elif hasattr(scene, 'views'):
        num_views = len(scene.views)
        imgs = scene.views
    else:
        num_views = 0
        imgs = []

    if verbose:
        print(f"Number of views: {num_views}")

    # Extract camera parameters and poses, one camera per view.
    for idx in range(num_views):
        # Get image size; fallback matches a typical reduced MASt3R resolution
        # (TODO confirm 192x256 is right for Config.MAST3R_IMAGE_SIZE).
        if hasattr(scene, 'imshapes') and idx < len(scene.imshapes):
            height, width = scene.imshapes[idx]
        else:
            height, width = 192, 256

        # Default intrinsics used when the scene exposes none
        # (focal 260 is an assumption for this image size — verify).
        fx = fy = 260.0
        cx = width / 2.0
        cy = height / 2.0

        try:
            if hasattr(scene, 'get_intrinsics'):
                K = scene.get_intrinsics()
                if K is not None:
                    if isinstance(K, torch.Tensor):
                        K = K.detach().cpu().numpy()
                    if K.ndim >= 2:
                        # Either one shared K (2-D) or per-view Ks (3-D).
                        K_view = K[idx] if K.ndim == 3 else K
                        if K_view.shape[0] >= 3 and K_view.shape[1] >= 3:
                            fx = float(K_view[0, 0])
                            fy = float(K_view[1, 1])
                            cx = float(K_view[0, 2])
                            cy = float(K_view[1, 2])
        except:
            # Best-effort: keep the defaults on any failure.
            pass

        cameras[idx] = {
            'model': 'PINHOLE',
            'width': int(width),
            'height': int(height),
            'params': [fx, fy, cx, cy]
        }

        # Pose defaults to identity rotation, zero translation.
        qvec = np.array([1.0, 0.0, 0.0, 0.0])
        tvec = np.array([0.0, 0.0, 0.0])

        try:
            if hasattr(scene, 'get_im_poses'):
                poses = scene.get_im_poses()
                if poses is not None and idx < len(poses):
                    pose = poses[idx]
                    if isinstance(pose, torch.Tensor):
                        pose = pose.detach().cpu().numpy()

                    if isinstance(pose, np.ndarray) and pose.ndim == 2 and pose.shape == (4, 4):
                        # Invert only well-conditioned matrices; COLMAP stores
                        # world-to-camera, the scene presumably camera-to-world.
                        det = np.linalg.det(pose)
                        if abs(det) > 1e-10:
                            pose_inv = np.linalg.inv(pose)
                            qvec, tvec = matrix_to_quaternion_translation(pose_inv)
        except:
            # Best-effort: keep the identity pose on any failure.
            pass

        # COLMAP image ids are 1-based.
        images_data[idx + 1] = {
            'qvec': qvec,
            'tvec': tvec,
            'camera_id': idx,
            'name': f'image_{idx:04d}.jpg',
            'xys': np.array([]),
            'point3D_ids': np.array([])
        }

    # Extract 3D points WITH COLORS sampled from the source images.
    if verbose:
        print("\nExtracting 3D points with colors...")

    try:
        if hasattr(scene, 'get_pts3d'):
            pts3d = scene.get_pts3d()

            if pts3d is not None:
                # Handle list of per-view point maps.
                if isinstance(pts3d, list):
                    all_points = []
                    all_colors = []

                    for view_idx, pts in enumerate(pts3d):
                        if isinstance(pts, torch.Tensor):
                            pts = pts.detach().cpu().numpy()
                        if isinstance(pts, np.ndarray):
                            all_points.append(pts.reshape(-1, 3))

                            # Extract colors from the corresponding image.
                            if view_idx < len(imgs):
                                img = imgs[view_idx]
                                if isinstance(img, torch.Tensor):
                                    img = img.detach().cpu().numpy()

                                # Convert image format
                                if img.ndim == 3:
                                    # (C, H, W) -> (H, W, C)
                                    if img.shape[0] in [1, 3, 4]:
                                        img = np.transpose(img, (1, 2, 0))

                                # Normalize to 0-255
                                if img.max() <= 1.0:
                                    img = (img * 255).astype(np.uint8)
                                else:
                                    img = img.astype(np.uint8)

                                # Handle grayscale
                                if img.ndim == 2 or img.shape[-1] == 1:
                                    img = np.stack([img.squeeze()] * 3, axis=-1)

                                # Flatten to one RGB row per point; assumes the
                                # point map and image share H*W — TODO confirm.
                                img_flat = img.reshape(-1, 3)
                                all_colors.append(img_flat)
                            else:
                                # Default gray if no image available.
                                n_pts = pts.reshape(-1, 3).shape[0]
                                all_colors.append(np.full((n_pts, 3), 128, dtype=np.uint8))

                    pts3d_combined = np.vstack(all_points) if all_points else None
                    colors_combined = np.vstack(all_colors) if all_colors else None

                elif isinstance(pts3d, torch.Tensor):
                    pts3d_combined = pts3d.detach().cpu().numpy().reshape(-1, 3)

                    # Extract colors from first image
                    if len(imgs) > 0:
                        img = imgs[0]
                        if isinstance(img, torch.Tensor):
                            img = img.detach().cpu().numpy()

                        if img.ndim == 3 and img.shape[0] in [1, 3, 4]:
                            img = np.transpose(img, (1, 2, 0))

                        if img.max() <= 1.0:
                            img = (img * 255).astype(np.uint8)
                        else:
                            img = img.astype(np.uint8)

                        if img.ndim == 2 or img.shape[-1] == 1:
                            img = np.stack([img.squeeze()] * 3, axis=-1)

                        colors_combined = img.reshape(-1, 3)
                    else:
                        colors_combined = None

                elif isinstance(pts3d, np.ndarray):
                    pts3d_combined = pts3d.reshape(-1, 3)

                    # Extract colors from first image
                    if len(imgs) > 0:
                        img = imgs[0]
                        if isinstance(img, torch.Tensor):
                            img = img.detach().cpu().numpy()

                        if img.ndim == 3 and img.shape[0] in [1, 3, 4]:
                            img = np.transpose(img, (1, 2, 0))

                        if img.max() <= 1.0:
                            img = (img * 255).astype(np.uint8)
                        else:
                            img = img.astype(np.uint8)

                        if img.ndim == 2 or img.shape[-1] == 1:
                            img = np.stack([img.squeeze()] * 3, axis=-1)

                        colors_combined = img.reshape(-1, 3)
                    else:
                        colors_combined = None
                else:
                    pts3d_combined = None
                    colors_combined = None

                if pts3d_combined is not None and len(pts3d_combined) > 0:
                    # Get per-point confidence, flattened across views.
                    conf_combined = None
                    if hasattr(scene, 'get_conf'):
                        conf = scene.get_conf()
                        if conf is not None:
                            if isinstance(conf, list):
                                all_conf = []
                                for c in conf:
                                    if isinstance(c, torch.Tensor):
                                        c = c.detach().cpu().numpy()
                                    all_conf.append(c.flatten())
                                conf_combined = np.concatenate(all_conf) if all_conf else None
                            elif isinstance(conf, torch.Tensor):
                                conf_combined = conf.detach().cpu().numpy().flatten()
                            elif isinstance(conf, np.ndarray):
                                conf_combined = conf.flatten()

                    # Ensure all arrays have the same size before filtering.
                    min_size = len(pts3d_combined)
                    if colors_combined is not None:
                        min_size = min(min_size, len(colors_combined))
                    if conf_combined is not None:
                        min_size = min(min_size, len(conf_combined))

                    pts3d_combined = pts3d_combined[:min_size]
                    if colors_combined is not None:
                        colors_combined = colors_combined[:min_size]
                    else:
                        colors_combined = np.full((min_size, 3), 128, dtype=np.uint8)

                    # Filter by confidence threshold when confidences exist.
                    if conf_combined is not None and len(conf_combined) > 0:
                        conf_combined = conf_combined[:min_size]
                        mask = conf_combined >= min_conf_thr
                        pts3d_filtered = pts3d_combined[mask]
                        colors_filtered = colors_combined[mask]
                    else:
                        pts3d_filtered = pts3d_combined
                        colors_filtered = colors_combined

                    # Build point records, skipping non-finite coordinates.
                    for pt, color in zip(pts3d_filtered, colors_filtered):
                        if np.all(np.isfinite(pt)):
                            points3D.append({
                                'xyz': pt,
                                'rgb': color.astype(np.uint8),  # sampled image color, not placeholder gray
                                'error': 0.0,
                                'image_ids': np.array([]),
                                'point2D_idxs': np.array([])
                            })

                    if verbose:
                        print(f"  Extracted {len(points3D)} 3D points with colors")
                        print(f"  Sample colors: {[p['rgb'].tolist() for p in points3D[:3]]}")
    except Exception as e:
        if verbose:
            print(f"  Error extracting 3D points: {e}")
            import traceback
            traceback.print_exc()

    if verbose:
        print(f"\nTotal: {len(cameras)} cameras, {len(images_data)} images, {len(points3D)} points")

    return cameras, images_data, points3D
convert_mast3r_to_colmap(scene, output_dir, min_conf_thr=1.5, clean_depth=True,\n mask_images=True, verbose=True, processed_image_paths=None,\n max_points=100000):\n \"\"\"\n Convert MASt3R scene to COLMAP format\n\n Args:\n scene: MASt3R optimized scene\n output_dir: Output directory path\n min_conf_thr: Minimum confidence threshold for 3D points\n clean_depth: Whether to clean depth maps\n mask_images: Whether to apply masks\n verbose: Print verbose output\n processed_image_paths: List of paths to processed (square) images\n \"\"\"\n\n output_dir = Path(output_dir)\n sparse_dir = output_dir / \"sparse\" / \"0\"\n images_dir = output_dir / \"images\"\n depth_dir = output_dir / \"depth\"\n normal_dir = output_dir / \"normal\"\n mask_dir = output_dir / \"mask\"\n\n # Create directories\n sparse_dir.mkdir(parents=True, exist_ok=True)\n images_dir.mkdir(parents=True, exist_ok=True)\n depth_dir.mkdir(parents=True, exist_ok=True)\n normal_dir.mkdir(parents=True, exist_ok=True)\n mask_dir.mkdir(parents=True, exist_ok=True)\n\n if verbose:\n print(\"\\n\" + \"=\"*70)\n print(\"Converting MASt3R scene to COLMAP format\")\n print(\"=\"*70)\n print(f\"Output directory: {output_dir}\")\n\n cameras, images_data, points3D = extract_scene_data(scene, min_conf_thr, verbose)\n\n #----------------------------down sampling\n if max_points is not None and len(points3D) > max_points:\n print(f\"\\nDownsampling 3D points from {len(points3D)} to {max_points}...\")\n\n if isinstance(points3D, dict):\n all_ids = list(points3D.keys())\n sampled_ids = np.random.choice(all_ids, max_points, replace=False)\n points3D = {idx: points3D[idx] for idx in sampled_ids}\n elif isinstance(points3D, list):\n sampled_indices = np.random.choice(len(points3D), max_points, replace=False)\n points3D = [points3D[i] for i in sampled_indices]\n else:\n raise TypeError(f\"points3D must be dict or list, got {type(points3D)}\")\n #----------------------------down sampling\n\n save_image_data(scene, images_dir, 
def train_gaussian_splatting(colmap_dir, output_dir, iterations=7000):
    """
    Train a Gaussian Splatting model using COLMAP data.

    Prepares the directory layout the reference train.py expects
    (output_dir/images + output_dir/sparse/0/*.bin), then launches training
    as a subprocess.

    Args:
        colmap_dir: Root directory of COLMAP data (contains sparse/0/*.bin).
        output_dir: Target directory for Gaussian Splatting output.
        iterations: Number of training iterations.

    Returns:
        output_dir: The path where the trained model is saved.

    Raises:
        FileNotFoundError: when required COLMAP files, the GS repository, or
            the processed-images directory are missing.
        RuntimeError: when no images are found or the training subprocess fails.
    """
    import subprocess
    import os
    import shutil

    print("======================================================================")
    print("Step 5: Gaussian Splatting Training")
    print("======================================================================")
    print(f"Input COLMAP directory (Root): {colmap_dir}")
    print(f"Output directory: {output_dir}")
    print(f"Iterations: {iterations}")

    # MASt3R output is located in colmap_dir/sparse/0/*.bin
    colmap_sparse_src = os.path.join(colmap_dir, "sparse", "0")
    required_files = ['cameras.bin', 'images.bin', 'points3D.bin']

    # Pre-flight check: fail fast with an actionable message.
    print("\n[1/4] Checking COLMAP files...")
    for filename in required_files:
        filepath = os.path.join(colmap_sparse_src, filename)
        if not os.path.exists(filepath):
            raise FileNotFoundError(
                f"Required COLMAP file not found: {filepath}\n"
                f"Verify if Step 4 correctly saved files to {colmap_sparse_src}"
            )
        # FIX: was a literal "(unknown)" placeholder instead of the filename.
        print(f"  ✓ Found {filename}")

    # Verify Gaussian Splatting repository
    gs_repo = "/content/gaussian-splatting"
    if not os.path.exists(gs_repo):
        raise FileNotFoundError(f"Gaussian Splatting repository not found: {gs_repo}")

    # --- Prepare Directory Structure ---
    # The GS train.py expects:
    #   output_dir/
    #   ├── images/
    #   └── sparse/0/*.bin
    print("\n[2/4] Preparing directory structure...")
    images_dst_dir = os.path.join(output_dir, 'images')
    sparse_dst_dir = os.path.join(output_dir, 'sparse', '0')
    os.makedirs(images_dst_dir, exist_ok=True)
    os.makedirs(sparse_dst_dir, exist_ok=True)
    print(f"  ✓ Created {images_dst_dir}")
    print(f"  ✓ Created {sparse_dst_dir}")

    # --- Copy Images (Processed/Split images) ---
    # Retrieve images from 'processed_images' located alongside colmap_dir.
    print("\n[3/4] Copying processed images...")
    processed_images_src = os.path.join(os.path.dirname(colmap_dir), 'processed_images')

    if not os.path.exists(processed_images_src):
        raise FileNotFoundError(
            f"Processed images directory not found: {processed_images_src}\n"
            f"Expected location: {os.path.dirname(colmap_dir)}/processed_images"
        )

    # Copy image files and keep a count (matching is case-insensitive).
    copied_count = 0
    image_extensions = ('.jpg', '.jpeg', '.png')

    for img in sorted(os.listdir(processed_images_src)):
        if img.lower().endswith(image_extensions):
            src = os.path.join(processed_images_src, img)
            dst = os.path.join(images_dst_dir, img)
            shutil.copy2(src, dst)
            copied_count += 1

    if copied_count == 0:
        raise RuntimeError(f"No images found in {processed_images_src}")

    print(f"  ✓ Copied {copied_count} images from {processed_images_src}")
    print(f"  ✓ Images prepared in {images_dst_dir}")

    # --- Copy COLMAP Binaries ---
    print("\n[4/4] Copying COLMAP sparse reconstruction...")
    for filename in required_files:
        src = os.path.join(colmap_sparse_src, filename)
        dst = os.path.join(sparse_dst_dir, filename)
        # Avoid error if src and dst are the same path
        if os.path.abspath(src) != os.path.abspath(dst):
            shutil.copy2(src, dst)
        file_size = os.path.getsize(dst)
        # FIX: was a literal "(unknown)" placeholder instead of the filename.
        print(f"  ✓ Copied {filename} ({file_size:,} bytes)")

    print(f"  ✓ COLMAP files prepared in {sparse_dst_dir}")

    # --- Construct Execution Command ---
    # The prepared directory (containing 'images' and 'sparse/0') is both the
    # source (-s) and the model output (-m).
    print("\n" + "="*70)
    print("Starting Gaussian Splatting Training...")
    print("="*70)

    cmd = [
        "python", os.path.join(gs_repo, "train.py"),
        "-s", output_dir,  # Use prepared directory as source
        "-m", output_dir,  # Output training results to the same directory
        "--iterations", str(iterations),
        "--test_iterations", "-1",
        "--save_iterations", str(iterations),  # Save only the final result
        "--checkpoint_iterations", "-1",
        "--quiet"
    ]

    print(f"Command: {' '.join(cmd)}\n")

    # Execute training; surface full logs on failure.
    result = subprocess.run(cmd, capture_output=True, text=True)

    if result.returncode != 0:
        print("\n" + "="*70)
        print("❌ Training failed!")
        print("="*70)
        print("\n--- STDOUT ---")
        print(result.stdout)
        print("\n--- STDERR ---")
        print(result.stderr)
        print("="*70)
        raise RuntimeError("Gaussian Splatting training failed")

    print("\n" + "="*70)
    print("✓ Training complete!")
    print("="*70)
    print(f"Model saved to: {output_dir}")
    print(f"Point cloud: {os.path.join(output_dir, 'point_cloud', f'iteration_{iterations}')}")

    return output_dir
load_mast3r_model(device)\n\n scene, mast3r_images = run_mast3r_pairs(\n model=model,\n image_paths=image_paths,\n pairs=pairs, device=device,\n max_pairs=max_pairs\n )\n\n # Clean up model\n del model\n clear_memory()\n\n # ==========================================\n # Step 4: Convert to COLMAP Format\n # ==========================================\n print(\"\\n\" + \"=\"*70)\n print(\"Step 4: COLMAP Conversion\")\n print(\"=\"*70)\n\n colmap_dir = convert_mast3r_to_colmap(\n scene=scene,\n output_dir=os.path.join(output_dir, 'colmap'),\n min_conf_thr=1.5,max_points=max_points\n )\n\n #---------------\n\n import shutil\n\n src_dir = '/content/output/colmap/images'\n dst_dir = '/content/output/gaussian_splatting/images'\n\n os.makedirs(dst_dir, exist_ok=True)\n\n files = os.listdir(src_dir)\n for f in files:\n if f.startswith('image_') and f.endswith('.jpg'):\n src_path = os.path.join(src_dir, f)\n dst_path = os.path.join(dst_dir, f)\n\n if not os.path.exists(dst_path):\n shutil.copy2(src_path, dst_path)\n\n print(f\"Copied {len(files)} files to {dst_dir}\")\n\n #-----------------\n\n # ==========================================\n # Step 5: Gaussian Splatting Training\n # ==========================================\n print(\"\\n\" + \"=\"*70)\n print(\"Step 5: Gaussian Splatting Training\")\n print(\"=\"*70)\n\n # 'colmap_output' is a Path object pointing to 'output_dir/colmap'.\n # This directory contains the generated 'sparse/0/*.bin' files.\n colmap_root = '/content/output/colmap'#str(colmap_output)\n\n # Define the output directory for Gaussian Splatting\n gs_output_dir = os.path.join(output_dir, 'gaussian_splatting')\n\n # Call the existing 'train_gaussian_splatting' function.\n # Standard GS practice is to pass the parent directory containing the 'sparse' folder.\n gs_output = train_gaussian_splatting(\n colmap_dir=colmap_root, # This is the crucial path\n output_dir=gs_output_dir,\n iterations=iterations\n )\n\n return 
if __name__ == "__main__":
    # Pipeline configuration: source photos on Drive, outputs to local disk.
    IMAGE_DIR = "/content/drive/MyDrive/your_folder/fountain100"
    OUTPUT_DIR = "/content/output"

    # Run the full Biplet -> DINO -> MASt3R -> COLMAP -> GS pipeline with
    # reduced settings (3k iterations, capped pairs/points) for a quick run.
    gs_output = main_pipeline(
        image_dir=IMAGE_DIR,
        output_dir=OUTPUT_DIR,
        square_size=1024,
        iterations=3000,
        max_images=30,
        max_pairs=1000,
        max_points=100000,
    )

    # Closing banner with the final artifact location.
    rule = "=" * 70
    print(f"\n{rule}")
    print("Pipeline completed successfully!")
    print(f"{rule}")
    print(f"Gaussian Splatting output: {gs_output}")
# Install the `tree` directory-listing utility; apt chatter is discarded.
!apt-get install -y tree > /dev/null

# Visual sanity check: show the full generated output hierarchy
# (processed_images/, colmap/, gaussian_splatting/).
!tree /content/output
|