File size: 28,364 Bytes
2266195 |
1 |
{"metadata":{"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.11.13"},"kaggle":{"accelerator":"nvidiaTeslaT4","dataSources":[{"sourceId":49349,"databundleVersionId":5447706,"sourceType":"competition"},{"sourceId":14451718,"sourceType":"datasetVersion","datasetId":1429416}],"isInternetEnabled":true,"language":"python","sourceType":"notebook","isGpuEnabled":true},"papermill":{"default_parameters":{},"duration":20573.990788,"end_time":"2026-01-11T00:00:22.081506","environment_variables":{},"exception":null,"input_path":"__notebook__.ipynb","output_path":"__notebook__.ipynb","parameters":{},"start_time":"2026-01-10T18:17:28.090718","version":"2.6.0"},"colab":{"provenance":[],"gpuType":"T4"},"accelerator":"GPU"},"nbformat_minor":4,"nbformat":4,"cells":[{"cell_type":"markdown","source":"# **biplet-dino-colmap-gs**","metadata":{"papermill":{"duration":0.002985,"end_time":"2026-01-10T18:17:32.170524","exception":false,"start_time":"2026-01-10T18:17:32.167539","status":"completed"},"tags":[],"id":"fb1f1fdc"}},{"cell_type":"code","source":"#サイズの異なる画像を扱う\nfrom google.colab import drive\ndrive.mount('/content/drive')","metadata":{"id":"JON4rYSEOzCg","outputId":"802a32ed-8ecf-40c9-ddf8-3d5ce7706299"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"import os\nimport sys\nimport subprocess\nimport shutil\nfrom pathlib import Path\nimport cv2\nfrom PIL import Image\nimport glob\n\nIMAGE_PATH=\"/content/drive/MyDrive/your_folder/fountain100\"\nWORK_DIR = '/content/gaussian-splatting'\nOUTPUT_DIR = '/content/output'\nCOLMAP_DIR = 
def run_cmd(cmd, check=True, capture=False):
    """Run a command and report failures without raising.

    Args:
        cmd: Command and arguments as a list of strings.
        check: When True, print a diagnostic if the exit code is non-zero.
            The function never raises; callers can inspect
            ``result.returncode`` themselves.
        capture: When True, capture stdout/stderr as text on the result.

    Returns:
        subprocess.CompletedProcess: the finished process.
    """
    print(f"Running: {' '.join(cmd)}")
    result = subprocess.run(
        cmd,
        capture_output=capture,
        text=True,
        check=False
    )
    if check and result.returncode != 0:
        print(f"❌ Command failed with code {result.returncode}")
        if capture:
            print(f"STDOUT: {result.stdout}")
            print(f"STDERR: {result.stderr}")
    return result


def setup_environment(work_dir="/content/gaussian-splatting"):
    """Colab environment setup for Gaussian Splatting + pycolmap (Python 3.12).

    Pins NumPy to a Python 3.12-compatible release, installs system
    packages (COLMAP, build tools, Xvfb), clones the Gaussian Splatting
    repository, installs the Python stack, builds the GS CUDA submodules,
    and runs a verification pass.

    Args:
        work_dir: Clone/build location for the Gaussian Splatting repo.
            Defaults to the absolute path used by the rest of the pipeline;
            the previous relative "gaussian-splatting" only resolved to the
            same place because Colab's working directory is /content.

    Returns:
        str: the working directory containing the cloned repository.
    """
    print("🚀 Setting up COLAB environment (v8 - Python 3.12 compatible)")

    # =====================================================================
    # STEP 0: NumPy fix — Python 3.12 requires numpy >= 1.26
    # =====================================================================
    print("\n" + "=" * 70)
    print("STEP 0: Fix NumPy (Python 3.12 compatible)")
    print("=" * 70)

    run_cmd([sys.executable, "-m", "pip", "uninstall", "-y", "numpy"])
    run_cmd([sys.executable, "-m", "pip", "install", "numpy==1.26.4"])

    # Sanity check in a fresh interpreter so the pinned wheel is what loads.
    run_cmd([sys.executable, "-c", "import numpy; print('NumPy:', numpy.__version__)"])

    # =====================================================================
    # STEP 1: System packages (Colab)
    # =====================================================================
    print("\n" + "=" * 70)
    print("STEP 1: System packages")
    print("=" * 70)

    run_cmd(["apt-get", "update", "-qq"])
    run_cmd([
        "apt-get", "install", "-y", "-qq",
        "colmap",
        "build-essential",
        "cmake",
        "git",
        "libopenblas-dev",
        "xvfb"
    ])

    # Headless safety net: COLMAP / OpenCV expect a display, so point Qt at
    # the offscreen platform and start a virtual X server on :99.
    os.environ["QT_QPA_PLATFORM"] = "offscreen"
    os.environ["DISPLAY"] = ":99"
    subprocess.Popen(
        ["Xvfb", ":99", "-screen", "0", "1024x768x24"],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL
    )

    # =====================================================================
    # STEP 2: Clone Gaussian Splatting
    # =====================================================================
    print("\n" + "=" * 70)
    print("STEP 2: Clone Gaussian Splatting")
    print("=" * 70)

    if not os.path.exists(work_dir):
        run_cmd([
            "git", "clone", "--recursive",
            "https://github.com/graphdeco-inria/gaussian-splatting.git",
            work_dir
        ])
    else:
        print("✓ Repository already exists")

    # =====================================================================
    # STEP 3: Python packages (order matters: torch before the GS builds)
    # =====================================================================
    print("\n" + "=" * 70)
    print("STEP 3: Python packages (VERBOSE MODE)")
    print("=" * 70)

    print("\n📦 Installing PyTorch...")
    run_cmd([
        sys.executable, "-m", "pip", "install",
        "torch", "torchvision", "torchaudio"
    ])

    print("\n📦 Installing core utilities...")
    run_cmd([
        sys.executable, "-m", "pip", "install",
        "opencv-python",
        "pillow",
        "imageio",
        "imageio-ffmpeg",
        "plyfile",
        "tqdm",
        "tensorboard"
    ])

    print("\n📦 Installing transformers (NumPy 1.26 compatible)...")
    run_cmd([
        sys.executable, "-m", "pip", "install",
        "transformers==4.40.0"
    ])

    print("\n📦 Installing LightGlue stack...")
    run_cmd([sys.executable, "-m", "pip", "install", "kornia"])
    run_cmd([sys.executable, "-m", "pip", "install", "h5py"])
    run_cmd([sys.executable, "-m", "pip", "install", "matplotlib"])
    run_cmd([sys.executable, "-m", "pip", "install", "pycolmap"])

    # =====================================================================
    # STEP 4: Build GS submodules (CUDA extensions; needs torch installed)
    # =====================================================================
    print("\n" + "=" * 70)
    print("STEP 4: Build Gaussian Splatting submodules")
    print("=" * 70)

    submodules = {
        "diff-gaussian-rasterization":
            "https://github.com/graphdeco-inria/diff-gaussian-rasterization.git",
        "simple-knn":
            "https://github.com/camenduru/simple-knn.git"
    }

    for name, repo in submodules.items():
        print(f"\n📦 Installing {name}...")
        path = os.path.join(work_dir, "submodules", name)
        if not os.path.exists(path):
            run_cmd(["git", "clone", repo, path])
        run_cmd([sys.executable, "-m", "pip", "install", path])

    # =====================================================================
    # STEP 5: Detailed verification of every critical import
    # =====================================================================
    print("\n" + "=" * 70)
    print("STEP 5: Detailed Verification")
    print("=" * 70)

    print("\n🔍 Testing NumPy...")
    try:
        import numpy as np
        print(f"  ✓ NumPy: {np.__version__}")
    except Exception as e:
        print(f"  ❌ NumPy failed: {e}")

    print("\n🔍 Testing PyTorch...")
    try:
        import torch
        print(f"  ✓ PyTorch: {torch.__version__}")
        print(f"  ✓ CUDA available: {torch.cuda.is_available()}")
        if torch.cuda.is_available():
            print(f"  ✓ CUDA version: {torch.version.cuda}")
    except Exception as e:
        print(f"  ❌ PyTorch failed: {e}")

    print("\n🔍 Testing transformers...")
    try:
        import transformers
        print(f"  ✓ transformers version: {transformers.__version__}")
        from transformers import AutoModel
        print(f"  ✓ AutoModel import: OK")
    except Exception as e:
        print(f"  ❌ transformers failed: {e}")
        print(f"  Attempting detailed diagnosis...")
        # Re-import in a subprocess so we can show its stdout/stderr verbatim.
        result = run_cmd([
            sys.executable, "-c",
            "import transformers; print(transformers.__version__)"
        ], capture=True)
        print(f"  Output: {result.stdout}")
        print(f"  Error: {result.stderr}")

    print("\n🔍 Testing pycolmap...")
    try:
        import pycolmap
        print(f"  ✓ pycolmap: OK")
    except Exception as e:
        print(f"  ❌ pycolmap failed: {e}")

    print("\n🔍 Testing kornia...")
    try:
        import kornia
        print(f"  ✓ kornia: {kornia.__version__}")
    except Exception as e:
        print(f"  ❌ kornia failed: {e}")

    print("\n" + "=" * 70)
    print("✅ SETUP COMPLETE")
    print("=" * 70)
    print(f"Working dir: {work_dir}")

    return work_dir


if __name__ == "__main__":
    setup_environment()
import os
import glob
import cv2
import numpy as np
from PIL import Image


def normalize_image_sizes_biplet(input_dir, output_dir=None, size=1024, max_images=None):
    """Generate two square crops from every image in a directory.

    Landscape images yield left/right crops; portrait (or exactly square)
    images yield top/bottom crops. Each crop is resized to ``size`` x
    ``size`` and written next to the others with a ``_<mode>`` suffix.

    Args:
        input_dir: Directory containing the source images (.jpg/.jpeg/.png).
        output_dir: Destination directory (default: 'output/images_biplet').
        size: Edge length of the output squares (default: 1024).
        max_images: Maximum number of SOURCE images to process
            (default: None = all images; each source yields two outputs).

    Returns:
        tuple: ``(output_dir, generated_paths)`` — the output directory and
        the list of paths of all generated crop files.
    """
    if output_dir is None:
        output_dir = 'output/images_biplet'
    os.makedirs(output_dir, exist_ok=True)

    print(f"--- Step 1: Biplet-Square Normalization ---")
    print(f"Generating 2 cropped squares (Left/Right or Top/Bottom) for each image...")
    print()

    generated_paths = []
    converted_count = 0
    size_stats = {}

    # Sort for consistent processing order
    image_files = sorted([f for f in os.listdir(input_dir)
                          if f.lower().endswith(('.jpg', '.jpeg', '.png'))])

    # Optional cap on the number of source images.
    if max_images is not None:
        image_files = image_files[:max_images]
        print(f"Processing limited to {max_images} source images (will generate {max_images * 2} cropped images)")

    for img_file in image_files:
        input_path = os.path.join(input_dir, img_file)
        try:
            # Context manager closes the file handle promptly; the previous
            # bare Image.open() leaked one descriptor per processed image.
            with Image.open(input_path) as img:
                original_size = img.size

                # Track the distribution of original resolutions.
                size_key = f"{original_size[0]}x{original_size[1]}"
                size_stats[size_key] = size_stats.get(size_key, 0) + 1

                crops = generate_two_crops(img, size)
                base_name, ext = os.path.splitext(img_file)

                for mode, cropped_img in crops.items():
                    output_path = os.path.join(output_dir, f"{base_name}_{mode}{ext}")
                    cropped_img.save(output_path, quality=95)
                    generated_paths.append(output_path)

            converted_count += 1
            print(f"  ✓ {img_file}: {original_size} → 2 square images generated")

        except Exception as e:
            print(f"  ✗ Error processing {img_file}: {e}")

    print(f"\nProcessing complete: {converted_count} source images processed")
    print(f"Total output images: {len(generated_paths)}")
    print(f"Original size distribution: {size_stats}")

    return output_dir, generated_paths


def generate_two_crops(img, size):
    """Return two square crops of ``img``, each resized to ``size`` x ``size``.

    Landscape input → ``{'left', 'right'}``; portrait or square input →
    ``{'top', 'bottom'}``. For an exactly square image both crops cover the
    full frame and are therefore identical.
    """
    width, height = img.size
    crop_size = min(width, height)

    if width > height:
        # Landscape → slide the square window to the left and right edges.
        boxes = {
            'left': (0, 0, crop_size, crop_size),
            'right': (width - crop_size, 0, width, crop_size),
        }
    else:
        # Portrait or square → slide the window to the top and bottom edges.
        boxes = {
            'top': (0, 0, crop_size, crop_size),
            'bottom': (0, height - crop_size, crop_size, height),
        }

    return {
        mode: img.crop(box).resize((size, size), Image.Resampling.LANCZOS)
        for mode, box in boxes.items()
    }
def run_colmap_reconstruction(image_dir, colmap_dir):
    """Estimate camera poses and a sparse 3D point cloud with COLMAP.

    Runs feature extraction, exhaustive matching, sparse mapping, and a
    TXT export of the first reconstructed model.

    Args:
        image_dir: Directory of input images.
        colmap_dir: Workspace directory for the database and sparse model.

    Returns:
        str: path of the exported sparse model directory.

    Raises:
        FileNotFoundError: if COLMAP produced no model at all.
        subprocess.CalledProcessError: if any COLMAP step fails.
    """
    print("Running SfM reconstruction with COLMAP...")

    database_path = os.path.join(colmap_dir, "database.db")
    sparse_dir = os.path.join(colmap_dir, "sparse")
    os.makedirs(sparse_dir, exist_ok=True)

    # Force Qt offscreen so COLMAP runs without a display.
    env = os.environ.copy()
    env['QT_QPA_PLATFORM'] = 'offscreen'

    print("1/4: Extracting features...")
    subprocess.run([
        'colmap', 'feature_extractor',
        '--database_path', database_path,
        '--image_path', image_dir,
        '--ImageReader.single_camera', '1',
        '--ImageReader.camera_model', 'OPENCV',
        '--SiftExtraction.use_gpu', '0'  # CPU extraction for portability
    ], check=True, env=env)

    print("2/4: Matching features...")
    # Exhaustive matching compares every image pair (O(n^2)); for ordered
    # video frames, sequential_matcher would be a cheaper alternative.
    subprocess.run([
        'colmap', 'exhaustive_matcher',
        '--database_path', database_path,
        '--SiftMatching.use_gpu', '0'  # CPU matching for portability
    ], check=True, env=env)

    print("3/4: Sparse reconstruction...")
    subprocess.run([
        'colmap', 'mapper',
        '--database_path', database_path,
        '--image_path', image_dir,
        '--output_path', sparse_dir,
        '--Mapper.ba_global_max_num_iterations', '20',  # fewer BA iterations for speed
        '--Mapper.ba_local_max_num_iterations', '10'
    ], check=True, env=env)

    print("4/4: Exporting to text format...")
    # COLMAP usually writes the model to sparse/0; fall back to whichever
    # sub-model directory exists if that name is absent.
    model_dir = os.path.join(sparse_dir, '0')
    if not os.path.exists(model_dir):
        subdirs = [d for d in os.listdir(sparse_dir)
                   if os.path.isdir(os.path.join(sparse_dir, d))]
        if subdirs:
            model_dir = os.path.join(sparse_dir, subdirs[0])
        else:
            raise FileNotFoundError("COLMAP reconstruction failed")

    subprocess.run([
        'colmap', 'model_converter',
        '--input_path', model_dir,
        '--output_path', model_dir,
        '--output_type', 'TXT'
    ], check=True, env=env)

    print(f"COLMAP reconstruction complete: {model_dir}")
    return model_dir


def convert_cameras_to_pinhole(input_file, output_file):
    """Rewrite a COLMAP cameras.txt so every camera uses the PINHOLE model.

    OPENCV cameras keep their fx/fy/cx/cy intrinsics and drop the
    distortion coefficients (k1, k2, p1, p2). Any other — or malformed —
    model falls back to a crude pinhole guess: fx = fy = max(width,
    height) with the principal point at the image center. Comment and
    blank lines are copied through unchanged.

    Args:
        input_file: Path of the source cameras.txt.
        output_file: Path of the converted cameras.txt to write.
    """
    print(f"Reading camera file: {input_file}")

    with open(input_file, 'r') as f:
        lines = f.readlines()

    converted_count = 0
    with open(output_file, 'w') as f:
        for line in lines:
            if line.startswith('#') or line.strip() == '':
                f.write(line)
                continue

            parts = line.strip().split()
            if len(parts) < 4:
                # Not a camera record; pass through untouched.
                f.write(line)
                continue

            cam_id, model, width, height = parts[:4]
            params = parts[4:]

            if model == "PINHOLE":
                f.write(line)
            elif model == "OPENCV" and len(params) >= 4:
                # OPENCV params: fx, fy, cx, cy, k1, k2, p1, p2.
                # (The original indexed params unguarded and raised
                # IndexError on short lines.)
                fx, fy, cx, cy = params[:4]
                f.write(f"{cam_id} PINHOLE {width} {height} {fx} {fy} {cx} {cy}\n")
                converted_count += 1
            else:
                # Fallback guess for every other (or malformed) model.
                fx = fy = max(float(width), float(height))
                cx = float(width) / 2
                cy = float(height) / 2
                f.write(f"{cam_id} PINHOLE {width} {height} {fx} {fy} {cx} {cy}\n")
                converted_count += 1

    print(f"Converted {converted_count} cameras to PINHOLE format")
def prepare_gaussian_splatting_data(image_dir, colmap_model_dir):
    """Assemble the <WORK_DIR>/data/video layout that train.py expects.

    Copies the input images, converts cameras.txt to the PINHOLE model,
    and copies images.txt / points3D.txt into sparse/0.

    Args:
        image_dir: Directory of (already normalized) images.
        colmap_model_dir: COLMAP text-model directory (cameras.txt etc.).

    Returns:
        str: the prepared dataset directory.
    """
    print("Preparing data for Gaussian Splatting...")

    data_dir = f"{WORK_DIR}/data/video"
    os.makedirs(f"{data_dir}/sparse/0", exist_ok=True)
    os.makedirs(f"{data_dir}/images", exist_ok=True)

    # Copy images
    print("Copying images...")
    img_count = 0
    for img_file in os.listdir(image_dir):
        if img_file.lower().endswith(('.jpg', '.jpeg', '.png')):
            shutil.copy(
                os.path.join(image_dir, img_file),
                f"{data_dir}/images/{img_file}"
            )
            img_count += 1
    print(f"Copied {img_count} images")

    # Convert and copy camera file to PINHOLE format
    print("Converting camera model to PINHOLE format...")
    convert_cameras_to_pinhole(
        os.path.join(colmap_model_dir, 'cameras.txt'),
        f"{data_dir}/sparse/0/cameras.txt"
    )

    # Copy the remaining reconstruction files. The destination previously
    # used a broken literal instead of {filename}, so both files were
    # written to the same bogus name.
    for filename in ['images.txt', 'points3D.txt']:
        src = os.path.join(colmap_model_dir, filename)
        dst = f"{data_dir}/sparse/0/{filename}"
        if os.path.exists(src):
            shutil.copy(src, dst)
            print(f"Copied {filename}")
        else:
            print(f"Warning: {filename} not found")

    print(f"Data preparation complete: {data_dir}")
    return data_dir


def train_gaussian_splatting(data_dir, iterations=3000):
    """Train the Gaussian Splatting model via the repo's train.py.

    Args:
        data_dir: Dataset directory from prepare_gaussian_splatting_data.
        iterations: Number of optimization iterations (default: 3000).

    Returns:
        str: the model output directory.

    Raises:
        subprocess.CalledProcessError: if training exits non-zero.
    """
    print(f"Training Gaussian Splatting model for {iterations} iterations...")

    model_path = f"{WORK_DIR}/output/video"

    cmd = [
        sys.executable, 'train.py',
        '-s', data_dir,
        '-m', model_path,
        '--iterations', str(iterations),
        '--eval'
    ]

    # train.py resolves its own relative paths, so run from the repo root.
    subprocess.run(cmd, cwd=WORK_DIR, check=True)

    return model_path
def render_video(model_path, output_video_path, iteration=3000):
    """Render the trained model's views and assemble them into an MP4.

    Args:
        model_path: Trained Gaussian Splatting model directory.
        output_video_path: Destination path for the MP4.
        iteration: Checkpoint iteration to render (default: 3000).

    Returns:
        bool: True when the video was written, False when no rendered
        frames could be located.
    """
    print("Rendering video...")

    # Run the repository's render script for the requested checkpoint.
    render_cmd = [
        sys.executable, 'render.py',
        '-m', model_path,
        '--iteration', str(iteration)
    ]
    subprocess.run(render_cmd, cwd=WORK_DIR, check=True)

    # Renders land under test/ or train/ depending on the eval split.
    candidates = (
        f"{model_path}/test/ours_{iteration}/renders",
        f"{model_path}/train/ours_{iteration}/renders",
    )
    render_dir = next((d for d in candidates if os.path.exists(d)), None)

    if render_dir is not None:
        print(f"Rendering directory found: {render_dir}")
        frames = sorted(f for f in os.listdir(render_dir) if f.endswith('.png'))

        if frames:
            print(f"Found {len(frames)} rendered images")

            # Stitch the PNG frames into an H.264 MP4 at 30 fps.
            subprocess.run([
                'ffmpeg', '-y',
                '-framerate', '30',
                '-pattern_type', 'glob',
                '-i', f"{render_dir}/*.png",
                '-c:v', 'libx264',
                '-pix_fmt', 'yuv420p',
                '-crf', '18',
                output_video_path
            ], check=True)

            print(f"Video saved: {output_video_path}")
            return True

    print("Error: Rendering directory not found")
    return False


def create_gif(video_path, gif_path):
    """Create a slowed-down (8x), 10 fps, 720px-wide looping GIF from an MP4.

    Args:
        video_path: Source MP4 path.
        gif_path: Destination GIF path.

    Returns:
        bool: True if the GIF file exists afterwards, else False.
    """
    print("Creating animated GIF...")

    gif_cmd = [
        'ffmpeg', '-y',
        '-i', video_path,
        '-vf', 'setpts=8*PTS,fps=10,scale=720:-1:flags=lanczos',
        '-loop', '0',
        gif_path
    ]
    subprocess.run(gif_cmd, check=True)

    if not os.path.exists(gif_path):
        return False

    size_mb = os.path.getsize(gif_path) / (1024 * 1024)
    print(f"GIF creation complete: {gif_path} ({size_mb:.2f} MB)")
    return True
def main_pipeline(image_dir, output_dir, square_size=1024, max_images=100):
    """Run the full biplet → COLMAP → Gaussian Splatting → video pipeline.

    Args:
        image_dir: Directory of source photos.
        output_dir: Directory to receive the MP4/GIF results. (The original
            accepted this parameter but silently used the global OUTPUT_DIR
            instead; it is now honored.)
        square_size: Edge length of the normalized square crops.
        max_images: Cap on the number of source images processed.

    Returns:
        tuple: (video_path, gif_path) on success, (None, None) on failure.
    """
    try:
        # Step 1: normalize and preprocess images
        print("=" * 60)
        print("Step 1: Normalizing and preprocessing images")
        print("=" * 60)

        frame_dir = os.path.join(COLMAP_DIR, "images")
        os.makedirs(frame_dir, exist_ok=True)

        # normalize_image_sizes_biplet returns (output_dir, generated_paths);
        # the original bound the whole tuple to one name and printed it as
        # an image count.
        _, generated_paths = normalize_image_sizes_biplet(
            input_dir=image_dir,
            output_dir=frame_dir,  # write straight into colmap/images
            size=square_size,
            max_images=max_images
        )

        print(f"Processed {len(generated_paths)} images")

        # Step 2: Estimate camera info with COLMAP
        print("=" * 60)
        print("Step 2: Running COLMAP reconstruction")
        print("=" * 60)
        colmap_model_dir = run_colmap_reconstruction(frame_dir, COLMAP_DIR)

        # Step 3: Prepare data for Gaussian Splatting
        print("=" * 60)
        print("Step 3: Preparing Gaussian Splatting data")
        print("=" * 60)
        data_dir = prepare_gaussian_splatting_data(frame_dir, colmap_model_dir)

        # Step 4: Train model
        print("=" * 60)
        print("Step 4: Training Gaussian Splatting model")
        print("=" * 60)
        model_path = train_gaussian_splatting(data_dir, iterations=3000)

        # Step 5: Render video
        print("=" * 60)
        print("Step 5: Rendering video")
        print("=" * 60)
        os.makedirs(output_dir, exist_ok=True)
        output_video = os.path.join(output_dir, "gaussian_splatting_video.mp4")

        success = render_video(model_path, output_video, iteration=3000)

        if success:
            print("=" * 60)
            print(f"Success! Video generation complete: {output_video}")
            print("=" * 60)

            # Create GIF next to the video
            output_gif = os.path.join(output_dir, "gaussian_splatting_video.gif")
            create_gif(output_video, output_gif)

            # Display the result inline in the notebook
            from IPython.display import Image, display
            display(Image(open(output_gif, 'rb').read()))

            return output_video, output_gif
        else:
            print("Warning: Rendering complete, but video was not generated")
            return None, None

    except Exception as e:
        # Broad catch is deliberate here: this is the top-level notebook
        # driver, so report the failure and return (None, None) rather
        # than crashing the kernel.
        print(f"Error: {str(e)}")
        import traceback
        traceback.print_exc()
        return None, None


if __name__ == "__main__":
    IMAGE_DIR = "/content/drive/MyDrive/your_folder/fountain100"
    OUTPUT_DIR = "/content/output"
    COLMAP_DIR = "/content/colmap_workspace"

    video_path, gif_path = main_pipeline(
        image_dir=IMAGE_DIR,
        output_dir=OUTPUT_DIR,
        square_size=1024,
        max_images=30
    )