stpete2 committed on
Commit
19f5c20
·
verified ·
1 Parent(s): 620aa3e

Delete tetsu28-frames-cpu-gaussian-splat.ipynb

Browse files
tetsu28-frames-cpu-gaussian-splat.ipynb DELETED
@@ -1 +0,0 @@
1
# --- Imports: standard library first, then third-party ---
import glob
import os
import random
import shutil
import subprocess
import sys
from pathlib import Path

import cv2
import numpy as np
from PIL import Image

# --- Configuration: dataset input and working/output locations on Kaggle ---
IMAGE_PATH = "/kaggle/input/two-dogs/tetsu28_frames/tetsu28_frames"
WORK_DIR = '/kaggle/working/gaussian_splatting'
OUTPUT_DIR = '/kaggle/working/output'
COLMAP_DIR = '/kaggle/working/colmap_data'

# CUDA build hints for the rasterizer submodules.
# NOTE(review): FORCE_CUDA='1' on a CPU-only kernel may force CUDA extension
# builds — confirm this is intended for the "CPU Gaussian Splat" run.
#os.environ['CUDA_HOME'] = '/usr/local/cuda'
os.environ['TORCH_CUDA_ARCH_LIST'] = '7.0;7.5;8.0;8.6'
os.environ['FORCE_CUDA'] = '1'
def setup_environment():
    """
    Setup environment with clean NumPy installation at the beginning.

    Runs an ordered, best-effort install pipeline for the Gaussian Splatting
    stack on a Kaggle kernel: downgrade NumPy to 1.x, install system
    packages (xvfb, COLMAP, build tools), clone the gaussian-splatting
    repository, install pip packages, report CUDA status, and build the
    two CUDA submodules. Most failures are printed and swallowed so the
    notebook keeps running; only a failed repository clone raises.

    Returns:
        str: path to the cloned gaussian-splatting working directory.
    """
    print("Setting up environment for Kaggle...")
    # Shadows the module-level WORK_DIR with the same value; kept local so the
    # function is self-contained when run standalone.
    WORK_DIR = '/kaggle/working/gaussian_splatting'

    # ========================================================================
    # STEP 0: Clean NumPy installation BEFORE importing anything
    # ========================================================================
    print("="*70)
    print("STEP 0: Fixing NumPy compatibility (clean install)")
    print("="*70)

    try:
        # Uninstall NumPy completely
        print("Uninstalling NumPy 2.x...")
        subprocess.run([
            sys.executable, '-m', 'pip', 'uninstall', '-y', 'numpy'
        ], check=True, capture_output=True)
        print("βœ“ NumPy uninstalled")

        # Install NumPy 1.x (pin '<2' to avoid ABI breakage in compiled deps)
        print("Installing NumPy 1.x...")
        subprocess.run([
            sys.executable, '-m', 'pip', 'install', 'numpy<2'
        ], check=True, capture_output=True)
        print("βœ“ NumPy 1.x installed")

        # Reinstall key packages that depend on NumPy.
        # --no-deps avoids pip pulling NumPy 2.x back in as a dependency.
        print("Reinstalling NumPy-dependent packages...")
        packages_to_reinstall = [
            'scikit-learn',
            'scipy',
            'matplotlib',
            'pandas'
        ]

        for pkg in packages_to_reinstall:
            try:
                subprocess.run([
                    sys.executable, '-m', 'pip', 'install', '--force-reinstall',
                    '--no-deps', pkg
                ], check=True, capture_output=True)
                print(f"βœ“ Reinstalled {pkg}")
            except subprocess.CalledProcessError:
                print(f"⚠ Failed to reinstall {pkg} (may not be critical)")

        # Verify NumPy version in a fresh interpreter (the current kernel may
        # still hold the old module in memory).
        result = subprocess.run([
            sys.executable, '-c', 'import numpy; print(numpy.__version__)'
        ], capture_output=True, text=True)
        numpy_version = result.stdout.strip()
        print(f"\nβœ“ NumPy version now: {numpy_version}")

        if numpy_version.startswith('1.'):
            print("βœ“ NumPy fix successful!")
        else:
            print(f"⚠ Warning: NumPy version is {numpy_version}, expected 1.x")

    except subprocess.CalledProcessError as e:
        print(f"⚠ NumPy fix encountered issues: {e}")
        print("Continuing anyway...")

    # ========================================================================
    # STEP 1: System packages and dependencies
    # ========================================================================
    print("\n" + "="*70)
    print("STEP 1: Installing system packages")
    print("="*70)

    # Virtual display setup (COLMAP's Qt GUI code needs a display server or
    # the offscreen platform even for CLI use in a container).
    try:
        print("Setting up virtual display...")
        subprocess.run(['apt-get', 'update', '-qq'], check=True, capture_output=True)
        subprocess.run(['apt-get', 'install', '-y', '-qq', 'xvfb'],
                       check=True, capture_output=True)

        os.environ['QT_QPA_PLATFORM'] = 'offscreen'
        os.environ['DISPLAY'] = ':99'
        # Background Xvfb process; intentionally not waited on or terminated.
        subprocess.Popen(['Xvfb', ':99', '-screen', '0', '1024x768x24'],
                         stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        print("βœ“ Virtual display setup")
    except Exception as e:
        print(f"⚠ Virtual display skipped: {e}")

    # Install COLMAP from the distro repository
    print("\nInstalling COLMAP...")
    try:
        subprocess.run(['apt-get', 'install', '-y', '-qq', 'colmap'],
                       check=True, capture_output=True)
        print("βœ“ COLMAP installed")
    except subprocess.CalledProcessError as e:
        print(f"⚠ COLMAP warning: {e}")

    # Install build dependencies (needed to compile the CUDA submodules)
    print("\nInstalling build dependencies...")
    try:
        subprocess.run([
            'apt-get', 'install', '-y', '-qq',
            'build-essential', 'cmake', 'git', 'libopenblas-dev'
        ], check=True, capture_output=True)
        print("βœ“ Build dependencies installed")
    except subprocess.CalledProcessError as e:
        print(f"⚠ Build dependencies warning: {e}")

    # ========================================================================
    # STEP 2: Clone Gaussian Splatting repository
    # ========================================================================
    print("\n" + "="*70)
    print("STEP 2: Cloning repository")
    print("="*70)

    if not os.path.exists(WORK_DIR):
        print(f"Cloning to {WORK_DIR}...")
        try:
            subprocess.run([
                'git', 'clone', '--recursive',
                'https://github.com/graphdeco-inria/gaussian-splatting.git',
                WORK_DIR
            ], check=True)
            print("βœ“ Repository cloned")
        except subprocess.CalledProcessError:
            # Fall back to a mirror if the upstream clone fails.
            print("Primary repository failed, trying alternative...")
            try:
                subprocess.run([
                    'git', 'clone', '--recursive',
                    'https://github.com/tztechno/gaussian-splatting.git',
                    WORK_DIR
                ], check=True)
                print("βœ“ Alternative repository cloned")
            except subprocess.CalledProcessError as e:
                # No repository means nothing downstream can work.
                print(f"βœ— Both repositories failed: {e}")
                raise
    else:
        print(f"βœ“ Repository exists at {WORK_DIR}")
        try:
            subprocess.run(['git', 'submodule', 'update', '--init', '--recursive'],
                           cwd=WORK_DIR, check=True, capture_output=True)
            print("βœ“ Submodules updated")
        except subprocess.CalledProcessError:
            print("⚠ Submodule update failed")

    # ========================================================================
    # STEP 3: Install Python packages
    # ========================================================================
    print("\n" + "="*70)
    print("STEP 3: Installing Python packages")
    print("="*70)

    # NOTE(review): versions are unpinned; reruns may pick up incompatible
    # releases.
    pip_packages = [
        'torch', 'torchvision', 'torchaudio',
        'plyfile', 'tqdm', 'opencv-python', 'pillow',
        'imageio', 'imageio-ffmpeg', 'tensorboard'
    ]

    for package in pip_packages:
        try:
            subprocess.run([
                sys.executable, '-m', 'pip', 'install', '-q', package
            ], check=True, capture_output=True)
            print(f"βœ“ Installed {package}")
        except subprocess.CalledProcessError:
            print(f"⚠ Failed to install {package}")

    # ========================================================================
    # STEP 4: Verify CUDA
    # ========================================================================
    print("\n" + "="*70)
    print("STEP 4: Verifying CUDA")
    print("="*70)

    try:
        import torch
        print(f"PyTorch: {torch.__version__}")
        print(f"CUDA available: {torch.cuda.is_available()}")
        if torch.cuda.is_available():
            print(f"CUDA version: {torch.version.cuda}")
            print(f"GPU: {torch.cuda.get_device_name(0)}")
    except ImportError:
        print("⚠ PyTorch not available")

    # ========================================================================
    # STEP 5: Build submodules
    # ========================================================================
    print("\n" + "="*70)
    print("STEP 5: Building submodules")
    print("="*70)

    # (name, fallback clone URL) pairs for the two compiled extensions.
    submodules = [
        ('diff-gaussian-rasterization',
         'https://github.com/graphdeco-inria/diff-gaussian-rasterization.git'),
        ('simple-knn',
         'https://github.com/camenduru/simple-knn.git')
    ]

    for submodule_name, fallback_url in submodules:
        print(f"\n{'-'*70}")
        print(f"Building {submodule_name}...")
        print(f"{'-'*70}")

        submodule_dir = os.path.join(WORK_DIR, 'submodules', submodule_name)

        # Check/clone submodule (missing or empty directory means the
        # --recursive clone did not populate it).
        if not os.path.exists(submodule_dir) or not os.listdir(submodule_dir):
            print(f"Cloning {submodule_name}...")
            try:
                subprocess.run(['git', 'clone', fallback_url, submodule_dir],
                               check=True)
                print(f"βœ“ Cloned {submodule_name}")
            except subprocess.CalledProcessError:
                print(f"βœ— Failed to clone {submodule_name}")
                continue

        # Try installation methods in order; the lambdas close over the
        # current submodule_dir / fallback_url and run lazily below.
        methods = [
            ("pip install", lambda: subprocess.run([
                sys.executable, '-m', 'pip', 'install', submodule_dir
            ], check=True, capture_output=True)),

            ("setup.py install", lambda: subprocess.run([
                sys.executable, 'setup.py', 'install'
            ], cwd=submodule_dir, check=True, capture_output=True)),

            ("git install", lambda: subprocess.run([
                sys.executable, '-m', 'pip', 'install', f'git+{fallback_url}'
            ], check=True, capture_output=True))
        ]

        for method_name, method_func in methods:
            try:
                print(f"Trying {method_name}...")
                method_func()
                print(f"βœ“ {submodule_name} installed via {method_name}")
                break
            except subprocess.CalledProcessError:
                print(f"βœ— {method_name} failed")
        else:
            # for/else: only reached when no method broke out of the loop.
            print(f"⚠ All methods failed for {submodule_name}")

    # ========================================================================
    # STEP 6: Verify installations
    # ========================================================================
    print("\n" + "="*70)
    print("STEP 6: Verifying installations")
    print("="*70)

    all_good = True

    try:
        import diff_gaussian_rasterization
        print("βœ“ diff_gaussian_rasterization available")
    except ImportError:
        print("βœ— diff_gaussian_rasterization NOT FOUND")
        all_good = False

    try:
        import simple_knn
        print("βœ“ simple_knn available")
    except ImportError:
        print("βœ— simple_knn NOT FOUND")
        all_good = False

    # Final summary
    print("\n" + "="*70)
    if all_good:
        print("βœ“βœ“βœ“ SETUP COMPLETE - Ready to run! βœ“βœ“βœ“")
    else:
        print("⚠⚠⚠ SETUP COMPLETED WITH WARNINGS ⚠⚠⚠")
        print("Some modules may be missing. Training might fail.")
    print(f"Working directory: {WORK_DIR}")
    print("="*70)

    return WORK_DIR


if __name__ == "__main__":
    setup_environment()
conversion\n img = cv2.cvtColor(np.array(pil_img), cv2.COLOR_RGB2BGR)\n else:\n img = cv2.imread(image_path)\n\n if img is None:\n print(f\"Warning: Could not read {image_path}, skipping\")\n continue\n\n # brightness\n #img = adjust_gamma(img, gamma=1.1)\n print(img.shape)\n\n # Generate output path\n output_path = os.path.join(output_dir, f\"frame_{saved_count:05d}.jpg\")\n\n # Save the image\n cv2.imwrite(output_path, img, [cv2.IMWRITE_JPEG_QUALITY, 95])\n saved_count += 1\n\n except Exception as e:\n print(f\"Error processing {image_path}: {e}\")\n continue\n\n print(f\"Processed {saved_count} frames from folder\")\n\n return saved_count","metadata":{"_uuid":"eeb1acde-fd95-42f4-983e-58854c0b2f2a","_cell_guid":"8ed69b5e-6ae1-4690-bae7-5846cf3b19c9","collapsed":false,"jupyter":{"outputs_hidden":false},"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"**select exhaustive_matcher or sequential_matcher**","metadata":{}},{"cell_type":"code","source":"import os\nimport subprocess\nimport shutil\nimport sys\nfrom PIL import Image\n\n# Assuming WORK_DIR is defined elsewhere in the context\n# WORK_DIR = \"/path/to/work/directory\"\n\ndef run_colmap_reconstruction(image_dir, colmap_dir):\n \"\"\"Estimate camera poses and 3D point cloud with COLMAP\"\"\"\n print(\"Running SfM reconstruction with COLMAP...\")\n\n database_path = os.path.join(colmap_dir, \"database.db\")\n sparse_dir = os.path.join(colmap_dir, \"sparse\")\n os.makedirs(sparse_dir, exist_ok=True)\n\n # Set environment variable\n # Set 'offscreen' platform for Qt to run without a display server (e.g., in a container)\n env = os.environ.copy()\n env['QT_QPA_PLATFORM'] = 'offscreen'\n\n # Feature extraction\n print(\"1/4: Extracting features...\")\n subprocess.run([\n 'colmap', 'feature_extractor',\n '--database_path', database_path,\n '--image_path', image_dir,\n '--ImageReader.single_camera', '1', # Assume a single camera for all images\n '--ImageReader.camera_model', 
def run_colmap_reconstruction(image_dir, colmap_dir, use_gpu=True):
    """Estimate camera poses and a sparse 3D point cloud with COLMAP.

    Pipeline: feature extraction -> exhaustive matching -> sparse mapping ->
    TXT export of the reconstructed model.

    Args:
        image_dir: directory of input frames.
        colmap_dir: working directory for the database and sparse model.
        use_gpu: toggle SIFT extraction/matching on the GPU. Default True
            preserves the original behavior; this notebook targets CPU-only
            Kaggle kernels, so pass False there.

    Returns:
        Path to the sparse model directory (now also containing TXT files).

    Raises:
        subprocess.CalledProcessError: if any COLMAP stage fails.
        FileNotFoundError: if mapping produced no model directory.
    """
    print("Running SfM reconstruction with COLMAP...")

    database_path = os.path.join(colmap_dir, "database.db")
    sparse_dir = os.path.join(colmap_dir, "sparse")
    os.makedirs(sparse_dir, exist_ok=True)

    # Run Qt headless so COLMAP works without a display server.
    env = os.environ.copy()
    env['QT_QPA_PLATFORM'] = 'offscreen'

    gpu_flag = '1' if use_gpu else '0'

    # Feature extraction
    print("1/4: Extracting features...")
    subprocess.run([
        'colmap', 'feature_extractor',
        '--database_path', database_path,
        '--image_path', image_dir,
        '--ImageReader.single_camera', '1',      # all frames share one camera
        '--ImageReader.camera_model', 'OPENCV',  # OPENCV intrinsics model
        '--SiftExtraction.use_gpu', gpu_flag,
    ], check=True, env=env)

    # Feature matching (exhaustive tries all O(n^2) image pairs;
    # sequential_matcher is the cheaper alternative for video frames).
    print("2/4: Matching features...")
    subprocess.run([
        'colmap', 'exhaustive_matcher',
        '--database_path', database_path,
        '--SiftMatching.use_gpu', gpu_flag,
    ], check=True, env=env)

    # Sparse reconstruction
    print("3/4: Sparse reconstruction...")
    subprocess.run([
        'colmap', 'mapper',
        '--database_path', database_path,
        '--image_path', image_dir,
        '--output_path', sparse_dir,
        '--Mapper.ba_global_max_num_iterations', '20',  # speed up global BA
        '--Mapper.ba_local_max_num_iterations', '10',   # speed up local BA
    ], check=True, env=env)

    # Export to text format
    print("4/4: Exporting to text format...")
    model_dir = os.path.join(sparse_dir, '0')
    if not os.path.exists(model_dir):
        # COLMAP may number models differently; use the first one found.
        subdirs = [d for d in os.listdir(sparse_dir)
                   if os.path.isdir(os.path.join(sparse_dir, d))]
        if not subdirs:
            raise FileNotFoundError("COLMAP reconstruction failed: No model directory found")
        model_dir = os.path.join(sparse_dir, subdirs[0])

    subprocess.run([
        'colmap', 'model_converter',
        '--input_path', model_dir,
        '--output_path', model_dir,
        '--output_type', 'TXT',
    ], check=True, env=env)

    print(f"COLMAP reconstruction complete: {model_dir}")
    return model_dir
def convert_cameras_to_pinhole(input_file, output_file):
    """Rewrite a COLMAP cameras.txt so every camera uses the PINHOLE model.

    PINHOLE parameters are fx, fy, cx, cy. Distortion coefficients of
    richer models are dropped. Unlike the previous version — which guessed
    fx = fy = max(width, height) for everything except OPENCV — models that
    store real intrinsics (SIMPLE_RADIAL, RADIAL, FULL_OPENCV, ...) now keep
    their actual focal length and principal point.

    Args:
        input_file: path to a COLMAP TXT-format cameras.txt.
        output_file: destination path for the converted file.
    """
    print(f"Reading camera file: {input_file}")

    with open(input_file, 'r') as f:
        lines = f.readlines()

    # Models whose first four params are fx, fy, cx, cy
    # (per COLMAP's camera model definitions).
    FX_FY_MODELS = {"OPENCV", "FULL_OPENCV", "OPENCV_FISHEYE", "THIN_PRISM_FISHEYE"}
    # Models whose params begin with a single focal length f, then cx, cy.
    F_MODELS = {"SIMPLE_PINHOLE", "SIMPLE_RADIAL", "SIMPLE_RADIAL_FISHEYE",
                "RADIAL", "RADIAL_FISHEYE", "FOV"}

    converted_count = 0
    with open(output_file, 'w') as f:
        for line in lines:
            # Pass comments and blank lines through untouched.
            if line.startswith('#') or line.strip() == '':
                f.write(line)
                continue

            parts = line.strip().split()
            if len(parts) < 4:
                # Not a recognizable camera record; keep as-is.
                f.write(line)
                continue

            cam_id, model, width, height = parts[0], parts[1], parts[2], parts[3]
            params = parts[4:]

            if model == "PINHOLE":
                # Already the target model.
                f.write(line)
                continue

            if model in FX_FY_MODELS and len(params) >= 4:
                fx, fy, cx, cy = params[0], params[1], params[2], params[3]
            elif model in F_MODELS and len(params) >= 3:
                fx = fy = params[0]
                cx, cy = params[1], params[2]
            else:
                # Unknown model: estimate focal as the larger image dimension
                # and place the principal point at the image center.
                fx = fy = max(float(width), float(height))
                cx = float(width) / 2
                cy = float(height) / 2

            f.write(f"{cam_id} PINHOLE {width} {height} {fx} {fy} {cx} {cy}\n")
            converted_count += 1

    print(f"Converted {converted_count} cameras to PINHOLE format")
def prepare_gaussian_splatting_data(image_dir, colmap_model_dir):
    """Lay out images and the COLMAP model in the directory structure
    expected by the Gaussian Splatting trainer:

        {WORK_DIR}/data/video/images/      <- training images
        {WORK_DIR}/data/video/sparse/0/    <- cameras/images/points3D .txt

    Args:
        image_dir: directory of prepared frames.
        colmap_model_dir: directory holding the TXT-exported COLMAP model.

    Returns:
        Path to the prepared data directory.
    """
    print("Preparing data for Gaussian Splatting...")

    # Assumes WORK_DIR is defined at module level.
    data_dir = f"{WORK_DIR}/data/video"
    os.makedirs(f"{data_dir}/sparse/0", exist_ok=True)
    os.makedirs(f"{data_dir}/images", exist_ok=True)

    # Copy images
    print("Copying images...")
    img_count = 0
    for img_file in os.listdir(image_dir):
        if img_file.lower().endswith(('.jpg', '.jpeg', '.png')):
            shutil.copy(
                os.path.join(image_dir, img_file),
                f"{data_dir}/images/{img_file}"
            )
            img_count += 1
    print(f"Copied {img_count} images")

    # Convert and copy the camera file to PINHOLE format.
    print("Converting camera model to PINHOLE format...")
    convert_cameras_to_pinhole(
        os.path.join(colmap_model_dir, 'cameras.txt'),
        f"{data_dir}/sparse/0/cameras.txt"
    )

    # Copy the remaining model files.
    # BUG FIX: the destination previously used a literal "(unknown)"
    # placeholder, sending both images.txt and points3D.txt to the same
    # bogus path (and logging "(unknown)"); use the real filename.
    for filename in ['images.txt', 'points3D.txt']:
        src = os.path.join(colmap_model_dir, filename)
        dst = f"{data_dir}/sparse/0/{filename}"
        if os.path.exists(src):
            shutil.copy(src, dst)
            print(f"Copied {filename}")
        else:
            print(f"Warning: {filename} not found")

    print(f"Data preparation complete: {data_dir}")
    return data_dir



def train_gaussian_splatting(data_dir, iterations=3000):
    """Train the Gaussian Splatting model by invoking the repository's
    train.py on the prepared data directory.

    Args:
        data_dir: output of prepare_gaussian_splatting_data().
        iterations: number of training iterations.

    Returns:
        Path to the trained model directory.

    Raises:
        subprocess.CalledProcessError: if training exits non-zero.
    """
    print(f"Training Gaussian Splatting model for {iterations} iterations...")

    # Assumes WORK_DIR is defined at module level.
    model_path = f"{WORK_DIR}/output/video"

    cmd = [
        sys.executable, 'train.py',
        '-s', data_dir,
        '-m', model_path,
        '--iterations', str(iterations),
        '--eval'  # also produce a held-out test split for rendering
    ]

    # Execute the training script from inside the repository checkout.
    subprocess.run(cmd, cwd=WORK_DIR, check=True)

    return model_path
def render_video(model_path, output_video_path, iteration=3000):
    """Render the trained model's views and assemble them into an MP4.

    Runs the repository's render.py, locates the produced PNG frames, and
    encodes them with ffmpeg. Returns True on success, False otherwise.
    """
    print("Rendering video...")

    # Invoke render.py from inside the repository checkout.
    subprocess.run(
        [sys.executable, 'render.py', '-m', model_path, '--iteration', str(iteration)],
        cwd=WORK_DIR, check=True,
    )

    # Renders land under test/ or train/ depending on the training split.
    candidates = (
        f"{model_path}/test/ours_{iteration}/renders",
        f"{model_path}/train/ours_{iteration}/renders",
    )
    render_dir = next((d for d in candidates if os.path.exists(d)), None)

    if render_dir is not None:
        print(f"Rendering directory found: {render_dir}")
        # Sorted PNG names give the correct frame order for the video.
        frame_names = sorted(f for f in os.listdir(render_dir) if f.endswith('.png'))
        if frame_names:
            print(f"Found {len(frame_names)} rendered images")

            # Encode with ffmpeg:
            #   -y                 overwrite without asking
            #   -framerate 30      input at 30 FPS
            #   -pattern_type glob frames selected by glob pattern
            #   -c:v libx264       H.264 codec
            #   -pix_fmt yuv420p   broadly compatible pixel format
            #   -crf 18            high quality (lower CRF = higher quality)
            subprocess.run([
                'ffmpeg', '-y',
                '-framerate', '30',
                '-pattern_type', 'glob',
                '-i', f"{render_dir}/*.png",
                '-c:v', 'libx264',
                '-pix_fmt', 'yuv420p',
                '-crf', '18',
                output_video_path,
            ], check=True)

            print(f"Video saved: {output_video_path}")
            return True

    print("Error: Rendering directory not found or no images rendered")
    return False



def create_gif(video_path, gif_path):
    """Turn an MP4 into an endlessly looping animated GIF via ffmpeg.

    Returns True when the GIF file exists afterwards, else False.
    """
    print("Creating animated GIF...")

    # Filter chain: stretch playback 8x (setpts), drop to 10 FPS, and scale
    # to 720 px wide (auto height) with Lanczos resampling; -loop 0 repeats
    # the GIF forever.
    gif_filters = 'setpts=8*PTS,fps=10,scale=720:-1:flags=lanczos'
    subprocess.run(
        ['ffmpeg', '-y', '-i', video_path, '-vf', gif_filters, '-loop', '0', gif_path],
        check=True,
    )

    if not os.path.exists(gif_path):
        return False

    size_mb = os.path.getsize(gif_path) / (1024 * 1024)
    print(f"GIF creation complete: {gif_path} ({size_mb:.2f} MB)")
    return True
def normalize_image_sizes(image_dir, output_dir=None, target_size=1200):
    """
    Center-crop and resize every image in a directory to a square.

    Args:
        image_dir: Directory containing input images.
        output_dir: Where processed images are written; defaults to in-place
            (overwriting the originals in image_dir).
        target_size: Edge length of the square output (e.g. 1200 -> 1200x1200).

    Returns:
        The number of images successfully converted.
    """
    if output_dir is None:
        output_dir = image_dir

    os.makedirs(output_dir, exist_ok=True)

    print(f"Normalizing image sizes to {target_size}x{target_size}...")

    size_stats = {}
    converted_count = 0

    # Only jpg/jpeg/png files, processed in name order.
    image_names = [n for n in sorted(os.listdir(image_dir))
                   if n.lower().endswith(('.jpg', '.jpeg', '.png'))]

    for img_file in image_names:
        input_path = os.path.join(image_dir, img_file)
        output_path = os.path.join(output_dir, img_file)

        try:
            img = Image.open(input_path)
            original_size = img.size  # (width, height)

            # Tally the distribution of incoming resolutions for the report.
            size_key = f"{original_size[0]}x{original_size[1]}"
            size_stats[size_key] = size_stats.get(size_key, 0) + 1

            # Center crop preserves the aspect ratio of the retained region;
            # resize_only() is the aspect-distorting alternative.
            squared = center_crop_and_resize(img, target_size)
            squared.save(output_path, quality=95)
            converted_count += 1

            print(f" βœ“ {img_file}: {original_size} β†’ {target_size}x{target_size}")

        except Exception as e:
            print(f" βœ— Error processing {img_file}: {e}")

    print(f"\nConversion complete: {converted_count} images")
    print(f"Original size distribution: {size_stats}")
    return converted_count
def center_crop_and_resize(img, target_size):
    """Crop the largest centered square from `img`, then scale it to
    target_size x target_size using Lanczos resampling."""
    width, height = img.size

    # The square edge is the smaller of the two dimensions.
    crop_size = min(width, height)

    # Offsets that center the square within the original frame.
    left = (width - crop_size) // 2
    top = (height - crop_size) // 2
    box = (left, top, left + crop_size, top + crop_size)

    return img.crop(box).resize((target_size, target_size), Image.Resampling.LANCZOS)



def resize_only(img, target_size):
    """Scale `img` straight to target_size x target_size (aspect ratio may
    distort, unlike center_crop_and_resize)."""
    return img.resize((target_size, target_size), Image.Resampling.LANCZOS)
def main():
    """Run the full pipeline: setup -> frames -> COLMAP -> train -> render.

    All errors are caught, printed with a traceback, and swallowed so the
    notebook cell finishes cleanly.
    """
    print("="*60)
    print("Gaussian Splatting Generation from MP4 Video")
    print("="*60)
    try:
        # Step 1: Environment Setup
        setup_environment()

        # Step 2: Extract frames from the input folder
        frame_dir = f"{COLMAP_DIR}/images"
        process_frames_from_folder(IMAGE_PATH, frame_dir, max_frames=100)

        # Step 2.5: Normalize image sizes to a common square resolution
        print("\n" + "="*60)
        print("Step 2.5: Normalizing image sizes...")
        print("="*60)
        normalize_image_sizes(frame_dir, target_size=800)

        # Step 3: Estimate camera info with COLMAP
        colmap_model_dir = run_colmap_reconstruction(frame_dir, COLMAP_DIR)

        # Step 4: Prepare data layout for Gaussian Splatting
        data_dir = prepare_gaussian_splatting_data(frame_dir, colmap_model_dir)

        # Step 5: Train the model
        model_path = train_gaussian_splatting(data_dir, iterations=60)

        # Step 6: Render the result video
        os.makedirs(OUTPUT_DIR, exist_ok=True)
        video_path = f"{OUTPUT_DIR}/gaussian_splatting_video.mp4"
        rendered_ok = render_video(model_path, video_path, iteration=60)

        if rendered_ok:
            print("="*60)
            print(f"Success! Video generation complete: {video_path}")
            print("="*60)

            # Create a shareable GIF from the video.
            gif_path = f"{OUTPUT_DIR}/gaussian_splatting_video.gif"
            create_gif(video_path, gif_path)

            # Display inline. Note: this local import deliberately shadows
            # PIL's Image inside main() only.
            from IPython.display import Image
            display(Image(open(gif_path, 'rb').read()))
        else:
            print("Warning: Rendering complete, but video was not generated")

    except Exception as e:
        print(f"Error: {str(e)}")
        import traceback
        traceback.print_exc()


if __name__ == "__main__":
    main()

# Free kernel memory held by the pipeline's intermediates.
import gc
gc.collect()