Upload mast3r-3d-reconstruction-for-gaussian-splat.ipynb
mast3r-3d-reconstruction-for-gaussian-splat.ipynb
ADDED
{"metadata":{"kernelspec":{"name":"python3","display_name":"Python 3","language":"python"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.12.12"},"accelerator":"GPU","colab":{"gpuType":"T4","provenance":[]},"kaggle":{"accelerator":"nvidiaTeslaT4","dataSources":[{"sourceId":14628798,"sourceType":"datasetVersion","datasetId":1429416}],"dockerImageVersionId":31260,"isInternetEnabled":true,"language":"python","sourceType":"notebook","isGpuEnabled":true},"papermill":{"default_parameters":{},"duration":967.270978,"end_time":"2026-01-20T01:22:34.649213","environment_variables":{},"exception":null,"input_path":"__notebook__.ipynb","output_path":"__notebook__.ipynb","parameters":{},"start_time":"2026-01-20T01:06:27.378235","version":"2.6.0"},"widgets":{"application/vnd.jupyter.widget-state+json":{"state":{"044f8e96b50e4c33b25e342091f4ec64":{"model_module":"@jupyter-widgets/base","model_module_version":"2.0.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"2.0.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border_bottom":null,"border_left":null,"border_right":null,"border_top":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"129b638c005640a591de9920f26201d1":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"HTMLStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"HTMLStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"StyleView","background":null,"description_width":"","font_size":null,"text_color":null}},"2199c02789dc4f23accfd256dd3cac19":{"model_module":"@jupyter-widgets/base","model_module_version":"2.0.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"2.0.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border_bottom":null,"border_left":null,"border_right":null,"border_top":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"2aedd953df
df4ab98df88aefe1133503":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"ProgressStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"2d210fca524a4c9abcea1f361b86cc2c":{"model_module":"@jupyter-widgets/base","model_module_version":"2.0.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"2.0.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border_bottom":null,"border_left":null,"border_right":null,"border_top":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"33f1d82b482d4fa6b0a99cab070ca2ee":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"HBoxModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"2.0.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_801fbba63af94d81bc548c40c3660ce4","IPY_MODEL_66eb589dd72c4555a561d3095df91257","IPY_MODEL_c11823a2992e4c33ad23d0b61b520751"],"layout":"IPY_MODEL_476a9de20d434d8d81d6177e50f1a6af","tabbable":null,"tooltip":null}},"40f2d021b0174597b5502a73cc88a808":{"model_module":"@jupyter-widgets/base","model_module_version":"2.0.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"2.0.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border_bottom":null,"border_left":null,"border_right":null,"border_top":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"476a9de20d434d8d81d6177e50f1a6af":{"model_module":"@jupyter-widgets/base","model_module_version":"2.0.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"2.0.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"LayoutView","align_content":null,"alig
n_items":null,"align_self":null,"border_bottom":null,"border_left":null,"border_right":null,"border_top":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"4c671a7ee42b4015aed241c0036c60b7":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"2.0.0","_view_name":"HTMLView","description":"","description_allow_html":false,"layout":"IPY_MODEL_a661de9610b34f9cb36a026a4e15aae9","placeholder":"β","style":"IPY_MODEL_aec2ed624beb4f7b917880dc4b550370","tabbable":null,"tooltip":null,"value":"β346M/346Mβ[00:02<00:00,β246MB/s]"}},"52ddde5887ed4a3d83c8adebc13d2387":{"model_module":"@jupyter-widgets/base","model_module_version":"2.0.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"2.0.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border_bottom":null,"border_left":null,"border_right":null,"border_top":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"5687e79b1d974c5cabfbeba43a3694c5":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"2.0.0","_view_name":"HTMLView","description":"","description_allow_html":false,"layout":"IPY_MODEL_e212b430e00d4a5d967a385deecaab91","placeholder":"β","style":"IPY_MODEL_e20b857897f74db885a8dfc6d96887d5","tabbable":null,"tooltip":null,"value":"model.safetensors:β100%"}},"5859bcf5772e4820bec3683d25c9842a":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"HBoxModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"2.0.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_5687e79b1d974c5cabfbeba43a3694c5","IPY_MODEL_ae3558f346c14038a75a88bdce4d9fbd","IPY_MODEL_4c671a7ee42b4015aed241c0036c60b7"],"layout":"
IPY_MODEL_c0e52e28ecda4e6696bcf3b7647cd6ed","tabbable":null,"tooltip":null}},"5c4e4442fa7a4f249b9d31d759f42dfa":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"HBoxModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"2.0.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_a33ea7c326fe4859a6eeb6324537e29d","IPY_MODEL_985ea3339fb84d28b9ad54cecc5ca023","IPY_MODEL_e32df049e2e74f7f9db8b8e029b03209"],"layout":"IPY_MODEL_2199c02789dc4f23accfd256dd3cac19","tabbable":null,"tooltip":null}},"62081223720d477b91f099e636f67ba2":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"HTMLStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"HTMLStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"StyleView","background":null,"description_width":"","font_size":null,"text_color":null}},"66eb589dd72c4555a561d3095df91257":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"FloatProgressModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"2.0.0","_view_name":"ProgressView","bar_style":"success","description":"","description_allow_html":false,"layout":"IPY_MODEL_2d210fca524a4c9abcea1f361b86cc2c","max":436,"min":0,"orientation":"horizontal","style":"IPY_MODEL_db95f8aa3a684369afcd0a73fda7b405","tabbable":null,"tooltip":null,"value":436}},"6f552fcc4d28402d91491bfaf2989e81":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"ProgressStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"71c6c07841974c91b9a490f34ad3f556":{"model_module":"@jupyter-widgets/base","model_module_version":"2.0.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"2.0.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border_bottom":null,"border_left":null,"border_right":null,"border_top":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"801fbba63af94d81bc548c40c3660ce4":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_mod
el_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"2.0.0","_view_name":"HTMLView","description":"","description_allow_html":false,"layout":"IPY_MODEL_71c6c07841974c91b9a490f34ad3f556","placeholder":"β","style":"IPY_MODEL_ea596f96884c4955a34cb895b3564ef1","tabbable":null,"tooltip":null,"value":"preprocessor_config.json:β100%"}},"898e4be9614645c1b11417befaa72cc7":{"model_module":"@jupyter-widgets/base","model_module_version":"2.0.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"2.0.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border_bottom":null,"border_left":null,"border_right":null,"border_top":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"985ea3339fb84d28b9ad54cecc5ca023":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"FloatProgressModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"2.0.0","_view_name":"ProgressView","bar_style":"success","description":"","description_allow_html":false,"layout":"IPY_MODEL_40f2d021b0174597b5502a73cc88a808","max":548,"min":0,"orientation":"horizontal","style":"IPY_MODEL_2aedd953dfdf4ab98df88aefe1133503","tabbable":null,"tooltip":null,"value":548}},"a33ea7c326fe4859a6eeb6324537e29d":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"2.0.0","_view_name":"HTMLView","description":"","description_allow_html":false,"layout":"IPY_MODEL_52ddde5887ed4a3d83c8adebc13d2387","placeholder":"β","style":"IPY_MODEL_e06705139cd04e1ea65f358a06c3f7cc","tabbable":null,"tooltip":null,"value":"config.json:β100%"}},"a661de9610b34f9cb36a026a4e15aae9":{"model_module":"@jupyter-widgets/base","model_module_version":"2.0.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"2.0.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border_bottom":null,"border_left":null,"border_right":null,"border_top":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_ite
ms":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"ae3558f346c14038a75a88bdce4d9fbd":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"FloatProgressModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"2.0.0","_view_name":"ProgressView","bar_style":"success","description":"","description_allow_html":false,"layout":"IPY_MODEL_c4dbbad7fcbf40209cc53a5481508ca6","max":346345912,"min":0,"orientation":"horizontal","style":"IPY_MODEL_6f552fcc4d28402d91491bfaf2989e81","tabbable":null,"tooltip":null,"value":346345912}},"aec2ed624beb4f7b917880dc4b550370":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"HTMLStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"HTMLStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"StyleView","background":null,"description_width":"","font_size":null,"text_color":null}},"c0e52e28ecda4e6696bcf3b7647cd6ed":{"model_module":"@jupyter-widgets/base","model_module_version":"2.0.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"2.0.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border_bottom":null,"border_left":null,"border_right":null,"border_top":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"c11823a2992e4c33ad23d0b61b520751":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"2.0.0","_view_name":"HTMLView","description":"","description_allow_html":false,"layout":"IPY_MODEL_044f8e96b50e4c33b25e342091f4ec64","placeholder":"β","style":"IPY_MODEL_129b638c005640a591de9920f26201d1","tabbable":null,"tooltip":null,"value":"β436/436β[00:00<00:00,β48.9kB/s]"}},"c4dbbad7fcbf40209cc53a5481508ca6":{"model_module":"@jupyter-widgets/base","model_module_version":"2.0.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"2.0.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border_bottom":null,"border_left":null,"border_right":null,"border
_top":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"db95f8aa3a684369afcd0a73fda7b405":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"ProgressStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"e06705139cd04e1ea65f358a06c3f7cc":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"HTMLStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"HTMLStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"StyleView","background":null,"description_width":"","font_size":null,"text_color":null}},"e20b857897f74db885a8dfc6d96887d5":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"HTMLStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"HTMLStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"StyleView","background":null,"description_width":"","font_size":null,"text_color":null}},"e212b430e00d4a5d967a385deecaab91":{"model_module":"@jupyter-widgets/base","model_module_version":"2.0.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"2.0.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border_bottom":null,"border_left":null,"border_right":null,"border_top":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"e32df049e2e74f7f9db8b8e029b03209":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"2.0.0","_view_name":"HTMLView","description":"","description_allow_html":false,"layout":"IPY_MODEL_898e4be9614645c1b11417befaa72cc7","placeholder":"β","style":"IPY_MODEL_62081223720d477b91f099e636f67ba2","tabbable":null,"tooltip":null,"val
ue":"β548/548β[00:00<00:00,β76.3kB/s]"}},"ea596f96884c4955a34cb895b3564ef1":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"HTMLStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"HTMLStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"StyleView","background":null,"description_width":"","font_size":null,"text_color":null}}},"version_major":2,"version_minor":0}}},"nbformat_minor":4,"nbformat":4,"cells":[{"cell_type":"code","source":"","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"## **MASt3R 3D Reconstruction for Gaussian Splat**\n","metadata":{"id":"qDQLX3PArmh8","papermill":{"duration":0.003504,"end_time":"2026-01-20T01:06:31.022336","exception":false,"start_time":"2026-01-20T01:06:31.018832","status":"completed"},"tags":[]}},{"cell_type":"code","source":"","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"\n\n---\n\n## **Pipeline Explanation**\n\nThis is a **complete 3D reconstruction pipeline** that transforms regular photos into interactive 3D scenes using Gaussian Splatting. It replaces traditional photogrammetry tools (COLMAP) with the modern **MASt3R model** for faster, more efficient processing.\n\n### **Pipeline Steps:**\n\n1. **Biplet-Square Normalization**\n - Converts images of any size into uniform square crops\n - Creates 2 views per image (Left/Right for landscape, Top/Bottom for portrait)\n - Preserves edge information that would be lost with single center crops\n\n2. **DINO Pair Selection**\n - Uses DINOv2 (vision transformer) to find visually similar image pairs\n - Intelligently limits pairs to prevent memory overflow\n - Ensures good coverage across all images for robust reconstruction\n\n3. **MASt3R Reconstruction**\n - Processes selected image pairs to estimate 3D geometry\n - Outputs: 3D point cloud + camera poses for each image\n - **Replaces the entire COLMAP pipeline** (no feature matching needed)\n\n4. **COLMAP Format Conversion**\n - Converts MASt3R output to COLMAP-compatible binary format\n - Handles coordinate transformations (camera-to-world β world-to-camera)\n - Downsamples point cloud to manageable size (default: 1M points)\n\n5. **Gaussian Splatting Training**\n - Trains a neural 3D representation from the reconstructed scene\n - Optimizes millions of 3D Gaussians to accurately reproduce the input views\n - Produces a real-time renderable 3D model\n\n---\n\n## **What is MASt3R?**\n\n**MASt3R** (Matching and Stereo 3D Reconstruction) is a state-of-the-art AI model that solves two critical problems in 3D reconstruction simultaneously:\n\n### **Traditional Approach (COLMAP):**\n```\nImages β Feature Detection (ALIKED) β Feature Matching (LightGlue) \n β Geometric Verification β Sparse Reconstruction (COLMAP)\n β Dense Reconstruction\n```\n**Problem:** Multiple separate models, slow, requires careful parameter tuning\n\n### **MASt3R Approach:**\n```\nImage Pairs β MASt3R Model β 3D Points + Camera Poses (done!)\n```\n**Advantage:** Single end-to-end model, faster, more robust\n\n### **How MASt3R Works:**\n\n1. **Input:** Takes pairs of overlapping images\n2. **Processing:** \n - Encodes images with Vision Transformer (ViT)\n - Predicts dense correspondences between images\n - Estimates depth for each pixel\n - Calculates relative camera poses\n3. 
**Output:** \n - Dense 3D point cloud\n - Camera positions and orientations\n - Confidence scores for each prediction\n\n### **Key Benefits:**\n- ✅ **No manual feature detection** needed\n- ✅ **Works with challenging scenes** (textureless, reflective surfaces)\n- ✅ **Faster than traditional pipelines**\n- ✅ **Built-in global alignment** for multi-view consistency\n- ✅ 
**Memory efficient** (processes pairs independently)\n\n---\n\n## **Why This Pipeline is Special:**\n\n- **Memory-conscious:** Designed for limited hardware (16GB GPU)\n- **Modular:** Each step can be run independently\n- **Robust:** Combines classical vision (DINO) with modern learning (MASt3R)\n- **End-to-end:** Raw photos β Interactive 3D model with one command\n- **Production-ready:** Includes error handling, progress tracking, binary format conversion\n\nThis pipeline democratizes high-quality 3D reconstruction by making it accessible on consumer hardware while maintaining professional results.\n\n---","metadata":{}},{"cell_type":"code","source":"","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"# Setup","metadata":{}},{"cell_type":"markdown","source":"This technical script details a specialized computational pipeline designed to prepare data for Gaussian Splatting, a method used to generate high-quality 3D scenes from 2D images. The workflow stands out by integrating the MASt3R model to handle complex tasks like image matching and spatial positioning, effectively replacing traditional tools like COLMAP to streamline the reconstruction process. Beyond its core logic, the code prioritizes rigorous memory management and environment configuration, ensuring the system can handle intensive graphical processing within restricted hardware environments. Ultimately, this source serves as a comprehensive automation framework that installs necessary dependencies and optimizes the transition from raw visual data to a structured 3D environment.","metadata":{}},{"cell_type":"code","source":"# MASt3R-based Gaussian Splatting Pipeline\n# Preserves: DINO pair selection + Biplet-Square Normalization\n# Replaces: ALIKED/LightGlue/COLMAP with MASt3R\n\nimport os\nimport sys\nimport gc\nimport h5py\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom tqdm import tqdm\nfrom pathlib import Path\nimport subprocess\nfrom PIL import Image, ImageFilter\nimport struct\n\n# Transformers for DINO\nfrom transformers import AutoImageProcessor, AutoModel\n\n# ============================================================================\n# Configuration\n# ============================================================================\nclass Config:\n # Feature extraction\n N_KEYPOINTS = 8192\n IMAGE_SIZE = 1024\n\n # Pair selection - CRITICAL for memory\n GLOBAL_TOPK = 20 # Reduced from 50 - each image pairs with top 20\n MIN_MATCHES = 10\n RATIO_THR = 1.2\n\n # Paths\n DINO_MODEL = \"facebook/dinov2-base\"\n \n # MASt3R - Reduced size for memory\n MAST3R_MODEL = \"/kaggle/working/mast3r/checkpoints/MASt3R_ViTLarge_BaseDecoder_512_catmlpdpt_metric.pth\"\n MAST3R_IMAGE_SIZE = 224 # Small size to save memory\n\n # Device\n DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\n# ============================================================================\n# Memory Management Utilities\n# ============================================================================\n\ndef clear_memory():\n \"\"\"Aggressively clear GPU and CPU memory\"\"\"\n gc.collect()\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n torch.cuda.synchronize()\n\ndef get_memory_info():\n \"\"\"Get current memory usage\"\"\"\n if torch.cuda.is_available():\n allocated = torch.cuda.memory_allocated() / 1024**3\n reserved = torch.cuda.memory_reserved() / 1024**3\n print(f\"GPU Memory - Allocated: {allocated:.2f}GB, Reserved: {reserved:.2f}GB\")\n \n import 
psutil\n cpu_mem = psutil.virtual_memory().percent\n print(f\"CPU Memory Usage: {cpu_mem:.1f}%\")\n\n\n# ============================================================================\n# Environment Setup\n# ============================================================================\n\ndef run_cmd(cmd, check=True, capture=False):\n \"\"\"Run command with better error handling\"\"\"\n print(f\"Running: {' '.join(cmd)}\")\n result = subprocess.run(\n cmd,\n capture_output=capture,\n text=True,\n check=False\n )\n if check and result.returncode != 0:\n print(f\"β Command failed with code {result.returncode}\")\n if capture:\n print(f\"STDOUT: {result.stdout}\")\n print(f\"STDERR: {result.stderr}\")\n return result\n\n\ndef setup_base_environment():\n \"\"\"Setup base Python environment\"\"\"\n print(\"\\n=== Setting up Base Environment ===\")\n \n # NumPy fix for Python 3.12\n print(\"\\nπ¦ Fixing NumPy...\")\n run_cmd([sys.executable, \"-m\", \"pip\", \"uninstall\", \"-y\", \"numpy\"])\n run_cmd([sys.executable, \"-m\", \"pip\", \"install\", \"numpy==1.26.4\"])\n \n # PyTorch\n print(\"\\nπ¦ Installing PyTorch...\")\n run_cmd([\n sys.executable, \"-m\", \"pip\", \"install\",\n \"torch\", \"torchvision\", \"torchaudio\"\n ])\n \n # Core utilities\n print(\"\\nπ¦ Installing core utilities...\")\n run_cmd([\n sys.executable, \"-m\", \"pip\", \"install\",\n \"opencv-python\",\n \"pillow\",\n \"imageio\",\n \"imageio-ffmpeg\",\n \"plyfile\",\n \"tqdm\",\n \"tensorboard\",\n \"scipy\", # for rotation conversions and image resizing\n \"psutil\" # for memory monitoring\n ])\n \n # Transformers for DINO\n print(\"\\nπ¦ Installing transformers...\")\n run_cmd([\n sys.executable, \"-m\", \"pip\", \"install\",\n \"transformers==4.40.0\"\n ])\n \n # pycolmap for COLMAP format\n print(\"\\nπ¦ Installing pycolmap...\")\n run_cmd([sys.executable, \"-m\", \"pip\", \"install\", \"pycolmap\"])\n \n print(\"β Base environment setup complete!\")\n\n\ndef setup_mast3r():\n \"\"\"Install and setup MASt3R\"\"\"\n print(\"\\n=== Setting up MASt3R ===\")\n \n os.chdir('/kaggle/working')\n \n # Remove existing installation\n if os.path.exists('mast3r'):\n print(\"Removing existing MASt3R installation...\")\n os.system('rm -rf mast3r')\n \n # Clone repository\n print(\"Cloning MASt3R repository...\")\n os.system('git clone --recursive https://github.com/naver/mast3r')\n os.chdir('/kaggle/working/mast3r')\n \n # Check dust3r directory\n print(\"Checking dust3r structure...\")\n os.system('ls -la dust3r/')\n \n # Install dust3r\n print(\"Installing dust3r...\")\n os.system('cd dust3r && python -m pip install -e .')\n \n # Install croco\n print(\"Installing croco...\")\n os.system('cd dust3r/croco && python -m pip install -e .')\n \n # Install requirements\n print(\"Installing MASt3R requirements...\")\n os.system('pip install -r requirements.txt')\n \n # Download model weights\n print(\"Downloading model weights...\")\n os.system('mkdir -p checkpoints')\n os.system('wget -P checkpoints/ https://download.europe.naverlabs.com/ComputerVision/MASt3R/MASt3R_ViTLarge_BaseDecoder_512_catmlpdpt_metric.pth')\n \n # Install additional dependencies\n print(\"Installing additional dependencies...\")\n os.system('pip install trimesh matplotlib roma')\n \n # Add to path\n sys.path.insert(0, '/kaggle/working/mast3r')\n sys.path.insert(0, '/kaggle/working/mast3r/dust3r')\n \n # Verification\n print(\"\\nπ Verifying MASt3R installation...\")\n try:\n from mast3r.model import AsymmetricMASt3R\n print(\" β MASt3R import: OK\")\n except 
Exception as e:\n print(f\" β MASt3R import failed: {e}\")\n raise\n \n print(\"β MASt3R setup complete!\")","metadata":{"trusted":true,"_kg_hide-output":true},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"# Biplet","metadata":{}},{"cell_type":"markdown","source":"This code provides a systematic method for standardizing image datasets by transforming various photo dimensions into uniform, high-quality squares. The script functions by analyzing the orientation of an input file and then extracting two distinct, overlapping viewsβeither left and right for landscape shots or top and bottom for portraits. By utilizing this dual-crop strategy, the program ensures that significant visual data from the edges of the original frame is preserved rather than lost to a single center cut. Ultimately, the process automates the normalization of visual data, resizing every output to a consistent resolution to prepare it for sophisticated computational tasks.","metadata":{}},{"cell_type":"code","source":"# ============================================================================\n# Step 0: Biplet-Square Normalization (PRESERVED FROM ORIGINAL)\n# ============================================================================\n\ndef normalize_image_sizes_biplet(input_dir, output_dir=None, size=1024):\n \"\"\"\n Generates two square crops (Left & Right or Top & Bottom)\n from each image in a directory.\n \"\"\"\n if output_dir is None:\n output_dir = 'output/images_biplet'\n\n os.makedirs(output_dir, exist_ok=True)\n\n print(f\"Generating 2 cropped squares (Left/Right or Top/Bottom) for each image...\")\n print()\n\n converted_count = 0\n size_stats = {}\n\n for img_file in sorted(os.listdir(input_dir)):\n if not img_file.lower().endswith(('.jpg', '.jpeg', '.png')):\n continue\n\n input_path = os.path.join(input_dir, img_file)\n\n try:\n img = Image.open(input_path)\n original_size = img.size\n\n size_key = f\"{original_size[0]}x{original_size[1]}\"\n size_stats[size_key] = size_stats.get(size_key, 0) + 1\n\n # Generate 2 crops\n crops = generate_two_crops(img, size)\n\n base_name, ext = os.path.splitext(img_file)\n for mode, cropped_img in crops.items():\n output_path = os.path.join(output_dir, f\"{base_name}_{mode}{ext}\")\n cropped_img.save(output_path, quality=95)\n\n converted_count += 1\n print(f\" β {img_file}: {original_size} β 2 square images generated\")\n\n except Exception as e:\n print(f\" β Error processing {img_file}: {e}\")\n\n print(f\"\\nProcessing complete: {converted_count} source images processed\")\n print(f\"Original size distribution: {size_stats}\")\n return converted_count\n\n\ndef generate_two_crops(img, size):\n \"\"\"\n Crops the image into a square and returns 2 variations\n (Left/Right for landscape, Top/Bottom for portrait).\n \"\"\"\n width, height = img.size\n crop_size = min(width, height)\n crops = {}\n\n if width > height:\n # Landscape β Left & Right\n positions = {\n 'left': 0,\n 'right': width - crop_size\n }\n for mode, x_offset in positions.items():\n box = (x_offset, 0, x_offset + crop_size, crop_size)\n crops[mode] = img.crop(box).resize(\n (size, size),\n Image.Resampling.LANCZOS\n )\n\n else:\n # Portrait or Square β Top & Bottom\n positions = {\n 'top': 0,\n 'bottom': height - crop_size\n }\n for mode, y_offset in positions.items():\n box = (0, y_offset, crop_size, y_offset + crop_size)\n crops[mode] = img.crop(box).resize(\n (size, size),\n Image.Resampling.LANCZOS\n )\n\n return 
crops","metadata":{"papermill":{"duration":46.280727,"end_time":"2026-01-20T01:07:23.641872","exception":false,"start_time":"2026-01-20T01:06:37.361145","status":"completed"},"tags":[],"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"# Dino","metadata":{}},{"cell_type":"markdown","source":"This code outlines a specialized workflow for identifying similar image pairs within a large dataset to optimize computational efficiency. It begins by using a DINO-based neural network to extract high-level visual signatures, which allows the system to calculate the mathematical similarity between various files. To ensure the results are manageable, the script filters these connections by selecting the top-k most similar matches and applying a diversity-focused strategy that prevents any single image from dominating the selection. Ultimately, this process serves as an intelligent pre-selection phase, ensuring that subsequent analysis focuses on the most relevant pairs while maintaining broad coverage across the entire image collection.","metadata":{}},{"cell_type":"code","source":"# ============================================================================\n# Step 1: DINO-based Pair Selection (PRESERVED FROM ORIGINAL)\n# ============================================================================\n\ndef load_torch_image(fname, device):\n \"\"\"Load image as torch tensor\"\"\"\n import torchvision.transforms as T\n\n img = Image.open(fname).convert('RGB')\n transform = T.Compose([\n T.ToTensor(),\n ])\n return transform(img).unsqueeze(0).to(device)\n\ndef extract_dino_global(image_paths, model_path, device):\n \"\"\"Extract DINO global descriptors with memory management\"\"\"\n print(\"\\n=== Extracting DINO Global Features ===\")\n print(\"Initial memory state:\")\n get_memory_info()\n\n processor = AutoImageProcessor.from_pretrained(model_path)\n model = AutoModel.from_pretrained(model_path).eval().to(device)\n\n global_descs = []\n batch_size = 4 # Small batch to save memory\n \n for i in tqdm(range(0, len(image_paths), batch_size)):\n batch_paths = image_paths[i:i+batch_size]\n batch_imgs = []\n \n for img_path in batch_paths:\n img = load_torch_image(img_path, device)\n batch_imgs.append(img)\n \n batch_tensor = torch.cat(batch_imgs, dim=0)\n \n with torch.no_grad():\n inputs = processor(images=batch_tensor, return_tensors=\"pt\", do_rescale=False).to(device)\n outputs = model(**inputs)\n desc = F.normalize(outputs.last_hidden_state[:, 1:].max(dim=1)[0], dim=1, p=2)\n global_descs.append(desc.cpu())\n \n # Clear batch memory\n del batch_tensor, inputs, outputs, desc\n clear_memory()\n\n global_descs = torch.cat(global_descs, dim=0)\n\n del model, processor\n clear_memory()\n \n print(\"After DINO extraction:\")\n get_memory_info()\n\n return global_descs\n\n\ndef build_topk_pairs(global_feats, k, device):\n \"\"\"Build top-k similar pairs from global features\"\"\"\n g = global_feats.to(device)\n sim = g @ g.T\n sim.fill_diagonal_(-1)\n\n N = sim.size(0)\n k = min(k, N - 1)\n\n topk_indices = torch.topk(sim, k, dim=1).indices.cpu()\n\n pairs = []\n for i in range(N):\n for j in topk_indices[i]:\n j = j.item()\n if i < j:\n pairs.append((i, j))\n\n # Remove duplicates\n pairs = list(set(pairs))\n \n return pairs\n\n\ndef select_diverse_pairs(pairs, max_pairs, num_images):\n \"\"\"\n Select diverse pairs to ensure good image coverage\n Strategy: Select pairs that maximize image coverage\n \"\"\"\n import random\n random.seed(42)\n \n if len(pairs) <= 
max_pairs:\n return pairs\n \n print(f\"Selecting {max_pairs} diverse pairs from {len(pairs)} candidates...\")\n \n # Count how many times each image appears in pairs\n image_counts = {i: 0 for i in range(num_images)}\n for i, j in pairs:\n image_counts[i] += 1\n image_counts[j] += 1\n \n # Sort pairs by: prefer pairs with less-connected images\n def pair_score(pair):\n i, j = pair\n # Lower score = images appear in fewer pairs = more diverse\n return image_counts[i] + image_counts[j]\n \n pairs_scored = [(pair, pair_score(pair)) for pair in pairs]\n pairs_scored.sort(key=lambda x: x[1])\n \n # Select pairs greedily to maximize coverage\n selected = []\n selected_images = set()\n \n # Phase 1: Select pairs that add new images (greedy coverage)\n for pair, score in pairs_scored:\n if len(selected) >= max_pairs:\n break\n i, j = pair\n # Prefer pairs that include new images\n if i not in selected_images or j not in selected_images:\n selected.append(pair)\n selected_images.add(i)\n selected_images.add(j)\n \n # Phase 2: Fill remaining slots with high-similarity pairs\n if len(selected) < max_pairs:\n remaining = [p for p, s in pairs_scored if p not in selected]\n random.shuffle(remaining)\n selected.extend(remaining[:max_pairs - len(selected)])\n \n print(f\"Selected pairs cover {len(selected_images)} / {num_images} images ({100*len(selected_images)/num_images:.1f}%)\")\n \n return selected\n\n\ndef get_image_pairs_dino(image_paths, max_pairs=None):\n \"\"\"DINO-based pair selection with intelligent limiting\"\"\"\n device = Config.DEVICE\n\n # DINO global features\n global_feats = extract_dino_global(image_paths, Config.DINO_MODEL, device)\n pairs = build_topk_pairs(global_feats, Config.GLOBAL_TOPK, device)\n\n print(f\"Initial pairs from DINO: {len(pairs)}\")\n \n # Apply intelligent pair selection if limit specified\n if max_pairs and len(pairs) > max_pairs:\n pairs = select_diverse_pairs(pairs, max_pairs, len(image_paths))\n \n return pairs","metadata":{"papermill":{"duration":46.280727,"end_time":"2026-01-20T01:07:23.641872","exception":false,"start_time":"2026-01-20T01:06:37.361145","status":"completed"},"tags":[],"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"# Mast3r","metadata":{}},{"cell_type":"markdown","source":"\nThis code outlines a specialized workflow for 3D scene reconstruction that utilizes the MASt3R framework as a streamlined alternative to traditional photogrammetry pipelines. By integrating image loading, model inference, and global alignment, the script efficiently transforms a series of 2D photographs into a unified point cloud. The process is characterized by a strong focus on memory management, employing strategies such as image resizing and pair limitation to maintain performance on hardware with limited resources. 
Ultimately, the system automates the complex task of spatial optimization, calculating the most accurate geometric relationship between images to build a coherent digital environment.","metadata":{}},{"cell_type":"code","source":"# ============================================================================\n# Step 2: MASt3R Reconstruction (REPLACES ALIKED/LIGHTGLUE/COLMAP)\n# ============================================================================\n\ndef load_mast3r_model(device='cuda'):\n \"\"\"Load MASt3R model\"\"\"\n from mast3r.model import AsymmetricMASt3R\n \n model = AsymmetricMASt3R.from_pretrained(Config.MAST3R_MODEL).to(device)\n model.eval()\n \n print(f\"β MASt3R model loaded on {device}\")\n return model\n\ndef load_images_for_mast3r(image_paths, size=224):\n \"\"\"Load images using DUSt3R's format with reduced size\"\"\"\n print(f\"\\n=== Loading images for MASt3R (size={size}) ===\")\n \n from dust3r.utils.image import load_images\n \n # Load images using DUSt3R's loader with reduced size\n images = load_images(image_paths, size=size, verbose=True)\n \n return images\n\ndef run_mast3r_pairs(model, image_paths, pairs, device='cuda', batch_size=1, max_pairs=None):\n \"\"\"Run MASt3R on selected pairs with memory management\"\"\"\n print(\"\\n=== Running MASt3R Reconstruction ===\")\n print(\"Initial memory state:\")\n get_memory_info()\n \n from dust3r.inference import inference\n from dust3r.cloud_opt import global_aligner, GlobalAlignerMode\n \n # Limit number of pairs if specified\n if max_pairs and len(pairs) > max_pairs:\n print(f\"Limiting pairs from {len(pairs)} to {max_pairs}\")\n # Select pairs more evenly distributed\n step = max(1, len(pairs) // max_pairs)\n pairs = pairs[::step][:max_pairs]\n \n print(f\"Processing {len(pairs)} pairs...\")\n \n # Load images in smaller size\n print(f\"Loading {len(image_paths)} images at {Config.MAST3R_IMAGE_SIZE}x{Config.MAST3R_IMAGE_SIZE}...\")\n images = load_images_for_mast3r(image_paths, size=Config.MAST3R_IMAGE_SIZE)\n \n print(f\"Loaded {len(images)} images\")\n print(\"After loading images:\")\n get_memory_info()\n \n # Create all image pairs at once\n print(f\"Creating {len(pairs)} image pairs...\")\n mast3r_pairs = []\n for idx1, idx2 in tqdm(pairs, desc=\"Preparing pairs\"):\n mast3r_pairs.append((images[idx1], images[idx2]))\n \n print(f\"Running MASt3R inference on {len(mast3r_pairs)} pairs...\")\n \n # Run inference (this returns the dict format we need)\n output = inference(mast3r_pairs, model, device, batch_size=batch_size, verbose=True)\n \n # Clear pairs from memory\n del mast3r_pairs\n clear_memory()\n \n print(\"β MASt3R inference complete\")\n print(\"After inference:\")\n get_memory_info()\n \n # Global alignment\n print(\"Running global alignment...\")\n scene = global_aligner(\n output, \n device=device, \n mode=GlobalAlignerMode.PointCloudOptimizer\n )\n \n # Clear output after creating scene\n del output\n clear_memory()\n \n print(\"Computing global alignment...\")\n loss = scene.compute_global_alignment(\n init=\"mst\", \n niter=150, # Reduced from 300\n schedule='cosine', \n lr=0.01\n )\n \n print(f\"β Global alignment complete (final loss: {loss:.6f})\")\n print(\"Final memory state:\")\n get_memory_info()\n \n return scene, 
images","metadata":{"execution":{"iopub.status.busy":"2026-01-26T17:26:04.803059Z","iopub.execute_input":"2026-01-26T17:26:04.803432Z","iopub.status.idle":"2026-01-26T17:26:04.824877Z","shell.execute_reply.started":"2026-01-26T17:26:04.803406Z","shell.execute_reply":"2026-01-26T17:26:04.824162Z"},"papermill":{"duration":46.280727,"end_time":"2026-01-20T01:07:23.641872","exception":false,"start_time":"2026-01-20T01:06:37.361145","status":"completed"},"tags":[],"trusted":true},"outputs":[],"execution_count":3},{"cell_type":"markdown","source":"# Ps1(process1)","metadata":{}},{"cell_type":"markdown","source":"This source provides a Python script designed to bridge the gap between MASt3R scene reconstructions and COLMAP, a standard format for 3D computer vision data. The code systematically extracts 3D point clouds and camera trajectories, performing necessary mathematical adjustments such as inverting camera-to-world poses and scaling focal lengths to match original image dimensions. To ensure efficiency, the script includes a downsampling mechanism that limits the total number of spatial points, preventing memory overflow while maintaining scene geometry. Finally, it serializes this processed information into binary files, specifically cameras, images, and 3D points, allowing the reconstructed scene to be opened and utilized by other specialized 3D software.","metadata":{}},{"cell_type":"code","source":"#v26\ndef extract_colmap_data(scene, image_paths, max_points=1000000):\n \"\"\"\n Extract COLMAP-compatible camera parameters and 3D points from MASt3R scene\n \n Args:\n scene: MASt3R scene object\n image_paths: List of image paths\n max_points: Maximum number of 3D points to extract (default: 1M)\n \"\"\"\n print(\"\\n=== Extracting COLMAP-compatible data ===\")\n \n # Extract point cloud\n pts_all = scene.get_pts3d()\n print(f\"pts_all type: {type(pts_all)}\")\n \n if isinstance(pts_all, list):\n print(f\"pts_all is a list with {len(pts_all)} elements\")\n if len(pts_all) > 0:\n print(f\"First element type: {type(pts_all[0])}\")\n if hasattr(pts_all[0], 'shape'):\n print(f\"First element shape: {pts_all[0].shape}\")\n \n pts_all = torch.stack([p if isinstance(p, torch.Tensor) else torch.tensor(p) \n for p in pts_all])\n print(f\"pts_all shape after conversion: {pts_all.shape}\")\n \n if len(pts_all.shape) == 4:\n print(f\"Found batched point cloud: {pts_all.shape}\")\n B, H, W, _ = pts_all.shape\n pts3d = pts_all.reshape(-1, 3).detach().cpu().numpy() \n \n # Extract colors\n colors = []\n for img_path in image_paths:\n img = Image.open(img_path).resize((W, H))\n colors.append(np.array(img))\n colors = np.stack(colors).reshape(-1, 3) / 255.0\n else:\n pts3d = pts_all.detach().cpu().numpy() if isinstance(pts_all, torch.Tensor) else pts_all\n colors = np.ones((len(pts3d), 3)) * 0.5\n \n print(f\"β Extracted {len(pts3d)} 3D points from {len(image_paths)} images\")\n \n # **DOWNSAMPLE POINTS TO REDUCE MEMORY USAGE**\n if len(pts3d) > max_points:\n print(f\"\\nβ Downsampling from {len(pts3d)} to {max_points} points to reduce memory usage...\")\n \n # Remove invalid points first\n valid_mask = ~(np.isnan(pts3d).any(axis=1) | np.isinf(pts3d).any(axis=1))\n pts3d_valid = pts3d[valid_mask]\n colors_valid = colors[valid_mask]\n \n # Random sampling\n indices = np.random.choice(len(pts3d_valid), size=max_points, replace=False)\n pts3d = pts3d_valid[indices]\n colors = colors_valid[indices]\n \n print(f\"β Downsampled to {len(pts3d)} points\")\n \n # Extract camera parameters\n print(\"Extracting 
camera parameters...\")\n \n # [IMPORTANT] MASt3R poses are in camera-to-world format.\n # COLMAP requires world-to-camera format, so we need the inverse matrix.\n poses_c2w = scene.get_im_poses().detach().cpu().numpy()\n print(f\"Retrieved camera-to-world poses: shape {poses_c2w.shape}\")\n \n # Convert camera-to-world to world-to-camera\n poses = []\n for i, pose_c2w in enumerate(poses_c2w):\n # Calculate the inverse of the 4x4 matrix\n pose_w2c = np.linalg.inv(pose_c2w)\n poses.append(pose_w2c)\n \n poses = np.array(poses)\n print(f\"Converted to world-to-camera poses for COLMAP\")\n \n # Get focal lengths and principal points\n focals = scene.get_focals().detach().cpu().numpy()\n pp = scene.get_principal_points().detach().cpu().numpy()\n print(f\"Focals shape: {focals.shape}\")\n print(f\"Principal points shape: {pp.shape}\")\n \n # MASt3R internal processing size (usually 224x224)\n mast3r_size = 224.0\n \n cameras = []\n for i, img_path in enumerate(image_paths):\n img = Image.open(img_path)\n W, H = img.size\n \n # Scale ratio relative to the original image size\n scale = W / mast3r_size\n \n # Focals are in [N, 1] format (fx=fy for isotropic cameras)\n if focals.shape[1] == 1:\n focal_mast3r = float(focals[i, 0])\n fx = fy = focal_mast3r * scale\n else:\n fx = float(focals[i, 0]) * scale\n fy = float(focals[i, 1]) * scale\n \n # Scale principal points as well\n cx = float(pp[i, 0]) * scale\n cy = float(pp[i, 1]) * scale\n \n camera = {\n 'camera_id': i + 1,\n 'model': 'PINHOLE',\n 'width': W,\n 'height': H,\n 'params': [fx, fy, cx, cy]\n }\n cameras.append(camera)\n \n if i == 0:\n print(f\"\\nExample camera 0:\")\n print(f\" Image size: {W}x{H}\")\n print(f\" MASt3R focal: {focal_mast3r:.2f}, pp: ({pp[i,0]:.2f}, {pp[i,1]:.2f})\")\n print(f\" Scaled fx={fx:.2f}, fy={fy:.2f}, cx={cx:.2f}, cy={cy:.2f}\")\n print(f\" Pose (first row): {poses[i][0]}\")\n \n print(f\"\\nβ Extracted {len(cameras)} cameras and {len(poses)} poses\")\n \n return pts3d, colors, cameras, poses\n\n\ndef save_colmap_reconstruction(pts3d, colors, cameras, poses, image_paths, output_dir):\n \"\"\"Save reconstruction in COLMAP binary format by writing files directly\"\"\"\n print(\"\\n=== Saving COLMAP reconstruction ===\")\n \n sparse_dir = Path(output_dir) / 'sparse' / '0'\n sparse_dir.mkdir(parents=True, exist_ok=True)\n \n print(f\" Writing COLMAP files directly to {sparse_dir}...\")\n \n # Write cameras.bin\n write_cameras_binary(cameras, sparse_dir / 'cameras.bin')\n print(f\" β Wrote {len(cameras)} cameras\")\n \n # Write images.bin\n write_images_binary(image_paths, cameras, poses, sparse_dir / 'images.bin')\n print(f\" β Wrote {len(image_paths)} images\")\n \n # Write points3D.bin\n num_points = write_points3d_binary(pts3d, colors, sparse_dir / 'points3D.bin')\n print(f\" β Wrote {num_points} 3D points\")\n \n print(f\"\\nβ COLMAP reconstruction saved to {sparse_dir}\")\n print(f\" Cameras: {len(cameras)}\")\n print(f\" Images: {len(image_paths)}\")\n print(f\" Points: {num_points}\")\n \n return sparse_dir\n\n\ndef write_cameras_binary(cameras, output_file):\n \"\"\"Write cameras.bin in COLMAP binary format\"\"\"\n with open(output_file, 'wb') as f:\n # Write number of cameras\n f.write(struct.pack('Q', len(cameras)))\n \n for i, cam in enumerate(cameras):\n camera_id = cam.get('camera_id', i + 1)\n \n # Model ID: 1 = PINHOLE\n model_id = 1\n width = cam['width']\n height = cam['height']\n params = cam['params'] # [fx, fy, cx, cy]\n \n f.write(struct.pack('i', camera_id))\n f.write(struct.pack('i', 
model_id))\n f.write(struct.pack('Q', width))\n f.write(struct.pack('Q', height))\n \n # Write 4 parameters for PINHOLE model\n for param in params[:4]:\n f.write(struct.pack('d', param))\n\n\ndef write_images_binary(image_paths, cameras, poses, output_file):\n \"\"\"Write images.bin in COLMAP binary format\"\"\"\n with open(output_file, 'wb') as f:\n # Write number of images\n f.write(struct.pack('Q', len(image_paths)))\n \n for i, (img_path, pose) in enumerate(zip(image_paths, poses)):\n image_id = i + 1\n camera_id = cameras[i].get('camera_id', i + 1)\n image_name = os.path.basename(img_path)\n \n # Extract rotation and translation\n R = pose[:3, :3]\n t = pose[:3, 3]\n \n # Convert rotation matrix to quaternion [w, x, y, z]\n qvec = rotmat2qvec(R)\n tvec = t\n \n # Write image data\n f.write(struct.pack('i', image_id))\n \n # Write quaternion (4 doubles)\n for q in qvec:\n f.write(struct.pack('d', float(q)))\n \n # Write translation vector (3 doubles)\n for tv in tvec:\n f.write(struct.pack('d', float(tv)))\n \n # Write camera ID\n f.write(struct.pack('i', camera_id))\n \n # Write image name (null-terminated string)\n f.write(image_name.encode('utf-8') + b'\\x00')\n \n # Write number of 2D points (0 for now, as we don't have 2D-3D correspondences)\n f.write(struct.pack('Q', 0))\n\n\ndef write_points3d_binary(pts3d, colors, output_file):\n \"\"\"Write points3D.bin in COLMAP binary format\"\"\"\n # Filter out invalid points\n valid_indices = []\n for i, pt in enumerate(pts3d):\n if not (np.isnan(pt).any() or np.isinf(pt).any()):\n valid_indices.append(i)\n \n with open(output_file, 'wb') as f:\n # Write number of points\n f.write(struct.pack('Q', len(valid_indices)))\n \n for idx, point_id in enumerate(valid_indices):\n pt = pts3d[point_id]\n color = colors[point_id]\n \n # Write point3D ID\n f.write(struct.pack('Q', point_id))\n \n # Write XYZ coordinates (3 doubles)\n for coord in pt:\n f.write(struct.pack('d', float(coord)))\n \n # Write RGB color (3 unsigned chars)\n col_int = (color * 255).astype(np.uint8)\n for c in col_int:\n f.write(struct.pack('B', int(c)))\n \n # Write error (1 double) - set to 0\n f.write(struct.pack('d', 0.0))\n \n # Write track length (number of images seeing this point)\n # Set to 0 as we don't have track information\n f.write(struct.pack('Q', 0))\n \n # Progress indicator\n if (idx + 1) % 1000000 == 0:\n print(f\" Wrote {idx + 1} / {len(valid_indices)} points...\")\n \n return len(valid_indices)\n\n\ndef rotmat2qvec(R):\n \"\"\"\n Convert rotation matrix to quaternion in COLMAP format [w, x, y, z]\n \n Args:\n R: 3x3 rotation matrix\n \n Returns:\n qvec: quaternion [w, x, y, z]\n \"\"\"\n # Ensure R is a numpy array\n R = np.asarray(R, dtype=np.float64)\n \n # Calculate trace\n trace = np.trace(R)\n \n if trace > 0:\n s = 0.5 / np.sqrt(trace + 1.0)\n w = 0.25 / s\n x = (R[2, 1] - R[1, 2]) * s\n y = (R[0, 2] - R[2, 0]) * s\n z = (R[1, 0] - R[0, 1]) * s\n elif R[0, 0] > R[1, 1] and R[0, 0] > R[2, 2]:\n s = 2.0 * np.sqrt(1.0 + R[0, 0] - R[1, 1] - R[2, 2])\n w = (R[2, 1] - R[1, 2]) / s\n x = 0.25 * s\n y = (R[0, 1] + R[1, 0]) / s\n z = (R[0, 2] + R[2, 0]) / s\n elif R[1, 1] > R[2, 2]:\n s = 2.0 * np.sqrt(1.0 + R[1, 1] - R[0, 0] - R[2, 2])\n w = (R[0, 2] - R[2, 0]) / s\n x = (R[0, 1] + R[1, 0]) / s\n y = 0.25 * s\n z = (R[1, 2] + R[2, 1]) / s\n else:\n s = 2.0 * np.sqrt(1.0 + R[2, 2] - R[0, 0] - R[1, 1])\n w = (R[1, 0] - R[0, 1]) / s\n x = (R[0, 2] + R[2, 0]) / s\n y = (R[1, 2] + R[2, 1]) / s\n z = 0.25 * s\n \n qvec = np.array([w, x, y, z], dtype=np.float64)\n 
\n    # Normalize\n    qvec = qvec / np.linalg.norm(qvec)\n    \n    return qvec","metadata":{"execution":{"iopub.status.busy":"2026-01-26T17:26:04.825900Z","iopub.execute_input":"2026-01-26T17:26:04.826318Z","iopub.status.idle":"2026-01-26T17:26:04.858614Z","shell.execute_reply.started":"2026-01-26T17:26:04.826234Z","shell.execute_reply":"2026-01-26T17:26:04.858034Z"},"papermill":{"duration":0.018921,"end_time":"2026-01-20T01:07:23.664244","exception":false,"start_time":"2026-01-20T01:07:23.645323","status":"completed"},"tags":[],"trusted":true},"outputs":[],"execution_count":4},{"cell_type":"markdown","source":"# GS (Gaussian Splatting)","metadata":{}},{"cell_type":"markdown","source":"This section sets up and runs 3D Gaussian Splatting, which optimizes a cloud of anisotropic Gaussians to reproduce a scene from posed 2D images. Setup clones the reference graphdeco-inria repository and builds its two CUDA submodules: the differentiable rasterizer and a k-nearest-neighbor helper. Training then optimizes a point cloud initialized from the COLMAP export produced above. To fit a resource-constrained GPU, the training command halves the image resolution, raises the densification gradient threshold so fewer Gaussians are spawned, and densifies and resets opacity less often.","metadata":{}},{"cell_type":"code","source":"# ============================================================================\n# Step 3: Gaussian Splatting Training\n# ============================================================================\n\ndef setup_gaussian_splatting():\n    \"\"\"Clone the Gaussian Splatting repository and build its CUDA submodules\"\"\"\n    print(\"\\n=== Setting up Gaussian Splatting ===\")\n    \n    os.chdir('/kaggle/working')\n    \n    WORK_DIR = \"gaussian-splatting\"\n    \n    if not os.path.exists(WORK_DIR):\n        print(\"Cloning Gaussian Splatting repository...\")\n        run_cmd([\n            \"git\", \"clone\", \"--recursive\",\n            \"https://github.com/graphdeco-inria/gaussian-splatting.git\",\n            WORK_DIR\n        ])\n    else:\n        print(\"✅ Repository already exists\")\n    \n    os.chdir(WORK_DIR)\n    \n    # Install requirements\n    print(\"Installing Gaussian Splatting requirements...\")\n    run_cmd([sys.executable, \"-m\", \"pip\", \"install\", \"-r\", \"requirements.txt\"])\n    \n    # Build submodules\n    print(\"\\n📦 Building Gaussian Splatting submodules...\")\n    \n    submodules = {\n        \"diff-gaussian-rasterization\":\n            \"https://github.com/graphdeco-inria/diff-gaussian-rasterization.git\",\n        \"simple-knn\":\n            \"https://github.com/camenduru/simple-knn.git\"\n    }\n    \n    for name, repo in submodules.items():\n        print(f\"\\n📦 Installing {name}...\")\n        path = os.path.join(\"submodules\", name)\n        if not os.path.exists(path):\n            run_cmd([\"git\", \"clone\", repo, path])\n        run_cmd([sys.executable, \"-m\", \"pip\", \"install\", path])\n    \n    print(\"✅ Gaussian Splatting setup complete!\")\n\n\ndef train_gaussian_splatting(colmap_dir, image_dir, output_dir, iterations=2000):\n    \"\"\"Train a Gaussian Splatting model on the COLMAP export\"\"\"\n    print(\"\\n=== Training Gaussian Splatting ===\")\n    \n    # Reduce memory usage: lower resolution and conservative densification\n    cmd = [\n        sys.executable, 'train.py',\n        '-s', colmap_dir,\n        '--images', image_dir,\n        '-m', output_dir,\n        '--iterations', str(iterations),\n        '--test_iterations', '1000', str(iterations),\n        '--save_iterations', '1000', str(iterations),
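\n        # Assumption: the upstream train.py (graphdeco-inria) also exposes\n        # '--data_device cpu', which keeps training images in CPU RAM and can\n        # help avoid GPU out-of-memory on Kaggle's 16 GB T4; uncomment to try:\n        # '--data_device', 'cpu',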
\n        '--resolution', '2',                  # Reduce resolution to 1/2\n        '--densify_grad_threshold', '0.001',  # Higher threshold = fewer Gaussians\n        '--densification_interval', '200',    # Less frequent densification\n        '--opacity_reset_interval', '5000',   # Less frequent opacity reset\n    ]\n    \n    print(f\"Command: {' '.join(cmd)}\\n\")\n    \n    result = subprocess.run(\n        cmd,\n        cwd='/kaggle/working/gaussian-splatting',\n        capture_output=True,\n        text=True\n    )\n    \n    print(result.stdout)\n    if result.stderr:\n        print(\"STDERR:\", result.stderr)\n    \n    if result.returncode != 0:\n        raise RuntimeError(\"Gaussian Splatting training failed\")\n    \n    # Check that the final point cloud was written\n    ply_path = os.path.join(output_dir, f'point_cloud/iteration_{iterations}/point_cloud.ply')\n    if not os.path.exists(ply_path):\n        raise RuntimeError(f\"Expected output not found at iteration {iterations}\")\n    \n    print(f\"\\n✅ Gaussian Splatting training completed successfully\")\n    print(f\"   Output: {output_dir}\")\n    \n    return output_dir","metadata":{"execution":{"iopub.status.busy":"2026-01-26T17:26:04.859456Z","iopub.execute_input":"2026-01-26T17:26:04.859737Z","iopub.status.idle":"2026-01-26T17:26:04.876673Z","shell.execute_reply.started":"2026-01-26T17:26:04.859713Z","shell.execute_reply":"2026-01-26T17:26:04.876033Z"},"papermill":{"duration":0.01366,"end_time":"2026-01-20T01:07:23.70848","exception":false,"start_time":"2026-01-20T01:07:23.69482","status":"completed"},"tags":[],"trusted":true},"outputs":[],"execution_count":5},{"cell_type":"markdown","source":"# Main Pipeline","metadata":{}},{"cell_type":"markdown","source":"\nThis cell defines the end-to-end pipeline that turns a folder of ordinary photographs into a 3D reconstruction. It first normalizes the images, then uses DINO embeddings to select the image pairs most likely to overlap, so matching effort is spent on viewpoints that actually share content. MASt3R then recovers a dense 3D point cloud and camera poses from those pairs, and the result is exported in COLMAP's binary format.
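\n\nA rough sketch of the pair-selection idea (a hypothetical `top_k_pairs` helper standing in for the notebook's actual `get_image_pairs_dino`; `embeddings` is assumed to be one DINO-style feature vector per image):\n\n```python\nimport itertools\nimport torch.nn.functional as F\n\ndef top_k_pairs(embeddings, k=1000):\n    \"\"\"Rank all image pairs by cosine similarity and keep the k best.\"\"\"\n    emb = F.normalize(embeddings, dim=1)   # (N, D) rows become unit vectors\n    sim = emb @ emb.T                      # (N, N) cosine-similarity matrix\n    pairs = itertools.combinations(range(emb.shape[0]), 2)\n    ranked = sorted(pairs, key=lambda ij: sim[ij[0], ij[1]].item(), reverse=True)\n    return ranked[:k]\n```\n\nBecause visually overlapping views embed close together, ranking pairs this way concentrates MASt3R's matching budget on viewpoints that are likely to share geometry.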
\n\nFinally, the COLMAP export seeds Gaussian Splatting training, producing a high-fidelity, renderable 3D scene from the original images.","metadata":{}},{"cell_type":"code","source":"# ============================================================================\n# Main Pipeline\n# ============================================================================\ndef main_pipeline(image_dir, output_dir, square_size=224, iterations=2000,\n                  max_images=None, max_pairs=10000, max_points=1000000):\n    \"\"\"\n    Main pipeline: DINO pair selection -> MASt3R -> COLMAP export -> Gaussian Splatting\n    \n    Args:\n        image_dir: Directory containing input images\n        output_dir: Directory for output files\n        square_size: Side length (pixels) of the square crops used for processing\n        iterations: Number of Gaussian Splatting training iterations\n        max_images: Maximum number of images to process (None = all)\n        max_pairs: Maximum number of image pairs for matching\n        max_points: Maximum number of 3D points to extract (default: 1M)\n    \"\"\"\n    os.makedirs(output_dir, exist_ok=True)\n\n    setup_base_environment()\n    clear_memory()\n    \n    setup_mast3r()\n    clear_memory()\n    \n    setup_gaussian_splatting()\n    clear_memory()\n    \n    # Step 1: Normalize images to biplet-square format\n    print(\"\\n\" + \"=\"*70)\n    print(\"Step 1: Biplet-Square Normalization\")\n    print(\"=\"*70)\n    \n    processed_image_dir = os.path.join(output_dir, \"processed_images\")\n    \n    # Get original images first\n    original_image_paths = sorted([\n        os.path.join(image_dir, f)\n        for f in os.listdir(image_dir)\n        if f.lower().endswith(('.jpg', '.jpeg', '.png'))\n    ])\n    \n    # Limit original images if specified\n    if max_images and len(original_image_paths) > max_images:\n        print(f\"\\n⚠️ Limiting to {max_images} original images\")\n        original_image_paths = original_image_paths[:max_images]\n    \n    print(f\"Processing {len(original_image_paths)} original images → ~{len(original_image_paths)*2} after biplet-square\")\n    \n    # Only process the selected images\n    temp_dir = os.path.join(output_dir, \"temp_originals\")\n    os.makedirs(temp_dir, exist_ok=True)\n    \n    # Copy selected images to a temp directory\n    import shutil\n    for img_path in original_image_paths:\n        shutil.copy(img_path, temp_dir)\n    \n    # Process the temp directory\n    normalize_image_sizes_biplet(\n        input_dir=temp_dir,\n        output_dir=processed_image_dir,\n        size=square_size\n    )\n    \n    # Clean up temp directory\n    shutil.rmtree(temp_dir)\n    \n    # Get processed image paths\n    image_paths = sorted([\n        os.path.join(processed_image_dir, f)\n        for f in os.listdir(processed_image_dir)\n        if f.lower().endswith(('.jpg', '.jpeg', '.png'))\n    ])\n    \n    print(f\"\\n📸 Processing {len(image_paths)} images (after biplet-square)\")\n    print(f\"⚠️ Will use at most {max_pairs} pairs to save memory\")\n    \n    # Step 2: DINO-based pair selection\n    print(\"\\n\" + \"=\"*70)\n    print(\"Step 2: DINO Pair Selection\")\n    print(\"=\"*70)\n    \n    pairs = get_image_pairs_dino(image_paths, max_pairs=max_pairs)\n    clear_memory()\n    \n    print(f\"✅ Using {len(pairs)} pairs for reconstruction\")\n    \n    # Step 3: MASt3R reconstruction\n    print(\"\\n\" + \"=\"*70)\n    print(\"Step 3: MASt3R Reconstruction\")\n    print(\"=\"*70)\n    \n    device = Config.DEVICE\n    model = load_mast3r_model(device)\n    \n    scene, mast3r_images = run_mast3r_pairs(\n        model, image_paths, pairs, device,\n        max_pairs=None  # Already limited in get_image_pairs_dino\n    )\n    \n    # Clear model from memory\n    del model\n    clear_memory()\n    \n    # Step 4: Extract COLMAP-compatible data\n    print(\"\\n\" + \"=\"*70)\n    print(\"Step 4: Converting to COLMAP Format\")
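\n    # Assumption: COLMAP's images.bin stores world-to-camera poses; if the MASt3R\n    # scene provides camera-to-world matrices, extract_colmap_data must invert\n    # them before write_images_binary serializes qvec/tvec.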
print(\"=\"*70)\n \n # Extract COLMAP-compatible data with point limit\n pts3d, colors, cameras, poses = extract_colmap_data(\n scene, image_paths, max_points=max_points \n )\n\n # Clear scene from memory\n del scene, mast3r_images\n clear_memory()\n \n # Step 5: Save COLMAP reconstruction\n colmap_dir = os.path.join(output_dir, 'colmap')\n sparse_dir = save_colmap_reconstruction(\n pts3d, colors, cameras, poses, image_paths, colmap_dir\n )\n \n # Clear reconstruction data\n del pts3d, colors, cameras, poses\n clear_memory()\n \n # Step 6: Train Gaussian Splatting\n print(\"\\n\" + \"=\"*70)\n print(\"Step 6: Training Gaussian Splatting\")\n print(\"=\"*70)\n \n gs_output = train_gaussian_splatting(\n colmap_dir=colmap_dir,\n image_dir=processed_image_dir,\n output_dir=output_dir,\n iterations=iterations\n )\n \n print(\"\\n\" + \"=\"*70)\n print(\"β
\n    print(\"=\"*70)\n    print(f\"\\nGaussian Splatting model saved at: {gs_output}\")\n    \n    return gs_output\n\n\nif __name__ == \"__main__\":\n    IMAGE_DIR = \"/kaggle/input/two-dogs/bike15\"\n    OUTPUT_DIR = \"/kaggle/working/output\"\n    \n    gs_output = main_pipeline(\n        image_dir=IMAGE_DIR,\n        output_dir=OUTPUT_DIR,\n        square_size=1024,\n        iterations=1000,\n        max_images=30,\n        max_pairs=1000,\n        max_points=1000000\n    )\n\n    print(f\"\\n{'='*70}\")\n    print(\"Pipeline completed successfully!\")\n    print(f\"{'='*70}\")\n    print(f\"Gaussian Splatting output: {gs_output}\")","metadata":{"execution":{"iopub.status.busy":"2026-01-26T17:26:04.877512Z","iopub.execute_input":"2026-01-26T17:26:04.877849Z","iopub.status.idle":"2026-01-26T17:26:04.896396Z","shell.execute_reply.started":"2026-01-26T17:26:04.877825Z","shell.execute_reply":"2026-01-26T17:26:04.895797Z"},"papermill":{"duration":0.016081,"end_time":"2026-01-20T01:07:23.727745","exception":false,"start_time":"2026-01-20T01:07:23.711664","status":"completed"},"tags":[],"trusted":true,"_kg_hide-output":true},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"## **3D Gaussian Splat Viewer**\n\nhttps://splat-three.vercel.app/?url=bike_mast3r_ps1.splat\n\nhttps://splat-three.vercel.app/?url=bike_mast3r_ps1.splat#[0.61,-0.35,0.71,0,0.26,0.93,0.24,0,-0.76,0.04,0.65,0,0.53,0.08,0.9,1]\n","metadata":{}},{"cell_type":"code","source":"","metadata":{"id":"VQsLeKY8Rl8Y","papermill":{"duration":0.154679,"end_time":"2026-01-20T01:22:29.976313","exception":false,"start_time":"2026-01-20T01:22:29.821634","status":"completed"},"tags":[],"trusted":true},"outputs":[],"execution_count":null}]}
|