Upload 3 files
.gitattributes
CHANGED
@@ -58,3 +58,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.mp4 filter=lfs diff=lfs merge=lfs -text
 *.webm filter=lfs diff=lfs merge=lfs -text
 Movie_Frames_Gaussian_Splat_colab_oo.ipynb filter=lfs diff=lfs merge=lfs -text
+3d-reconstruction-mast3r-w-ps1.ipynb filter=lfs diff=lfs merge=lfs -text
+3d-reconstruction-mast3r-w-ps3.ipynb filter=lfs diff=lfs merge=lfs -text
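The two added rules mark the new notebooks as Git LFS objects. A minimal sketch of how such entries are typically produced, assuming the git-lfs CLI is installed and the commands are run from the repository root (the subprocess style mirrors the notebook's own setup cells):

```python
# Sketch (assumes git-lfs is installed): "git lfs track <pattern>" appends a
# "filter=lfs diff=lfs merge=lfs -text" rule for each pattern to .gitattributes,
# which is what the two "+" lines in the hunk above correspond to.
import subprocess

for pattern in [
    "3d-reconstruction-mast3r-w-ps1.ipynb",
    "3d-reconstruction-mast3r-w-ps3.ipynb",
]:
    subprocess.run(["git", "lfs", "track", pattern], check=True)

# Stage the updated .gitattributes so the new rules ship with the commit.
subprocess.run(["git", "add", ".gitattributes"], check=True)
```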
3d-reconstruction-mast3r-w-ps1.ipynb
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5d4dddcc3d1b140f160c1a80992c04c6424aa14e1200e204e452b25ab040f689
+size 24931102
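The three lines above are a Git LFS pointer (spec v1), not the notebook itself: the roughly 24 MB file is stored in LFS and identified by its SHA-256 oid. A minimal sketch, assuming a clone of this repository with git-lfs available, of materializing the actual notebook:

```python
# Sketch (assumes git-lfs is installed in the clone): fetch the real file
# referenced by the pointer above from LFS storage.
import subprocess

subprocess.run(
    ["git", "lfs", "pull", "--include=3d-reconstruction-mast3r-w-ps1.ipynb"],
    check=True,
)
```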
3d-reconstruction-mast3r-w-ps2.ipynb
ADDED
@@ -0,0 +1 @@
+
{"metadata":{"kernelspec":{"name":"python3","display_name":"Python 3","language":"python"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.12.12"},"accelerator":"GPU","colab":{"gpuType":"T4","provenance":[]},"kaggle":{"accelerator":"nvidiaTeslaT4","dataSources":[{"sourceId":49349,"databundleVersionId":5447706,"sourceType":"competition"}],"dockerImageVersionId":31259,"isInternetEnabled":true,"language":"python","sourceType":"notebook","isGpuEnabled":true},"papermill":{"default_parameters":{},"duration":967.270978,"end_time":"2026-01-20T01:22:34.649213","environment_variables":{},"exception":null,"input_path":"__notebook__.ipynb","output_path":"__notebook__.ipynb","parameters":{},"start_time":"2026-01-20T01:06:27.378235","version":"2.6.0"},"widgets":{"application/vnd.jupyter.widget-state+json":{"state":{"044f8e96b50e4c33b25e342091f4ec64":{"model_module":"@jupyter-widgets/base","model_module_version":"2.0.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"2.0.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border_bottom":null,"border_left":null,"border_right":null,"border_top":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"129b638c005640a591de9920f26201d1":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"HTMLStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"HTMLStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"StyleView","background":null,"description_width":"","font_size":null,"text_color":null}},"2199c02789dc4f23accfd256dd3cac19":{"model_module":"@jupyter-widgets/base","model_module_version":"2.0.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"2.0.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border_bottom":null,"border_left":null,"border_right":null,"border_top":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"2aedd9
53dfdf4ab98df88aefe1133503":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"ProgressStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"2d210fca524a4c9abcea1f361b86cc2c":{"model_module":"@jupyter-widgets/base","model_module_version":"2.0.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"2.0.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border_bottom":null,"border_left":null,"border_right":null,"border_top":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"33f1d82b482d4fa6b0a99cab070ca2ee":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"HBoxModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"2.0.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_801fbba63af94d81bc548c40c3660ce4","IPY_MODEL_66eb589dd72c4555a561d3095df91257","IPY_MODEL_c11823a2992e4c33ad23d0b61b520751"],"layout":"IPY_MODEL_476a9de20d434d8d81d6177e50f1a6af","tabbable":null,"tooltip":null}},"40f2d021b0174597b5502a73cc88a808":{"model_module":"@jupyter-widgets/base","model_module_version":"2.0.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"2.0.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border_bottom":null,"border_left":null,"border_right":null,"border_top":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"476a9de20d434d8d81d6177e50f1a6af":{"model_module":"@jupyter-widgets/base","model_module_version":"2.0.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"2.0.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"LayoutView","align_content":null,"
align_items":null,"align_self":null,"border_bottom":null,"border_left":null,"border_right":null,"border_top":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"4c671a7ee42b4015aed241c0036c60b7":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"2.0.0","_view_name":"HTMLView","description":"","description_allow_html":false,"layout":"IPY_MODEL_a661de9610b34f9cb36a026a4e15aae9","placeholder":"β","style":"IPY_MODEL_aec2ed624beb4f7b917880dc4b550370","tabbable":null,"tooltip":null,"value":"β346M/346Mβ[00:02<00:00,β246MB/s]"}},"52ddde5887ed4a3d83c8adebc13d2387":{"model_module":"@jupyter-widgets/base","model_module_version":"2.0.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"2.0.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border_bottom":null,"border_left":null,"border_right":null,"border_top":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"5687e79b1d974c5cabfbeba43a3694c5":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"2.0.0","_view_name":"HTMLView","description":"","description_allow_html":false,"layout":"IPY_MODEL_e212b430e00d4a5d967a385deecaab91","placeholder":"β","style":"IPY_MODEL_e20b857897f74db885a8dfc6d96887d5","tabbable":null,"tooltip":null,"value":"model.safetensors:β100%"}},"5859bcf5772e4820bec3683d25c9842a":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"HBoxModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"2.0.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_5687e79b1d974c5cabfbeba43a3694c5","IPY_MODEL_ae3558f346c14038a75a88bdce4d9fbd","IPY_MODEL_4c671a7ee42b4015aed241c0036c60b7"],"layou
t":"IPY_MODEL_c0e52e28ecda4e6696bcf3b7647cd6ed","tabbable":null,"tooltip":null}},"5c4e4442fa7a4f249b9d31d759f42dfa":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"HBoxModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"2.0.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_a33ea7c326fe4859a6eeb6324537e29d","IPY_MODEL_985ea3339fb84d28b9ad54cecc5ca023","IPY_MODEL_e32df049e2e74f7f9db8b8e029b03209"],"layout":"IPY_MODEL_2199c02789dc4f23accfd256dd3cac19","tabbable":null,"tooltip":null}},"62081223720d477b91f099e636f67ba2":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"HTMLStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"HTMLStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"StyleView","background":null,"description_width":"","font_size":null,"text_color":null}},"66eb589dd72c4555a561d3095df91257":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"FloatProgressModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"2.0.0","_view_name":"ProgressView","bar_style":"success","description":"","description_allow_html":false,"layout":"IPY_MODEL_2d210fca524a4c9abcea1f361b86cc2c","max":436,"min":0,"orientation":"horizontal","style":"IPY_MODEL_db95f8aa3a684369afcd0a73fda7b405","tabbable":null,"tooltip":null,"value":436}},"6f552fcc4d28402d91491bfaf2989e81":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"ProgressStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"71c6c07841974c91b9a490f34ad3f556":{"model_module":"@jupyter-widgets/base","model_module_version":"2.0.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"2.0.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border_bottom":null,"border_left":null,"border_right":null,"border_top":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"801fbba63af94d81bc548c40c3660ce4":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","
_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"2.0.0","_view_name":"HTMLView","description":"","description_allow_html":false,"layout":"IPY_MODEL_71c6c07841974c91b9a490f34ad3f556","placeholder":"β","style":"IPY_MODEL_ea596f96884c4955a34cb895b3564ef1","tabbable":null,"tooltip":null,"value":"preprocessor_config.json:β100%"}},"898e4be9614645c1b11417befaa72cc7":{"model_module":"@jupyter-widgets/base","model_module_version":"2.0.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"2.0.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border_bottom":null,"border_left":null,"border_right":null,"border_top":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"985ea3339fb84d28b9ad54cecc5ca023":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"FloatProgressModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"2.0.0","_view_name":"ProgressView","bar_style":"success","description":"","description_allow_html":false,"layout":"IPY_MODEL_40f2d021b0174597b5502a73cc88a808","max":548,"min":0,"orientation":"horizontal","style":"IPY_MODEL_2aedd953dfdf4ab98df88aefe1133503","tabbable":null,"tooltip":null,"value":548}},"a33ea7c326fe4859a6eeb6324537e29d":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"2.0.0","_view_name":"HTMLView","description":"","description_allow_html":false,"layout":"IPY_MODEL_52ddde5887ed4a3d83c8adebc13d2387","placeholder":"β","style":"IPY_MODEL_e06705139cd04e1ea65f358a06c3f7cc","tabbable":null,"tooltip":null,"value":"config.json:β100%"}},"a661de9610b34f9cb36a026a4e15aae9":{"model_module":"@jupyter-widgets/base","model_module_version":"2.0.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"2.0.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border_bottom":null,"border_left":null,"border_right":null,"border_top":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify
_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"ae3558f346c14038a75a88bdce4d9fbd":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"FloatProgressModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"2.0.0","_view_name":"ProgressView","bar_style":"success","description":"","description_allow_html":false,"layout":"IPY_MODEL_c4dbbad7fcbf40209cc53a5481508ca6","max":346345912,"min":0,"orientation":"horizontal","style":"IPY_MODEL_6f552fcc4d28402d91491bfaf2989e81","tabbable":null,"tooltip":null,"value":346345912}},"aec2ed624beb4f7b917880dc4b550370":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"HTMLStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"HTMLStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"StyleView","background":null,"description_width":"","font_size":null,"text_color":null}},"c0e52e28ecda4e6696bcf3b7647cd6ed":{"model_module":"@jupyter-widgets/base","model_module_version":"2.0.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"2.0.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border_bottom":null,"border_left":null,"border_right":null,"border_top":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"c11823a2992e4c33ad23d0b61b520751":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"2.0.0","_view_name":"HTMLView","description":"","description_allow_html":false,"layout":"IPY_MODEL_044f8e96b50e4c33b25e342091f4ec64","placeholder":"β","style":"IPY_MODEL_129b638c005640a591de9920f26201d1","tabbable":null,"tooltip":null,"value":"β436/436β[00:00<00:00,β48.9kB/s]"}},"c4dbbad7fcbf40209cc53a5481508ca6":{"model_module":"@jupyter-widgets/base","model_module_version":"2.0.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"2.0.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border_bottom":null,"border_left":null,"border_right":null,"bo
rder_top":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"db95f8aa3a684369afcd0a73fda7b405":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"ProgressStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"e06705139cd04e1ea65f358a06c3f7cc":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"HTMLStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"HTMLStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"StyleView","background":null,"description_width":"","font_size":null,"text_color":null}},"e20b857897f74db885a8dfc6d96887d5":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"HTMLStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"HTMLStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"StyleView","background":null,"description_width":"","font_size":null,"text_color":null}},"e212b430e00d4a5d967a385deecaab91":{"model_module":"@jupyter-widgets/base","model_module_version":"2.0.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"2.0.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border_bottom":null,"border_left":null,"border_right":null,"border_top":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"e32df049e2e74f7f9db8b8e029b03209":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"2.0.0","_view_name":"HTMLView","description":"","description_allow_html":false,"layout":"IPY_MODEL_898e4be9614645c1b11417befaa72cc7","placeholder":"β","style":"IPY_MODEL_62081223720d477b91f099e636f67ba2","tabbable":null,"tooltip":null,
"value":"β548/548β[00:00<00:00,β76.3kB/s]"}},"ea596f96884c4955a34cb895b3564ef1":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"HTMLStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"HTMLStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"StyleView","background":null,"description_width":"","font_size":null,"text_color":null}}},"version_major":2,"version_minor":0}}},"nbformat_minor":4,"nbformat":4,"cells":[{"cell_type":"markdown","source":"# **3D Reconstruction MASt3R w/ps2** \n\n","metadata":{"id":"qDQLX3PArmh8","papermill":{"duration":0.003504,"end_time":"2026-01-20T01:06:31.022336","exception":false,"start_time":"2026-01-20T01:06:31.018832","status":"completed"},"tags":[]}},{"cell_type":"markdown","source":"**MASt3R is a learning-based, SfM-free 3D reconstruction method that directly regresses dense 3D geometry from image pairs, eliminating the need for feature matching, triangulation, and bundle adjustment.**\n\n","metadata":{}},{"cell_type":"markdown","source":"\n---\n\n### Pipeline Overview\n\nThis pipeline performs **end-to-end 3D reconstruction** from a set of images, replacing the traditional COLMAP SfM pipeline with **DINO + MASt3R**, and finally exporting results back into **COLMAP format** for downstream use (e.g. Gaussian Splatting).\n\n---\n\n### 1. Image Preprocessing (Biplet-Square Normalization)\n\n* Each input image is converted into **two square crops** (left/right or top/bottom).\n* This normalizes aspect ratios and increases overlap robustness without resizing distortions.\n\n---\n\n### 2. Image Pair Selection (DINO Global Features)\n\n* DINOv2 is used to extract **global image descriptors**.\n* Images are paired based on **top-K cosine similarity**, with a diversity-aware selection strategy.\n* This drastically reduces the number of pairs while preserving scene coverage and saving memory.\n\n---\n\n### 3. 3D Reconstruction with MASt3R\n\n* MASt3R replaces traditional keypoint detection, matching, and SfM.\n* Selected image pairs are processed to predict **dense correspondences and 3D structure**.\n* A global alignment step optimizes camera poses and point clouds into a coherent scene.\n\n---\n\n### 4. Conversion to COLMAP Format\n\n* The MASt3R scene is converted into **COLMAP-compatible outputs**:\n\n * `cameras.bin`, `images.bin`, `points3D.bin`\n * RGB images, depth maps, normal maps, and confidence masks\n* This enables compatibility with existing COLMAP-based tools and pipelines.\n\n---\n\n### 5. 
Visualization\n\n* The reconstructed 3D points are exported as a **PLY point cloud**.\n* Open3D is used for lightweight, CPU-based visualization.\n\n---\n\n### Key Characteristics\n\n* **Learning-based SfM** (no feature matching or bundle adjustment)\n* **Memory-aware design** (pair limiting, reduced resolution, aggressive cleanup)\n* **Drop-in replacement** for COLMAP reconstruction in modern pipelines\n* **Ready for Gaussian Splatting or neural rendering**\n\n---\n","metadata":{}},{"cell_type":"code","source":"","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"# Setup","metadata":{}},{"cell_type":"code","source":"import os\nimport sys\nimport gc\nimport h5py\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom tqdm import tqdm\nfrom pathlib import Path\nimport subprocess\nfrom PIL import Image, ImageFilter\nimport struct\n\n# Transformers for DINO\nfrom transformers import AutoImageProcessor, AutoModel","metadata":{"papermill":{"duration":46.280727,"end_time":"2026-01-20T01:07:23.641872","exception":false,"start_time":"2026-01-20T01:06:37.361145","status":"completed"},"tags":[],"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"class Config:\n # Feature extraction\n N_KEYPOINTS = 4096\n IMAGE_SIZE = 512\n\n # Pair selection - CRITICAL for memory\n GLOBAL_TOPK = 20\n MIN_MATCHES = 10\n RATIO_THR = 1.2\n\n # Paths\n DINO_MODEL = \"facebook/dinov2-base\"\n\n MAST3R_MODEL = \"/kaggle/working/mast3r/checkpoints/MASt3R_ViTLarge_BaseDecoder_512_catmlpdpt_metric.pth\"\n MAST3R_IMAGE_SIZE = 256 # should be %16=0\n\n # Device\n DEVICE = torch.device('cpu')","metadata":{"papermill":{"duration":46.280727,"end_time":"2026-01-20T01:07:23.641872","exception":false,"start_time":"2026-01-20T01:06:37.361145","status":"completed"},"tags":[],"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"# ============================================================================\n# Memory Management Utilities\n# ============================================================================\n\ndef clear_memory():\n \"\"\"Aggressively clear GPU and CPU memory\"\"\"\n gc.collect()\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n torch.cuda.synchronize()\n\ndef get_memory_info():\n \"\"\"Get current memory usage\"\"\"\n if torch.cuda.is_available():\n allocated = torch.cuda.memory_allocated() / 1024**3\n reserved = torch.cuda.memory_reserved() / 1024**3\n print(f\"GPU Memory - Allocated: {allocated:.2f}GB, Reserved: {reserved:.2f}GB\")\n \n import psutil\n cpu_mem = psutil.virtual_memory().percent\n print(f\"CPU Memory Usage: {cpu_mem:.1f}%\")\n\n# ============================================================================\n# Environment Setup\n# ============================================================================\n\ndef run_cmd(cmd, check=True, capture=False):\n \"\"\"Run command with better error handling\"\"\"\n print(f\"Running: {' '.join(cmd)}\")\n result = subprocess.run(\n cmd,\n capture_output=capture,\n text=True,\n check=False\n )\n if check and result.returncode != 0:\n print(f\"β Command failed with code {result.returncode}\")\n if capture:\n print(f\"STDOUT: {result.stdout}\")\n print(f\"STDERR: {result.stderr}\")\n return result\n\n\ndef setup_base_environment():\n \"\"\"Setup base Python environment\"\"\"\n print(\"\\n=== Setting up Base Environment ===\")\n \n # NumPy fix for Python 3.12\n print(\"\\nπ¦ Fixing NumPy...\")\n run_cmd([sys.executable, 
\"-m\", \"pip\", \"uninstall\", \"-y\", \"numpy\"])\n run_cmd([sys.executable, \"-m\", \"pip\", \"install\", \"numpy==1.26.4\"])\n \n # PyTorch\n print(\"\\nπ¦ Installing PyTorch...\")\n run_cmd([\n sys.executable, \"-m\", \"pip\", \"install\",\n \"torch\", \"torchvision\", \"torchaudio\"\n ])\n \n # Core utilities\n print(\"\\nπ¦ Installing core utilities...\")\n run_cmd([\n sys.executable, \"-m\", \"pip\", \"install\",\n \"opencv-python\",\n \"pillow\",\n \"imageio\",\n \"imageio-ffmpeg\",\n \"plyfile\",\n \"tqdm\",\n \"tensorboard\",\n \"scipy\", # for rotation conversions and image resizing\n \"psutil\" # for memory monitoring\n ])\n \n # Transformers for DINO\n print(\"\\nπ¦ Installing transformers...\")\n run_cmd([\n sys.executable, \"-m\", \"pip\", \"install\",\n \"transformers==4.40.0\"\n ])\n \n # pycolmap for COLMAP format\n print(\"\\nπ¦ Installing pycolmap...\")\n run_cmd([sys.executable, \"-m\", \"pip\", \"install\", \"pycolmap\"])\n \n print(\"β Base environment setup complete!\")\n\n\ndef setup_mast3r():\n \"\"\"Install and setup MASt3R\"\"\"\n print(\"\\n=== Setting up MASt3R ===\")\n \n os.chdir('/kaggle/working')\n \n # Remove existing installation\n if os.path.exists('mast3r'):\n print(\"Removing existing MASt3R installation...\")\n os.system('rm -rf mast3r')\n \n # Clone repository\n print(\"Cloning MASt3R repository...\")\n os.system('git clone --recursive https://github.com/naver/mast3r')\n os.chdir('/kaggle/working/mast3r')\n \n # Check dust3r directory\n print(\"Checking dust3r structure...\")\n os.system('ls -la dust3r/')\n \n # Install dust3r\n print(\"Installing dust3r...\")\n os.system('cd dust3r && python -m pip install -e .')\n \n # Install croco\n print(\"Installing croco...\")\n os.system('cd dust3r/croco && python -m pip install -e .')\n \n # Install requirements\n print(\"Installing MASt3R requirements...\")\n os.system('pip install -r requirements.txt')\n \n # Download model weights\n print(\"Downloading model weights...\")\n os.system('mkdir -p checkpoints')\n os.system('wget -P checkpoints/ https://download.europe.naverlabs.com/ComputerVision/MASt3R/MASt3R_ViTLarge_BaseDecoder_512_catmlpdpt_metric.pth')\n \n # Install additional dependencies\n print(\"Installing additional dependencies...\")\n os.system('pip install trimesh matplotlib roma')\n \n # Add to path\n sys.path.insert(0, '/kaggle/working/mast3r')\n sys.path.insert(0, '/kaggle/working/mast3r/dust3r')\n \n # Verification\n print(\"\\nπ Verifying MASt3R installation...\")\n try:\n from mast3r.model import AsymmetricMASt3R\n print(\" β MASt3R import: OK\")\n except Exception as e:\n print(f\" β MASt3R import failed: {e}\")\n raise\n \n print(\"β MASt3R setup complete!\")\n\ndef setup_gaussian_splatting():\n \"\"\"Setup Gaussian Splatting\"\"\"\n print(\"\\n=== Setting up Gaussian Splatting ===\")\n \n os.chdir('/kaggle/working')\n \n WORK_DIR = \"gaussian-splatting\"\n \n if not os.path.exists(WORK_DIR):\n print(\"Cloning Gaussian Splatting repository...\")\n run_cmd([\n \"git\", \"clone\", \"--recursive\",\n \"https://github.com/graphdeco-inria/gaussian-splatting.git\",\n WORK_DIR\n ])\n else:\n print(\"β Repository already exists\")\n \n os.chdir(WORK_DIR)\n \n # Install requirements\n print(\"Installing Gaussian Splatting requirements...\")\n run_cmd([sys.executable, \"-m\", \"pip\", \"install\", \"-r\", \"requirements.txt\"])\n \n # Build submodules\n print(\"\\nπ¦ Building Gaussian Splatting submodules...\")\n \n submodules = {\n \"diff-gaussian-rasterization\":\n 
\"https://github.com/graphdeco-inria/diff-gaussian-rasterization.git\",\n \"simple-knn\":\n \"https://github.com/camenduru/simple-knn.git\"\n }\n \n for name, repo in submodules.items():\n print(f\"\\nπ¦ Installing {name}...\")\n path = os.path.join(\"submodules\", name)\n if not os.path.exists(path):\n run_cmd([\"git\", \"clone\", repo, path])\n run_cmd([sys.executable, \"-m\", \"pip\", \"install\", path])\n \n print(\"β Gaussian Splatting setup complete!\")","metadata":{"papermill":{"duration":46.280727,"end_time":"2026-01-20T01:07:23.641872","exception":false,"start_time":"2026-01-20T01:06:37.361145","status":"completed"},"tags":[],"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"setup_base_environment()\nclear_memory()\n\nsetup_mast3r()\nclear_memory()\n","metadata":{"trusted":true,"_kg_hide-output":true,"execution":{"iopub.status.busy":"2026-01-30T00:49:33.247792Z","iopub.execute_input":"2026-01-30T00:49:33.249667Z","iopub.status.idle":"2026-01-30T00:52:18.144506Z","shell.execute_reply.started":"2026-01-30T00:49:33.249597Z","shell.execute_reply":"2026-01-30T00:52:18.143417Z"}},"outputs":[],"execution_count":null},{"cell_type":"code","source":"# ============================================================================\n# Step 0: Biplet-Square Normalization (PRESERVED FROM ORIGINAL)\n# ============================================================================\n\ndef normalize_image_sizes_biplet(input_dir, output_dir=None, size=1024):\n \"\"\"\n Generates two square crops (Left & Right or Top & Bottom)\n from each image in a directory.\n \"\"\"\n if output_dir is None:\n output_dir = 'output/images_biplet'\n\n os.makedirs(output_dir, exist_ok=True)\n\n print(f\"Generating 2 cropped squares (Left/Right or Top/Bottom) for each image...\")\n print()\n\n converted_count = 0\n size_stats = {}\n\n for img_file in sorted(os.listdir(input_dir)):\n if not img_file.lower().endswith(('.jpg', '.jpeg', '.png')):\n continue\n\n input_path = os.path.join(input_dir, img_file)\n\n try:\n img = Image.open(input_path)\n original_size = img.size\n\n size_key = f\"{original_size[0]}x{original_size[1]}\"\n size_stats[size_key] = size_stats.get(size_key, 0) + 1\n\n # Generate 2 crops\n crops = generate_two_crops(img, size)\n\n base_name, ext = os.path.splitext(img_file)\n for mode, cropped_img in crops.items():\n output_path = os.path.join(output_dir, f\"{base_name}_{mode}{ext}\")\n cropped_img.save(output_path, quality=95)\n\n converted_count += 1\n print(f\" β {img_file}: {original_size} β 2 square images generated\")\n\n except Exception as e:\n print(f\" β Error processing {img_file}: {e}\")\n\n print(f\"\\nProcessing complete: {converted_count} source images processed\")\n print(f\"Original size distribution: {size_stats}\")\n return converted_count","metadata":{"papermill":{"duration":46.280727,"end_time":"2026-01-20T01:07:23.641872","exception":false,"start_time":"2026-01-20T01:06:37.361145","status":"completed"},"tags":[],"trusted":true,"execution":{"iopub.status.busy":"2026-01-30T00:52:18.145765Z","iopub.execute_input":"2026-01-30T00:52:18.146252Z","iopub.status.idle":"2026-01-30T00:52:18.156801Z","shell.execute_reply.started":"2026-01-30T00:52:18.146218Z","shell.execute_reply":"2026-01-30T00:52:18.155555Z"}},"outputs":[],"execution_count":null},{"cell_type":"code","source":"def generate_two_crops(img, size):\n \"\"\"\n Generates two square crops from an image.\n \"\"\"\n # If size is a tuple or list, extract the first value\n if isinstance(size, (tuple, 
list)):\n size = size[0]\n \n width, height = img.size\n crops = {}\n \n if width >= height:\n # Landscape: Split into left and right squares\n box_left = (0, 0, height, height)\n box_right = (width - height, 0, width, height)\n crops['left'] = img.crop(box_left).resize((size, size), Image.LANCZOS)\n crops['right'] = img.crop(box_right).resize((size, size), Image.LANCZOS)\n else:\n # Portrait: Split into top and bottom squares\n box_top = (0, 0, width, width)\n box_bottom = (0, height - width, width, height)\n crops['top'] = img.crop(box_top).resize((size, size), Image.LANCZOS)\n crops['bottom'] = img.crop(box_bottom).resize((size, size), Image.LANCZOS)\n \n return crops","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-01-30T00:52:18.158175Z","iopub.execute_input":"2026-01-30T00:52:18.158688Z","iopub.status.idle":"2026-01-30T00:52:18.187358Z","shell.execute_reply.started":"2026-01-30T00:52:18.158643Z","shell.execute_reply":"2026-01-30T00:52:18.186124Z"}},"outputs":[],"execution_count":null},{"cell_type":"code","source":"# ============================================================================\n# Step 1: DINO-based Pair Selection (PRESERVED FROM ORIGINAL)\n# ============================================================================\n\ndef load_torch_image(fname, device):\n \"\"\"Load image as torch tensor\"\"\"\n import torchvision.transforms as T\n\n img = Image.open(fname).convert('RGB')\n transform = T.Compose([\n T.ToTensor(),\n ])\n return transform(img).unsqueeze(0).to(device)\n\ndef extract_dino_global(image_paths, model_path, device):\n \"\"\"Extract DINO global descriptors with memory management\"\"\"\n print(\"\\n=== Extracting DINO Global Features ===\")\n print(\"Initial memory state:\")\n get_memory_info()\n\n processor = AutoImageProcessor.from_pretrained(model_path)\n model = AutoModel.from_pretrained(model_path).eval().to(device)\n\n global_descs = []\n batch_size = 4 # Small batch to save memory\n \n for i in tqdm(range(0, len(image_paths), batch_size)):\n batch_paths = image_paths[i:i+batch_size]\n batch_imgs = []\n \n for img_path in batch_paths:\n img = load_torch_image(img_path, device)\n batch_imgs.append(img)\n \n batch_tensor = torch.cat(batch_imgs, dim=0)\n \n with torch.no_grad():\n inputs = processor(images=batch_tensor, return_tensors=\"pt\", do_rescale=False).to(device)\n outputs = model(**inputs)\n desc = F.normalize(outputs.last_hidden_state[:, 1:].max(dim=1)[0], dim=1, p=2)\n global_descs.append(desc.cpu())\n \n # Clear batch memory\n del batch_tensor, inputs, outputs, desc\n clear_memory()\n\n global_descs = torch.cat(global_descs, dim=0)\n\n del model, processor\n clear_memory()\n \n print(\"After DINO extraction:\")\n get_memory_info()\n\n return global_descs\n\ndef build_topk_pairs(global_feats, k, device):\n \"\"\"Build top-k similar pairs from global features\"\"\"\n g = global_feats.to(device)\n sim = g @ g.T\n sim.fill_diagonal_(-1)\n\n N = sim.size(0)\n k = min(k, N - 1)\n\n topk_indices = torch.topk(sim, k, dim=1).indices.cpu()\n\n pairs = []\n for i in range(N):\n for j in topk_indices[i]:\n j = j.item()\n if i < j:\n pairs.append((i, j))\n\n # Remove duplicates\n pairs = list(set(pairs))\n \n return pairs\n\ndef select_diverse_pairs(pairs, max_pairs, num_images):\n \"\"\"\n Select diverse pairs to ensure good image coverage\n Strategy: Select pairs that maximize image coverage\n \"\"\"\n import random\n random.seed(42)\n \n if len(pairs) <= max_pairs:\n return pairs\n \n print(f\"Selecting {max_pairs} diverse pairs from 
{len(pairs)} candidates...\")\n \n # Count how many times each image appears in pairs\n image_counts = {i: 0 for i in range(num_images)}\n for i, j in pairs:\n image_counts[i] += 1\n image_counts[j] += 1\n \n # Sort pairs by: prefer pairs with less-connected images\n def pair_score(pair):\n i, j = pair\n # Lower score = images appear in fewer pairs = more diverse\n return image_counts[i] + image_counts[j]\n \n pairs_scored = [(pair, pair_score(pair)) for pair in pairs]\n pairs_scored.sort(key=lambda x: x[1])\n \n # Select pairs greedily to maximize coverage\n selected = []\n selected_images = set()\n \n # Phase 1: Select pairs that add new images (greedy coverage)\n for pair, score in pairs_scored:\n if len(selected) >= max_pairs:\n break\n i, j = pair\n # Prefer pairs that include new images\n if i not in selected_images or j not in selected_images:\n selected.append(pair)\n selected_images.add(i)\n selected_images.add(j)\n \n # Phase 2: Fill remaining slots with high-similarity pairs\n if len(selected) < max_pairs:\n remaining = [p for p, s in pairs_scored if p not in selected]\n random.shuffle(remaining)\n selected.extend(remaining[:max_pairs - len(selected)])\n \n print(f\"Selected pairs cover {len(selected_images)} / {num_images} images ({100*len(selected_images)/num_images:.1f}%)\")\n \n return selected\n\ndef get_image_pairs_dino(image_paths, max_pairs=None):\n \"\"\"DINO-based pair selection with intelligent limiting\"\"\"\n device = Config.DEVICE\n\n # DINO global features\n global_feats = extract_dino_global(image_paths, Config.DINO_MODEL, device)\n pairs = build_topk_pairs(global_feats, Config.GLOBAL_TOPK, device)\n\n print(f\"Initial pairs from DINO: {len(pairs)}\")\n \n # Apply intelligent pair selection if limit specified\n if max_pairs and len(pairs) > max_pairs:\n pairs = select_diverse_pairs(pairs, max_pairs, len(image_paths))\n \n return pairs\n\n# ============================================================================\n# Step 2: MASt3R Reconstruction (REPLACES ALIKED/LIGHTGLUE/COLMAP)\n# ============================================================================\n\ndef load_mast3r_model(device='cuda'):\n \"\"\"Load MASt3R model\"\"\"\n from mast3r.model import AsymmetricMASt3R\n \n model = AsymmetricMASt3R.from_pretrained(Config.MAST3R_MODEL).to(device)\n model.eval()\n \n print(f\"β MASt3R model loaded on {device}\")\n return model\n\ndef load_images_for_mast3r(image_paths, size=224):\n \"\"\"Load images using DUSt3R's format with reduced size\"\"\"\n print(f\"\\n=== Loading images for MASt3R (size={size}) ===\")\n \n from dust3r.utils.image import load_images\n \n # Load images using DUSt3R's loader with reduced size\n images = load_images(image_paths, size=size, verbose=True)\n \n return images\n\ndef run_mast3r_pairs(model, image_paths, pairs, device='cuda', batch_size=1, max_pairs=None):\n \"\"\"Run MASt3R on selected pairs with memory management\"\"\"\n print(\"\\n=== Running MASt3R Reconstruction ===\")\n print(\"Initial memory state:\")\n get_memory_info()\n \n from dust3r.inference import inference\n from dust3r.cloud_opt import global_aligner, GlobalAlignerMode\n \n # Limit number of pairs if specified\n if max_pairs and len(pairs) > max_pairs:\n print(f\"Limiting pairs from {len(pairs)} to {max_pairs}\")\n # Select pairs more evenly distributed\n step = max(1, len(pairs) // max_pairs)\n pairs = pairs[::step][:max_pairs]\n \n print(f\"Processing {len(pairs)} pairs...\")\n \n # Load images in smaller size\n print(f\"Loading {len(image_paths)} images at 
{Config.MAST3R_IMAGE_SIZE}x{Config.MAST3R_IMAGE_SIZE}...\")\n images = load_images_for_mast3r(image_paths, size=Config.MAST3R_IMAGE_SIZE)\n \n print(f\"Loaded {len(images)} images\")\n print(\"After loading images:\")\n get_memory_info()\n \n # Create all image pairs at once\n print(f\"Creating {len(pairs)} image pairs...\")\n mast3r_pairs = []\n for idx1, idx2 in tqdm(pairs, desc=\"Preparing pairs\"):\n mast3r_pairs.append((images[idx1], images[idx2]))\n \n print(f\"Running MASt3R inference on {len(mast3r_pairs)} pairs...\")\n \n # Run inference (this returns the dict format we need)\n output = inference(mast3r_pairs, model, device, batch_size=batch_size, verbose=True)\n \n # Clear pairs from memory\n del mast3r_pairs\n clear_memory()\n \n print(\"β MASt3R inference complete\")\n print(\"After inference:\")\n get_memory_info()\n \n # Global alignment\n print(\"Running global alignment...\")\n scene = global_aligner(\n output, \n device=device, \n mode=GlobalAlignerMode.PointCloudOptimizer\n )\n \n # Clear output after creating scene\n del output\n clear_memory()\n \n print(\"Computing global alignment...\")\n loss = scene.compute_global_alignment(\n init=\"mst\", \n niter=150, # Reduced from 300\n schedule='cosine', \n lr=0.01\n )\n \n print(f\"β Global alignment complete (final loss: {loss:.6f})\")\n print(\"Final memory state:\")\n get_memory_info()\n \n return scene, images","metadata":{"papermill":{"duration":46.280727,"end_time":"2026-01-20T01:07:23.641872","exception":false,"start_time":"2026-01-20T01:06:37.361145","status":"completed"},"tags":[],"trusted":true,"execution":{"iopub.status.busy":"2026-01-30T00:52:18.189125Z","iopub.execute_input":"2026-01-30T00:52:18.189514Z","iopub.status.idle":"2026-01-30T00:52:18.219749Z","shell.execute_reply.started":"2026-01-30T00:52:18.189482Z","shell.execute_reply":"2026-01-30T00:52:18.218291Z"}},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"# Process2","metadata":{}},{"cell_type":"markdown","source":"## **The MASt3R scene is converted into COLMAP-compatible outputs.**","metadata":{}},{"cell_type":"code","source":"#process2_15.py\nimport struct\nimport numpy as np\nfrom pathlib import Path\nfrom PIL import Image\nimport os\nimport torch\n\ndef rotmat_to_qvec(R):\n \"\"\"Convert Rotation Matrix to Quaternion (w, x, y, z)\"\"\"\n R = np.asarray(R, dtype=np.float64)\n trace = np.trace(R)\n if trace > 0:\n s = 0.5 / np.sqrt(trace + 1.0)\n w, x, y, z = 0.25 / s, (R[2, 1] - R[1, 2]) * s, (R[0, 2] - R[2, 0]) * s, (R[1, 0] - R[0, 1]) * s\n elif R[0, 0] > R[1, 1] and R[0, 0] > R[2, 2]:\n s = 2.0 * np.sqrt(1.0 + R[0, 0] - R[1, 1] - R[2, 2])\n w, x, y, z = (R[2, 1] - R[1, 2]) / s, 0.25 * s, (R[0, 1] + R[1, 0]) / s, (R[0, 2] + R[2, 0]) / s\n elif R[1, 1] > R[2, 2]:\n s = 2.0 * np.sqrt(1.0 + R[1, 1] - R[0, 0] - R[2, 2])\n w, x, y, z = (R[0, 2] - R[2, 0]) / s, (R[0, 1] + R[1, 0]) / s, 0.25 * s, (R[1, 2] + R[2, 1]) / s\n else:\n s = 2.0 * np.sqrt(1.0 + R[2, 2] - R[0, 0] - R[1, 1])\n w, x, y, z = (R[1, 0] - R[0, 1]) / s, (R[0, 2] + R[2, 0]) / s, (R[1, 2] + R[2, 1]) / s, 0.25 * s\n q = np.array([w, x, y, z])\n return q / np.linalg.norm(q)\n\ndef extract_all_data(scene, image_paths, conf_threshold=1.5):\n \"\"\"\n Extract 3D points, colors, confidence, and camera parameters from the scene.\n \"\"\"\n print(\"\\n=== Extracting Data from Scene ===\")\n \n # Basic data from scene\n pts3d_list = scene.get_pts3d()\n im_poses = scene.get_im_poses().detach().cpu().numpy()\n focals = scene.get_focals().detach().cpu().numpy()\n pp = 
scene.get_principal_points().detach().cpu().numpy()\n im_conf = scene.im_conf\n\n all_pts, all_cols, all_conf = [], [], []\n cameras_dict = {}\n\n for i, img_path in enumerate(image_paths):\n img_name = os.path.basename(img_path)\n img_raw = Image.open(img_path).convert('RGB')\n W_orig, H_orig = img_raw.size\n \n # Get 3D points and confidence\n pts = pts3d_list[i].detach().cpu().numpy()\n conf = im_conf[i].detach().cpu().numpy()\n H_pts, W_pts = pts.shape[:2]\n\n # Sample colors at the same resolution as 3D points\n img_res = img_raw.resize((W_pts, H_pts), Image.BILINEAR)\n cols = np.array(img_res)\n\n # Filter by confidence threshold\n mask = conf > conf_threshold\n all_pts.append(pts[mask])\n all_cols.append(cols[mask])\n all_conf.append(conf[mask])\n\n # Camera parameter calculation with scale correction\n scale = W_orig / W_pts\n fx = float(focals[i, 0] if focals.ndim > 1 else focals[i]) * scale\n fy = float(focals[i, 1] if (focals.ndim > 1 and focals.shape[1] > 1) else (focals[i, 0] if focals.ndim > 1 else focals[i])) * scale\n cx, cy = pp[i, 0] * scale, pp[i, 1] * scale\n\n cameras_dict[img_name] = {\n 'id': i + 1,\n 'w': W_orig, 'h': H_orig,\n 'params': (fx, fy, cx, cy),\n 'pose_w2c': np.linalg.inv(im_poses[i]) # World-to-Camera\n }\n print(f\" Image {i+1}: {img_name} ({len(pts[mask]):,} points extracted)\")\n\n return (np.concatenate(all_pts), np.concatenate(all_cols), \n np.concatenate(all_conf), cameras_dict)\n\ndef save_colmap_binary(pts3d, colors, conf, cameras_dict, output_dir):\n \"\"\"\n Save data in COLMAP binary format (.bin).\n \"\"\"\n path = Path(output_dir)\n path.mkdir(parents=True, exist_ok=True)\n\n # 1. cameras.bin (PINHOLE model)\n with open(path / 'cameras.bin', 'wb') as f:\n f.write(struct.pack('Q', len(cameras_dict)))\n for name, cam in cameras_dict.items():\n f.write(struct.pack('IiQQ', cam['id'], 1, cam['w'], cam['h']))\n f.write(struct.pack('dddd', *cam['params']))\n\n # 2. images.bin\n with open(path / 'images.bin', 'wb') as f:\n f.write(struct.pack('Q', len(cameras_dict)))\n for name, cam in cameras_dict.items():\n q = rotmat_to_qvec(cam['pose_w2c'][:3, :3])\n t = cam['pose_w2c'][:3, 3]\n f.write(struct.pack('I', cam['id']))\n f.write(struct.pack('dddd', *q))\n f.write(struct.pack('ddd', *t))\n f.write(struct.pack('I', cam['id']))\n f.write(name.encode('utf-8') + b'\\x00')\n f.write(struct.pack('Q', 0))\n\n # 3. 
points3D.bin (Colored)\n with open(path / 'points3D.bin', 'wb') as f:\n f.write(struct.pack('Q', len(pts3d)))\n for i, (pt, col, cf) in enumerate(zip(pts3d, colors, conf)):\n f.write(struct.pack('Q', i + 1))\n f.write(struct.pack('ddd', *pt))\n f.write(struct.pack('BBB', *col)) # RGB\n f.write(struct.pack('d', 1.0 / max(cf, 0.01))) # Error estimate\n f.write(struct.pack('Q', 0))\n\n print(f\"\\nβ COLMAP binary files exported to: {output_dir}\")\n\ndef write_colored_ply(pts3d, colors, output_path):\n \"\"\"Export a colored PLY file for visualization.\"\"\"\n with open(output_path, 'w') as f:\n f.write(f\"ply\\nformat ascii 1.0\\nelement vertex {len(pts3d)}\\n\"\n \"property float x\\nproperty float y\\nproperty float z\\n\"\n \"property uchar red\\nproperty uchar green\\nproperty uchar blue\\n\"\n \"end_header\\n\")\n for pt, col in zip(pts3d, colors):\n f.write(f\"{pt[0]} {pt[1]} {pt[2]} {int(col[0])} {int(col[1])} {int(col[2])}\\n\")\n print(f\"β PLY file saved: {output_path}\")\n\ndef create_colmap_bins(scene, image_paths, output_dir, conf_threshold=1.5):\n \"\"\"\n Main entry point to generate COLMAP reconstruction data.\n \"\"\"\n print(\"=\"*60)\n print(\"DUST3R TO COLMAP RECONSTRUCTION\")\n print(\"=\"*60)\n\n # 1. Extraction\n pts3d, colors, conf, cameras_dict = extract_all_data(scene, image_paths, conf_threshold)\n\n # 2. COLMAP Binary Output\n save_colmap_binary(pts3d, colors, conf, cameras_dict, output_dir)\n\n # 3. PLY Output (Optional verification)\n write_colored_ply(pts3d, colors, Path(output_dir) / \"point_cloud.ply\")\n\n print(\"\\nβ PROCESS COMPLETE\")\n return cameras_dict, pts3d, conf, colors\n\n\n","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-02-01T09:15:51.311391Z","iopub.execute_input":"2026-02-01T09:15:51.311591Z","iopub.status.idle":"2026-02-01T09:15:51.315229Z","shell.execute_reply.started":"2026-02-01T09:15:51.311570Z","shell.execute_reply":"2026-02-01T09:15:51.314544Z"}},"outputs":[],"execution_count":null},{"cell_type":"code","source":"","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"# bins to ply","metadata":{}},{"cell_type":"code","source":"def convert_colmap_bin_to_ply(sparse_dir, output_ply_path):\n\n import pycolmap\n from plyfile import PlyData, PlyElement\n import numpy as np\n \n print(f\"\\n=== Converting COLMAP bin to PLY ===\")\n\n reconstruction = pycolmap.Reconstruction(str(sparse_dir))\n \n print(f\"Loaded reconstruction:\")\n print(f\" - {len(reconstruction.cameras)} cameras\")\n print(f\" - {len(reconstruction.images)} images\")\n print(f\" - {len(reconstruction.points3D)} points\")\n \n if len(reconstruction.points3D) == 0:\n print(\"β No 3D points in reconstruction!\")\n return 0\n\n points = []\n colors = []\n \n for point3D_id, point3D in reconstruction.points3D.items():\n points.append(point3D.xyz)\n colors.append(point3D.color)\n \n points = np.array(points)\n colors = np.array(colors)\n \n print(f\"\\nPoint cloud statistics:\")\n print(f\" Total points: {len(points)}\")\n print(f\" X range: [{points[:, 0].min():.3f}, {points[:, 0].max():.3f}]\")\n print(f\" Y range: [{points[:, 1].min():.3f}, {points[:, 1].max():.3f}]\")\n print(f\" Z range: [{points[:, 2].min():.3f}, {points[:, 2].max():.3f}]\")\n\n vertices = np.array(\n [(p[0], p[1], p[2], c[0], c[1], c[2]) \n for p, c in zip(points, colors)],\n dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4'),\n ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]\n )\n \n el = PlyElement.describe(vertices, 'vertex')\n 
PlyData([el]).write(output_ply_path)\n \n print(f\"β Saved PLY file to {output_ply_path}\")\n \n return len(points)\n\n\n'''\ncolmap_output_dir='/kaggle/working/output/colmap'\nsparse_dir = os.path.join(colmap_output_dir, 'sparse', '0')\nply_path = os.path.join(colmap_output_dir, 'point_cloud.ply')\nnum_points = convert_colmap_bin_to_ply(sparse_dir, ply_path)\n'''\n","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"# main_pipeline","metadata":{}},{"cell_type":"code","source":"# --- Keeping the initial image processing and model inference as is ---\nimage_dir = \"/kaggle/input/image-matching-challenge-2023/train/haiper/bike/images\"\noutput_dir = \"/kaggle/working/output\"\nsquare_size = 1024 \nmax_pairs = 1000 \nmax_points = 1000000 \nos.makedirs(output_dir, exist_ok=True)\nprocessed_image_dir = os.path.join(output_dir, \"processed_images\")\n\n# Image normalization\nnormalize_image_sizes_biplet(\n input_dir=image_dir,\n output_dir=processed_image_dir,\n size=square_size\n)\n\nprocessed_image_paths = sorted([\n os.path.join(processed_image_dir, f) \n for f in os.listdir(processed_image_dir) \n if f.lower().endswith(('.jpg', '.jpeg', '.png'))\n])\n\n# Create pairs and execute MASt3R\npairs = get_image_pairs_dino(processed_image_paths, max_pairs=max_pairs)\nclear_memory()\n\ndevice = Config.DEVICE\nmodel = load_mast3r_model(device)\nscene, mast3r_images = run_mast3r_pairs(\n model, processed_image_paths, pairs, device,\n max_pairs=None\n)\n\n# Release model to free up memory\ndel model\nclear_memory()\n\n# ========================================================\n# REVISED: Processing everything at once with new functions\n# ========================================================\n\n# 1. Set output directory (COLMAP standard sparse/0 format)\ncolmap_sparse_dir = os.path.join(output_dir, \"colmap/sparse/0\")\n\n# 2. Extract coordinates, colors, and camera params, then save COLMAP binaries\n# This function handles color extraction (matching coordinates) and BIN file export simultaneously\ncameras_dict, pts3d, confidence, colors = create_colmap_bins(\n scene=scene, \n image_paths=processed_image_paths, \n output_dir=colmap_sparse_dir, \n conf_threshold=1.5\n)\n\n# 3. Finally, delete the scene object and clear memory\ndel scene\nclear_memory()\n\n# 4. 
Verify results (The PLY file is also saved within create_colmap_bins)\nprint(f\"β COLMAP binary files and colored PLY created at: {colmap_sparse_dir}\")\nprint(f\"β Total points processed: {len(pts3d):,}\")","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"# Ply Viewer","metadata":{}},{"cell_type":"code","source":"!pip install open3d","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-01-30T00:53:03.941302Z","iopub.status.idle":"2026-01-30T00:53:03.941728Z","shell.execute_reply.started":"2026-01-30T00:53:03.941523Z","shell.execute_reply":"2026-01-30T00:53:03.941556Z"}},"outputs":[],"execution_count":null},{"cell_type":"code","source":"\"\"\"\nPLY Viewer for Kaggle Notebook\nDisplay PLY files from /kaggle/working directory\n\"\"\"\n\nfrom IPython.display import HTML, display\nimport base64\n\ndef display_ply_viewer(ply_file_path):\n \"\"\"\n Display PLY file in Kaggle notebook\n \n Args:\n ply_file_path: Path to PLY file (e.g., '/kaggle/working/model.ply')\n \"\"\"\n \n # Read PLY file and encode to Base64\n with open(ply_file_path, 'rb') as f:\n ply_data = f.read()\n ply_base64 = base64.b64encode(ply_data).decode('utf-8')\n \n # Create HTML viewer\n html_content = f\"\"\"\n <!DOCTYPE html>\n <html>\n <head>\n <meta charset=\"UTF-8\">\n <title>PLY Viewer</title>\n <style>\n body {{ margin: 0; font-family: Arial, sans-serif; }}\n #container {{ width: 100%; height: 600px; position: relative; }}\n #info {{\n position: absolute;\n top: 10px;\n left: 10px;\n background: rgba(0,0,0,0.7);\n color: white;\n padding: 10px;\n border-radius: 5px;\n font-size: 12px;\n z-index: 100;\n }}\n .controls {{\n position: absolute;\n top: 10px;\n right: 10px;\n background: rgba(0,0,0,0.7);\n padding: 10px;\n border-radius: 5px;\n z-index: 100;\n }}\n .button {{\n background: #4CAF50;\n color: white;\n border: none;\n padding: 8px 12px;\n margin: 2px;\n border-radius: 3px;\n cursor: pointer;\n font-size: 13px;\n }}\n .button:hover {{\n background: #45a049;\n }}\n </style>\n </head>\n <body>\n <div id=\"container\">\n <div id=\"info\">Loading...</div>\n <div class=\"controls\">\n <button id=\"reset-view\" class=\"button\">Reset View</button>\n </div>\n </div>\n\n <script type=\"importmap\">\n {{\n \"imports\": {{\n \"three\": \"https://unpkg.com/three@0.160.0/build/three.module.js\",\n \"three/examples/jsm/loaders/PLYLoader.js\": \"https://unpkg.com/three@0.160.0/examples/jsm/loaders/PLYLoader.js\",\n \"three/examples/jsm/controls/OrbitControls.js\": \"https://unpkg.com/three@0.160.0/examples/jsm/controls/OrbitControls.js\"\n }}\n }}\n </script>\n <script type=\"module\">\n import * as THREE from 'three';\n import {{ PLYLoader }} from 'three/examples/jsm/loaders/PLYLoader.js';\n import {{ OrbitControls }} from 'three/examples/jsm/controls/OrbitControls.js';\n\n let scene, camera, renderer, controls;\n let currentPointCloud = null;\n\n function init() {{\n const container = document.getElementById('container');\n \n scene = new THREE.Scene();\n scene.background = new THREE.Color(0x1a1a1a);\n\n camera = new THREE.PerspectiveCamera(60, container.clientWidth / container.clientHeight, 0.1, 10000);\n camera.position.set(5, 5, 10);\n\n renderer = new THREE.WebGLRenderer({{ antialias: true }});\n renderer.setSize(container.clientWidth, container.clientHeight);\n container.appendChild(renderer.domElement);\n\n controls = new OrbitControls(camera, 
renderer.domElement);\n controls.enableDamping = true;\n controls.dampingFactor = 0.05;\n\n const ambientLight = new THREE.AmbientLight(0xffffff, 0.6);\n scene.add(ambientLight);\n\n const directionalLight = new THREE.DirectionalLight(0xffffff, 0.8);\n directionalLight.position.set(10, 20, 15);\n scene.add(directionalLight);\n\n const gridHelper = new THREE.GridHelper(100, 50, 0x444444, 0x222222);\n scene.add(gridHelper);\n\n document.getElementById('reset-view').addEventListener('click', resetView);\n\n loadPLY();\n animate();\n }}\n\n function loadPLY() {{\n const plyBase64 = '{ply_base64}';\n const binaryString = atob(plyBase64);\n const bytes = new Uint8Array(binaryString.length);\n for (let i = 0; i < binaryString.length; i++) {{\n bytes[i] = binaryString.charCodeAt(i);\n }}\n\n const loader = new PLYLoader();\n try {{\n const geometry = loader.parse(bytes.buffer);\n displayGeometry(geometry);\n }} catch (error) {{\n document.getElementById('info').innerHTML = 'Error: ' + error.message;\n }}\n }}\n\n function displayGeometry(geometry) {{\n const hasColors = geometry.attributes.color !== undefined;\n \n // Fixed point size: 0.002\n const material = new THREE.PointsMaterial({{\n size: 0.002,\n vertexColors: hasColors,\n sizeAttenuation: true\n }});\n\n if (!hasColors) {{\n material.color = new THREE.Color(0x00aaff);\n }}\n\n const pointCloud = new THREE.Points(geometry, material);\n scene.add(pointCloud);\n currentPointCloud = pointCloud;\n\n geometry.computeBoundingBox();\n const bbox = geometry.boundingBox;\n const center = new THREE.Vector3();\n bbox.getCenter(center);\n const size = bbox.getSize(new THREE.Vector3());\n const maxDim = Math.max(size.x, size.y, size.z);\n\n const fov = camera.fov * (Math.PI / 180);\n let cameraDistance = Math.abs(maxDim / Math.tan(fov / 2)) * 1.5;\n \n camera.position.set(cameraDistance, cameraDistance * 0.7, cameraDistance);\n controls.target.copy(center);\n controls.update();\n\n const pointCount = geometry.attributes.position.count;\n document.getElementById('info').innerHTML = \n `<strong>Point Cloud</strong><br>\n Points: ${{pointCount.toLocaleString()}}<br>\n Size: (${{size.x.toFixed(2)}}, ${{size.y.toFixed(2)}}, ${{size.z.toFixed(2)}})`;\n }}\n\n function resetView() {{\n if (currentPointCloud) {{\n currentPointCloud.geometry.computeBoundingBox();\n const bbox = currentPointCloud.geometry.boundingBox;\n const center = new THREE.Vector3();\n bbox.getCenter(center);\n const size = bbox.getSize(new THREE.Vector3());\n const maxDim = Math.max(size.x, size.y, size.z);\n \n const fov = camera.fov * (Math.PI / 180);\n let cameraDistance = Math.abs(maxDim / Math.tan(fov / 2)) * 1.5;\n \n camera.position.set(cameraDistance, cameraDistance * 0.7, cameraDistance);\n controls.target.copy(center);\n controls.update();\n }}\n }}\n\n function animate() {{\n requestAnimationFrame(animate);\n controls.update();\n renderer.render(scene, camera);\n }}\n\n init();\n </script>\n </body>\n </html>\n \"\"\"\n \n display(HTML(html_content))\n\n","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-01-30T00:53:03.944313Z","iopub.status.idle":"2026-01-30T00:53:03.944605Z","shell.execute_reply.started":"2026-01-30T00:53:03.944458Z","shell.execute_reply":"2026-01-30T00:53:03.944483Z"}},"outputs":[],"execution_count":null},{"cell_type":"code","source":"# you can change position and zoom 
ratio.\ndisplay_ply_viewer('/kaggle/working/output/colmap/sparse/0/point_cloud.ply')","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-01-30T00:53:03.946662Z","iopub.status.idle":"2026-01-30T00:53:03.947126Z","shell.execute_reply.started":"2026-01-30T00:53:03.946897Z","shell.execute_reply":"2026-01-30T00:53:03.946917Z"}},"outputs":[],"execution_count":null},{"cell_type":"code","source":"","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"# Process Comparison","metadata":{}},{"cell_type":"markdown","source":"### Comparison Table: process1, 2, and 3\n\n| Comparison Item | process1.py | process2.py | process3.py |\n| --- | --- | --- | --- |\n| **Intrinsics (Focal Length)** | Uses **scaled MASt3R estimates** | Uses **scaled MASt3R estimates** (supports iso/anisotropic) | **Simplified calculation** based on image size (`max(w, h) * 1.2`) |\n| **Intrinsics (Principal Point)** | Uses **scaled MASt3R estimates** | Uses **scaled MASt3R estimates** | Fixed to the **image center** |\n| **Camera Pose (Extrinsics)** | **Inverse matrix transform** of MASt3R poses (w2c) | **Inverse matrix transform** of MASt3R poses (w2c) | **Custom pose estimation** based on the median of 3D points |\n| **3D Point Extraction & Filtering** | Random sampling (up to 1M pts) and NaN/Inf removal | Filtering based on **confidence threshold** (default 1.5) | **Confidence filtering** and sampling 10k points per image |\n| **Color Information** | Colors from resized source images | Colors matching only filtered 3D points | Directly from scene image data |\n| **Primary Output Files** | COLMAP sparse reconstruction binaries (cameras, images, points3D) | COLMAP sparse reconstruction binaries (with actual RGB colors) | Sparse binaries + **Depth and Normal maps** |\n| **Main Use Case / Features** | Faithful conversion of MASt3R geometry to COLMAP | Emphasis on confidence filtering and accurate color extraction | Designed for integration with **COLMAP Dense Reconstruction** |\n\n---\n\n### Key Differences and Insights\n\n1. **Camera Model Accuracy**\n* **process1** and **process2** accurately reflect MASt3R's output by scaling the estimated focal length and principal point to the image dimensions.\n* **process3** uses a simplified pinhole model where camera parameters are derived from a fixed formula.\n\n\n2. **Pose Estimation Approach**\n* **process1** and **process2** calculate the inverse of the \"camera-to-world\" matrix provided by MASt3R to fit the \"world-to-camera\" format required by COLMAP (a minimal code sketch of this inversion appears just after this notebook).\n* **process3** employs a unique method of determining translation vectors based on the spatial distribution (median) of the generated 3D point cloud.\n\n\n3. **Point Cloud Quality and Sampling**\n* **process1** is designed to handle a high volume of points (up to 1 million).\n* **process2** and **process3** prioritize data quality by using MASt3R's \"confidence\" scores to filter out unreliable points.\n\n\n4. **Data Output Depth**\n* **process3** provides the most comprehensive output, generating **depth maps and normal maps** in binary format. This makes it specifically optimized for COLMAP's stereo pipeline, facilitating detailed 3D modeling beyond just sparse reconstruction.\n","metadata":{}},{"cell_type":"markdown","source":"https://www.kaggle.com/code/stpeteishii/3d-reconstruction-mast3r-w-ps1<br>\nhttps://www.kaggle.com/code/stpeteishii/3d-reconstruction-mast3r-w-ps2<br>\nhttps://www.kaggle.com/code/stpeteishii/3d-reconstruction-mast3r-w-ps3","metadata":{}}]}
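The "Pose Estimation Approach" item in the comparison above notes that process1 and process2 invert MASt3R's camera-to-world poses into the world-to-camera convention that COLMAP's images.bin expects. The snippet below is a minimal illustrative sketch of that inversion, not code taken from any of the three notebooks: the function name `cam_to_world_to_colmap` is made up here, it assumes each pose is available as a 4x4 NumPy camera-to-world matrix, and it uses SciPy only for the rotation-to-quaternion step.

```python
# Illustrative sketch (not notebook code): turn a 4x4 camera-to-world pose,
# as MASt3R estimates them, into COLMAP's world-to-camera quaternion + translation.
import numpy as np
from scipy.spatial.transform import Rotation


def cam_to_world_to_colmap(c2w: np.ndarray):
    """Return (qvec, tvec) in COLMAP's (qw, qx, qy, qz) / (tx, ty, tz) convention."""
    R_c2w = c2w[:3, :3]
    C = c2w[:3, 3]            # camera centre expressed in world coordinates
    R_w2c = R_c2w.T           # world-to-camera rotation
    t_w2c = -R_w2c @ C        # world-to-camera translation
    # SciPy returns quaternions as (x, y, z, w); COLMAP stores (w, x, y, z).
    qx, qy, qz, qw = Rotation.from_matrix(R_w2c).as_quat()
    return np.array([qw, qx, qy, qz]), t_w2c


# Identity pose -> camera at the world origin: quaternion (1, 0, 0, 0), zero translation.
qvec, tvec = cam_to_world_to_colmap(np.eye(4))
print(qvec, tvec)
```

process3, by contrast, skips this inversion and derives its translation vectors from the median of the reconstructed point cloud, as described in the table.

Similarly, the points3D.bin writer in the notebook (a uint64 point count, then per point: id, xyz, RGB, error, and an empty track) can be sanity-checked without pycolmap by a small reader that mirrors the same struct layout. This helper is also illustrative only; `read_points3d_bin` is a hypothetical name and assumes the zero-length tracks written above.

```python
# Illustrative sketch: read back a points3D.bin written with the layout used above
# (uint64 count; per point: uint64 id, 3 doubles xyz, 3 bytes rgb, double error,
# uint64 track length, then track entries of two uint32 each).
import struct


def read_points3d_bin(path):
    pts, cols, errs = [], [], []
    with open(path, 'rb') as f:
        (num_points,) = struct.unpack('Q', f.read(8))
        for _ in range(num_points):
            (pid,) = struct.unpack('Q', f.read(8))
            xyz = struct.unpack('ddd', f.read(24))
            rgb = struct.unpack('BBB', f.read(3))
            (err,) = struct.unpack('d', f.read(8))
            (track_len,) = struct.unpack('Q', f.read(8))
            f.read(track_len * 8)  # skip (image_id, point2D_idx) uint32 pairs
            pts.append(xyz)
            cols.append(rgb)
            errs.append(err)
    return pts, cols, errs
```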
|
3d-reconstruction-mast3r-w-ps3.ipynb
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d4bc554b8ffe5f09bfa0eac7d031ea42cf25559b9d79bbc0993422d4470de407
|
| 3 |
+
size 16404507
|