stpete2 committed
Commit 01b2674 · verified · 1 Parent(s): 0e490b8

Upload bike-biplet-dino-mast3r-ps3-ply-01.ipynb

bike-biplet-dino-mast3r-ps3-ply-01.ipynb ADDED
+ {"metadata":{"kernelspec":{"name":"python3","display_name":"Python 3","language":"python"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.12.12"},"accelerator":"GPU","colab":{"gpuType":"T4","provenance":[]},"kaggle":{"accelerator":"none","dataSources":[{"sourceId":14628798,"sourceType":"datasetVersion","datasetId":1429416}],"dockerImageVersionId":31260,"isInternetEnabled":true,"language":"python","sourceType":"notebook","isGpuEnabled":false},"papermill":{"default_parameters":{},"duration":967.270978,"end_time":"2026-01-20T01:22:34.649213","environment_variables":{},"exception":null,"input_path":"__notebook__.ipynb","output_path":"__notebook__.ipynb","parameters":{},"start_time":"2026-01-20T01:06:27.378235","version":"2.6.0"},"widgets":{"application/vnd.jupyter.widget-state+json":{"state":{"044f8e96b50e4c33b25e342091f4ec64":{"model_module":"@jupyter-widgets/base","model_module_version":"2.0.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"2.0.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border_bottom":null,"border_left":null,"border_right":null,"border_top":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"129b638c005640a591de9920f26201d1":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"HTMLStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"HTMLStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"StyleView","background":null,"description_width":"","font_size":null,"text_color":null}},"2199c02789dc4f23accfd256dd3cac19":{"model_module":"@jupyter-widgets/base","model_module_version":"2.0.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"2.0.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border_bottom":null,"border_left":null,"border_right":null,"border_top":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"2aedd953dfdf4ab9
8df88aefe1133503":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"ProgressStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"2d210fca524a4c9abcea1f361b86cc2c":{"model_module":"@jupyter-widgets/base","model_module_version":"2.0.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"2.0.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border_bottom":null,"border_left":null,"border_right":null,"border_top":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"33f1d82b482d4fa6b0a99cab070ca2ee":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"HBoxModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"2.0.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_801fbba63af94d81bc548c40c3660ce4","IPY_MODEL_66eb589dd72c4555a561d3095df91257","IPY_MODEL_c11823a2992e4c33ad23d0b61b520751"],"layout":"IPY_MODEL_476a9de20d434d8d81d6177e50f1a6af","tabbable":null,"tooltip":null}},"40f2d021b0174597b5502a73cc88a808":{"model_module":"@jupyter-widgets/base","model_module_version":"2.0.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"2.0.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border_bottom":null,"border_left":null,"border_right":null,"border_top":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"476a9de20d434d8d81d6177e50f1a6af":{"model_module":"@jupyter-widgets/base","model_module_version":"2.0.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"2.0.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"LayoutView","align_content":null,"align_item
s":null,"align_self":null,"border_bottom":null,"border_left":null,"border_right":null,"border_top":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"4c671a7ee42b4015aed241c0036c60b7":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"2.0.0","_view_name":"HTMLView","description":"","description_allow_html":false,"layout":"IPY_MODEL_a661de9610b34f9cb36a026a4e15aae9","placeholder":"​","style":"IPY_MODEL_aec2ed624beb4f7b917880dc4b550370","tabbable":null,"tooltip":null,"value":" 346M/346M [00:02&lt;00:00, 246MB/s]"}},"52ddde5887ed4a3d83c8adebc13d2387":{"model_module":"@jupyter-widgets/base","model_module_version":"2.0.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"2.0.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border_bottom":null,"border_left":null,"border_right":null,"border_top":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"5687e79b1d974c5cabfbeba43a3694c5":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"2.0.0","_view_name":"HTMLView","description":"","description_allow_html":false,"layout":"IPY_MODEL_e212b430e00d4a5d967a385deecaab91","placeholder":"​","style":"IPY_MODEL_e20b857897f74db885a8dfc6d96887d5","tabbable":null,"tooltip":null,"value":"model.safetensors: 
100%"}},"5859bcf5772e4820bec3683d25c9842a":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"HBoxModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"2.0.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_5687e79b1d974c5cabfbeba43a3694c5","IPY_MODEL_ae3558f346c14038a75a88bdce4d9fbd","IPY_MODEL_4c671a7ee42b4015aed241c0036c60b7"],"layout":"IPY_MODEL_c0e52e28ecda4e6696bcf3b7647cd6ed","tabbable":null,"tooltip":null}},"5c4e4442fa7a4f249b9d31d759f42dfa":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"HBoxModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"2.0.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_a33ea7c326fe4859a6eeb6324537e29d","IPY_MODEL_985ea3339fb84d28b9ad54cecc5ca023","IPY_MODEL_e32df049e2e74f7f9db8b8e029b03209"],"layout":"IPY_MODEL_2199c02789dc4f23accfd256dd3cac19","tabbable":null,"tooltip":null}},"62081223720d477b91f099e636f67ba2":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"HTMLStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"HTMLStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"StyleView","background":null,"description_width":"","font_size":null,"text_color":null}},"66eb589dd72c4555a561d3095df91257":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"FloatProgressModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"2.0.0","_view_name":"ProgressView","bar_style":"success","description":"","description_allow_html":false,"layout":"IPY_MODEL_2d210fca524a4c9abcea1f361b86cc2c","max":436,"min":0,"orientation":"horizontal","style":"IPY_MODEL_db95f8aa3a684369afcd0a73fda7b405","tabbable":null,"tooltip":null,"value":436}},"6f552fcc4d28402d91491bfaf2989e81":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"ProgressStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"71c6c07841974c91b9a490f34ad3f556":{"model_module":"@jupyter-widgets/base","model_module_version":"2.0.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"2.0.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border_bottom":null,"border_left":null,"border_right":null,"border_top":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"gr
id_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"801fbba63af94d81bc548c40c3660ce4":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"2.0.0","_view_name":"HTMLView","description":"","description_allow_html":false,"layout":"IPY_MODEL_71c6c07841974c91b9a490f34ad3f556","placeholder":"​","style":"IPY_MODEL_ea596f96884c4955a34cb895b3564ef1","tabbable":null,"tooltip":null,"value":"preprocessor_config.json: 100%"}},"898e4be9614645c1b11417befaa72cc7":{"model_module":"@jupyter-widgets/base","model_module_version":"2.0.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"2.0.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border_bottom":null,"border_left":null,"border_right":null,"border_top":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"985ea3339fb84d28b9ad54cecc5ca023":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"FloatProgressModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"2.0.0","_view_name":"ProgressView","bar_style":"success","description":"","description_allow_html":false,"layout":"IPY_MODEL_40f2d021b0174597b5502a73cc88a808","max":548,"min":0,"orientation":"horizontal","style":"IPY_MODEL_2aedd953dfdf4ab98df88aefe1133503","tabbable":null,"tooltip":null,"value":548}},"a33ea7c326fe4859a6eeb6324537e29d":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"2.0.0","_view_name":"HTMLView","description":"","description_allow_html":false,"layout":"IPY_MODEL_52ddde5887ed4a3d83c8adebc13d2387","placeholder":"​","style":"IPY_MODEL_e06705139cd04e1ea65f358a06c3f7cc","tabbable":null,"tooltip":null,"value":"config.json: 
100%"}},"a661de9610b34f9cb36a026a4e15aae9":{"model_module":"@jupyter-widgets/base","model_module_version":"2.0.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"2.0.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border_bottom":null,"border_left":null,"border_right":null,"border_top":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"ae3558f346c14038a75a88bdce4d9fbd":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"FloatProgressModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"2.0.0","_view_name":"ProgressView","bar_style":"success","description":"","description_allow_html":false,"layout":"IPY_MODEL_c4dbbad7fcbf40209cc53a5481508ca6","max":346345912,"min":0,"orientation":"horizontal","style":"IPY_MODEL_6f552fcc4d28402d91491bfaf2989e81","tabbable":null,"tooltip":null,"value":346345912}},"aec2ed624beb4f7b917880dc4b550370":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"HTMLStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"HTMLStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"StyleView","background":null,"description_width":"","font_size":null,"text_color":null}},"c0e52e28ecda4e6696bcf3b7647cd6ed":{"model_module":"@jupyter-widgets/base","model_module_version":"2.0.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"2.0.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border_bottom":null,"border_left":null,"border_right":null,"border_top":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"c11823a2992e4c33ad23d0b61b520751":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"HTMLModel","_view_count":null,"_
view_module":"@jupyter-widgets/controls","_view_module_version":"2.0.0","_view_name":"HTMLView","description":"","description_allow_html":false,"layout":"IPY_MODEL_044f8e96b50e4c33b25e342091f4ec64","placeholder":"​","style":"IPY_MODEL_129b638c005640a591de9920f26201d1","tabbable":null,"tooltip":null,"value":" 436/436 [00:00&lt;00:00, 48.9kB/s]"}},"c4dbbad7fcbf40209cc53a5481508ca6":{"model_module":"@jupyter-widgets/base","model_module_version":"2.0.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"2.0.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border_bottom":null,"border_left":null,"border_right":null,"border_top":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"db95f8aa3a684369afcd0a73fda7b405":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"ProgressStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"e06705139cd04e1ea65f358a06c3f7cc":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"HTMLStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"HTMLStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"StyleView","background":null,"description_width":"","font_size":null,"text_color":null}},"e20b857897f74db885a8dfc6d96887d5":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"HTMLStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"HTMLStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"StyleView","background":null,"description_width":"","font_size":null,"text_color":null}},"e212b430e00d4a5d967a385deecaab91":{"model_module":"@jupyter-widgets/base","model_module_version":"2.0.0","model_name":"LayoutModel","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"2.0.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border_bottom":null,"border_left":null,"border_right":null,"border_top":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":nu
ll,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"e32df049e2e74f7f9db8b8e029b03209":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"HTMLModel","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"2.0.0","_view_name":"HTMLView","description":"","description_allow_html":false,"layout":"IPY_MODEL_898e4be9614645c1b11417befaa72cc7","placeholder":"​","style":"IPY_MODEL_62081223720d477b91f099e636f67ba2","tabbable":null,"tooltip":null,"value":" 548/548 [00:00&lt;00:00, 76.3kB/s]"}},"ea596f96884c4955a34cb895b3564ef1":{"model_module":"@jupyter-widgets/controls","model_module_version":"2.0.0","model_name":"HTMLStyleModel","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"2.0.0","_model_name":"HTMLStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"2.0.0","_view_name":"StyleView","background":null,"description_width":"","font_size":null,"text_color":null}}},"version_major":2,"version_minor":0}}},"nbformat_minor":4,"nbformat":4,"cells":[{"cell_type":"markdown","source":"# **biplet_dino_mast3r_ps3_ply_01** \n\n","metadata":{"id":"qDQLX3PArmh8","papermill":{"duration":0.003504,"end_time":"2026-01-20T01:06:31.022336","exception":false,"start_time":"2026-01-20T01:06:31.018832","status":"completed"},"tags":[]}},{"cell_type":"markdown","source":"https://www.kaggle.com/code/stpeteishii/bike-biplet-dino-mast3r-ps3-gs-kg-41cpu","metadata":{}},{"cell_type":"code","source":"\nimport os\nimport sys\nimport gc\nimport h5py\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom tqdm import tqdm\nfrom pathlib import Path\nimport subprocess\nfrom PIL import Image, ImageFilter\nimport struct\n\n# Transformers for DINO\nfrom transformers import AutoImageProcessor, AutoModel\n\n# ============================================================================\n# Configuration\n# ============================================================================\nclass Config:\n # Feature extraction\n N_KEYPOINTS = 8192\n IMAGE_SIZE = 1024\n\n # Pair selection - CRITICAL for memory\n GLOBAL_TOPK = 20 # Reduced from 50 - each image pairs with top 20\n MIN_MATCHES = 10\n RATIO_THR = 1.2\n\n # Paths\n DINO_MODEL = \"facebook/dinov2-base\"\n \n # MASt3R - Reduced size for memory\n MAST3R_MODEL = \"/kaggle/working/mast3r/checkpoints/MASt3R_ViTLarge_BaseDecoder_512_catmlpdpt_metric.pth\"\n MAST3R_IMAGE_SIZE = 224 # Small size to save memory\n\n # Device\n DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n# ============================================================================\n# Memory Management Utilities\n# ============================================================================\n\ndef clear_memory():\n \"\"\"Aggressively clear GPU and CPU memory\"\"\"\n gc.collect()\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n torch.cuda.synchronize()\n\ndef get_memory_info():\n \"\"\"Get current memory usage\"\"\"\n if torch.cuda.is_available():\n allocated = torch.cuda.memory_allocated() / 1024**3\n reserved = torch.cuda.memory_reserved() / 1024**3\n print(f\"GPU Memory - Allocated: {allocated:.2f}GB, Reserved: 
{reserved:.2f}GB\")\n \n import psutil\n cpu_mem = psutil.virtual_memory().percent\n print(f\"CPU Memory Usage: {cpu_mem:.1f}%\")\n\n# ============================================================================\n# Environment Setup\n# ============================================================================\n\ndef run_cmd(cmd, check=True, capture=False):\n \"\"\"Run command with better error handling\"\"\"\n print(f\"Running: {' '.join(cmd)}\")\n result = subprocess.run(\n cmd,\n capture_output=capture,\n text=True,\n check=False\n )\n if check and result.returncode != 0:\n print(f\"❌ Command failed with code {result.returncode}\")\n if capture:\n print(f\"STDOUT: {result.stdout}\")\n print(f\"STDERR: {result.stderr}\")\n return result\n\n\ndef setup_base_environment():\n \"\"\"Setup base Python environment\"\"\"\n print(\"\\n=== Setting up Base Environment ===\")\n \n # NumPy fix for Python 3.12\n print(\"\\nπŸ“¦ Fixing NumPy...\")\n run_cmd([sys.executable, \"-m\", \"pip\", \"uninstall\", \"-y\", \"numpy\"])\n run_cmd([sys.executable, \"-m\", \"pip\", \"install\", \"numpy==1.26.4\"])\n \n # PyTorch\n print(\"\\nπŸ“¦ Installing PyTorch...\")\n run_cmd([\n sys.executable, \"-m\", \"pip\", \"install\",\n \"torch\", \"torchvision\", \"torchaudio\"\n ])\n \n # Core utilities\n print(\"\\nπŸ“¦ Installing core utilities...\")\n run_cmd([\n sys.executable, \"-m\", \"pip\", \"install\",\n \"opencv-python\",\n \"pillow\",\n \"imageio\",\n \"imageio-ffmpeg\",\n \"plyfile\",\n \"tqdm\",\n \"tensorboard\",\n \"scipy\", # for rotation conversions and image resizing\n \"psutil\" # for memory monitoring\n ])\n \n # Transformers for DINO\n print(\"\\nπŸ“¦ Installing transformers...\")\n run_cmd([\n sys.executable, \"-m\", \"pip\", \"install\",\n \"transformers==4.40.0\"\n ])\n \n # pycolmap for COLMAP format\n print(\"\\nπŸ“¦ Installing pycolmap...\")\n run_cmd([sys.executable, \"-m\", \"pip\", \"install\", \"pycolmap\"])\n \n print(\"βœ“ Base environment setup complete!\")\n\n\ndef setup_mast3r():\n \"\"\"Install and setup MASt3R\"\"\"\n print(\"\\n=== Setting up MASt3R ===\")\n \n os.chdir('/kaggle/working')\n \n # Remove existing installation\n if os.path.exists('mast3r'):\n print(\"Removing existing MASt3R installation...\")\n os.system('rm -rf mast3r')\n \n # Clone repository\n print(\"Cloning MASt3R repository...\")\n os.system('git clone --recursive https://github.com/naver/mast3r')\n os.chdir('/kaggle/working/mast3r')\n \n # Check dust3r directory\n print(\"Checking dust3r structure...\")\n os.system('ls -la dust3r/')\n \n # Install dust3r\n print(\"Installing dust3r...\")\n os.system('cd dust3r && python -m pip install -e .')\n \n # Install croco\n print(\"Installing croco...\")\n os.system('cd dust3r/croco && python -m pip install -e .')\n \n # Install requirements\n print(\"Installing MASt3R requirements...\")\n os.system('pip install -r requirements.txt')\n \n # Download model weights\n print(\"Downloading model weights...\")\n os.system('mkdir -p checkpoints')\n os.system('wget -P checkpoints/ https://download.europe.naverlabs.com/ComputerVision/MASt3R/MASt3R_ViTLarge_BaseDecoder_512_catmlpdpt_metric.pth')\n \n # Install additional dependencies\n print(\"Installing additional dependencies...\")\n os.system('pip install trimesh matplotlib roma')\n \n # Add to path\n sys.path.insert(0, '/kaggle/working/mast3r')\n sys.path.insert(0, '/kaggle/working/mast3r/dust3r')\n \n # Verification\n print(\"\\nπŸ” Verifying MASt3R installation...\")\n try:\n from mast3r.model import AsymmetricMASt3R\n 
print(\" βœ“ MASt3R import: OK\")\n except Exception as e:\n print(f\" ❌ MASt3R import failed: {e}\")\n raise\n \n print(\"βœ“ MASt3R setup complete!\")\n\ndef setup_gaussian_splatting():\n \"\"\"Setup Gaussian Splatting\"\"\"\n print(\"\\n=== Setting up Gaussian Splatting ===\")\n \n os.chdir('/kaggle/working')\n \n WORK_DIR = \"gaussian-splatting\"\n \n if not os.path.exists(WORK_DIR):\n print(\"Cloning Gaussian Splatting repository...\")\n run_cmd([\n \"git\", \"clone\", \"--recursive\",\n \"https://github.com/graphdeco-inria/gaussian-splatting.git\",\n WORK_DIR\n ])\n else:\n print(\"βœ“ Repository already exists\")\n \n os.chdir(WORK_DIR)\n \n # Install requirements\n print(\"Installing Gaussian Splatting requirements...\")\n run_cmd([sys.executable, \"-m\", \"pip\", \"install\", \"-r\", \"requirements.txt\"])\n \n # Build submodules\n print(\"\\nπŸ“¦ Building Gaussian Splatting submodules...\")\n \n submodules = {\n \"diff-gaussian-rasterization\":\n \"https://github.com/graphdeco-inria/diff-gaussian-rasterization.git\",\n \"simple-knn\":\n \"https://github.com/camenduru/simple-knn.git\"\n }\n \n for name, repo in submodules.items():\n print(f\"\\nπŸ“¦ Installing {name}...\")\n path = os.path.join(\"submodules\", name)\n if not os.path.exists(path):\n run_cmd([\"git\", \"clone\", repo, path])\n run_cmd([sys.executable, \"-m\", \"pip\", \"install\", path])\n \n print(\"βœ“ Gaussian Splatting setup complete!\")\n","metadata":{"papermill":{"duration":46.280727,"end_time":"2026-01-20T01:07:23.641872","exception":false,"start_time":"2026-01-20T01:06:37.361145","status":"completed"},"tags":[],"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"\nsetup_base_environment()\nclear_memory()\n\nsetup_mast3r()\nclear_memory()\n","metadata":{"trusted":true,"_kg_hide-output":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"# ============================================================================\n# Step 0: Biplet-Square Normalization (PRESERVED FROM ORIGINAL)\n# ============================================================================\n\ndef normalize_image_sizes_biplet(input_dir, output_dir=None, size=1024):\n \"\"\"\n Generates two square crops (Left & Right or Top & Bottom)\n from each image in a directory.\n \"\"\"\n if output_dir is None:\n output_dir = 'output/images_biplet'\n\n os.makedirs(output_dir, exist_ok=True)\n\n print(f\"Generating 2 cropped squares (Left/Right or Top/Bottom) for each image...\")\n print()\n\n converted_count = 0\n size_stats = {}\n\n for img_file in sorted(os.listdir(input_dir)):\n if not img_file.lower().endswith(('.jpg', '.jpeg', '.png')):\n continue\n\n input_path = os.path.join(input_dir, img_file)\n\n try:\n img = Image.open(input_path)\n original_size = img.size\n\n size_key = f\"{original_size[0]}x{original_size[1]}\"\n size_stats[size_key] = size_stats.get(size_key, 0) + 1\n\n # Generate 2 crops\n crops = generate_two_crops(img, size)\n\n base_name, ext = os.path.splitext(img_file)\n for mode, cropped_img in crops.items():\n output_path = os.path.join(output_dir, f\"{base_name}_{mode}{ext}\")\n cropped_img.save(output_path, quality=95)\n\n converted_count += 1\n print(f\" βœ“ {img_file}: {original_size} β†’ 2 square images generated\")\n\n except Exception as e:\n print(f\" βœ— Error processing {img_file}: {e}\")\n\n print(f\"\\nProcessing complete: {converted_count} source images processed\")\n print(f\"Original size distribution: {size_stats}\")\n return 
converted_count","metadata":{"papermill":{"duration":46.280727,"end_time":"2026-01-20T01:07:23.641872","exception":false,"start_time":"2026-01-20T01:06:37.361145","status":"completed"},"tags":[],"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"def generate_two_crops(img, size):\n \"\"\"\n Generates two square crops from an image.\n \"\"\"\n # If size is a tuple or list, extract the first value\n if isinstance(size, (tuple, list)):\n size = size[0]\n \n width, height = img.size\n crops = {}\n \n if width >= height:\n # Landscape: Split into left and right squares\n box_left = (0, 0, height, height)\n box_right = (width - height, 0, width, height)\n crops['left'] = img.crop(box_left).resize((size, size), Image.LANCZOS)\n crops['right'] = img.crop(box_right).resize((size, size), Image.LANCZOS)\n else:\n # Portrait: Split into top and bottom squares\n box_top = (0, 0, width, width)\n box_bottom = (0, height - width, width, height)\n crops['top'] = img.crop(box_top).resize((size, size), Image.LANCZOS)\n crops['bottom'] = img.crop(box_bottom).resize((size, size), Image.LANCZOS)\n \n return crops","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"# ============================================================================\n# Step 1: DINO-based Pair Selection (PRESERVED FROM ORIGINAL)\n# ============================================================================\n\ndef load_torch_image(fname, device):\n \"\"\"Load image as torch tensor\"\"\"\n import torchvision.transforms as T\n\n img = Image.open(fname).convert('RGB')\n transform = T.Compose([\n T.ToTensor(),\n ])\n return transform(img).unsqueeze(0).to(device)\n\ndef extract_dino_global(image_paths, model_path, device):\n \"\"\"Extract DINO global descriptors with memory management\"\"\"\n print(\"\\n=== Extracting DINO Global Features ===\")\n print(\"Initial memory state:\")\n get_memory_info()\n\n processor = AutoImageProcessor.from_pretrained(model_path)\n model = AutoModel.from_pretrained(model_path).eval().to(device)\n\n global_descs = []\n batch_size = 4 # Small batch to save memory\n \n for i in tqdm(range(0, len(image_paths), batch_size)):\n batch_paths = image_paths[i:i+batch_size]\n batch_imgs = []\n \n for img_path in batch_paths:\n img = load_torch_image(img_path, device)\n batch_imgs.append(img)\n \n batch_tensor = torch.cat(batch_imgs, dim=0)\n \n with torch.no_grad():\n inputs = processor(images=batch_tensor, return_tensors=\"pt\", do_rescale=False).to(device)\n outputs = model(**inputs)\n desc = F.normalize(outputs.last_hidden_state[:, 1:].max(dim=1)[0], dim=1, p=2)\n global_descs.append(desc.cpu())\n \n # Clear batch memory\n del batch_tensor, inputs, outputs, desc\n clear_memory()\n\n global_descs = torch.cat(global_descs, dim=0)\n\n del model, processor\n clear_memory()\n \n print(\"After DINO extraction:\")\n get_memory_info()\n\n return global_descs\n\ndef build_topk_pairs(global_feats, k, device):\n \"\"\"Build top-k similar pairs from global features\"\"\"\n g = global_feats.to(device)\n sim = g @ g.T\n sim.fill_diagonal_(-1)\n\n N = sim.size(0)\n k = min(k, N - 1)\n\n topk_indices = torch.topk(sim, k, dim=1).indices.cpu()\n\n pairs = []\n for i in range(N):\n for j in topk_indices[i]:\n j = j.item()\n if i < j:\n pairs.append((i, j))\n\n # Remove duplicates\n pairs = list(set(pairs))\n \n return pairs\n\ndef select_diverse_pairs(pairs, max_pairs, num_images):\n \"\"\"\n Select diverse pairs to ensure good image coverage\n Strategy: Select 
pairs that maximize image coverage\n \"\"\"\n import random\n random.seed(42)\n \n if len(pairs) <= max_pairs:\n return pairs\n \n print(f\"Selecting {max_pairs} diverse pairs from {len(pairs)} candidates...\")\n \n # Count how many times each image appears in pairs\n image_counts = {i: 0 for i in range(num_images)}\n for i, j in pairs:\n image_counts[i] += 1\n image_counts[j] += 1\n \n # Sort pairs by: prefer pairs with less-connected images\n def pair_score(pair):\n i, j = pair\n # Lower score = images appear in fewer pairs = more diverse\n return image_counts[i] + image_counts[j]\n \n pairs_scored = [(pair, pair_score(pair)) for pair in pairs]\n pairs_scored.sort(key=lambda x: x[1])\n \n # Select pairs greedily to maximize coverage\n selected = []\n selected_images = set()\n \n # Phase 1: Select pairs that add new images (greedy coverage)\n for pair, score in pairs_scored:\n if len(selected) >= max_pairs:\n break\n i, j = pair\n # Prefer pairs that include new images\n if i not in selected_images or j not in selected_images:\n selected.append(pair)\n selected_images.add(i)\n selected_images.add(j)\n \n # Phase 2: Fill remaining slots with high-similarity pairs\n if len(selected) < max_pairs:\n remaining = [p for p, s in pairs_scored if p not in selected]\n random.shuffle(remaining)\n selected.extend(remaining[:max_pairs - len(selected)])\n \n print(f\"Selected pairs cover {len(selected_images)} / {num_images} images ({100*len(selected_images)/num_images:.1f}%)\")\n \n return selected\n\ndef get_image_pairs_dino(image_paths, max_pairs=None):\n \"\"\"DINO-based pair selection with intelligent limiting\"\"\"\n device = Config.DEVICE\n\n # DINO global features\n global_feats = extract_dino_global(image_paths, Config.DINO_MODEL, device)\n pairs = build_topk_pairs(global_feats, Config.GLOBAL_TOPK, device)\n\n print(f\"Initial pairs from DINO: {len(pairs)}\")\n \n # Apply intelligent pair selection if limit specified\n if max_pairs and len(pairs) > max_pairs:\n pairs = select_diverse_pairs(pairs, max_pairs, len(image_paths))\n \n return pairs\n\n# ============================================================================\n# Step 2: MASt3R Reconstruction (REPLACES ALIKED/LIGHTGLUE/COLMAP)\n# ============================================================================\n\ndef load_mast3r_model(device='cuda'):\n \"\"\"Load MASt3R model\"\"\"\n from mast3r.model import AsymmetricMASt3R\n \n model = AsymmetricMASt3R.from_pretrained(Config.MAST3R_MODEL).to(device)\n model.eval()\n \n print(f\"βœ“ MASt3R model loaded on {device}\")\n return model\n\ndef load_images_for_mast3r(image_paths, size=224):\n \"\"\"Load images using DUSt3R's format with reduced size\"\"\"\n print(f\"\\n=== Loading images for MASt3R (size={size}) ===\")\n \n from dust3r.utils.image import load_images\n \n # Load images using DUSt3R's loader with reduced size\n images = load_images(image_paths, size=size, verbose=True)\n \n return images\n\ndef run_mast3r_pairs(model, image_paths, pairs, device='cuda', batch_size=1, max_pairs=None):\n \"\"\"Run MASt3R on selected pairs with memory management\"\"\"\n print(\"\\n=== Running MASt3R Reconstruction ===\")\n print(\"Initial memory state:\")\n get_memory_info()\n \n from dust3r.inference import inference\n from dust3r.cloud_opt import global_aligner, GlobalAlignerMode\n \n # Limit number of pairs if specified\n if max_pairs and len(pairs) > max_pairs:\n print(f\"Limiting pairs from {len(pairs)} to {max_pairs}\")\n # Select pairs more evenly distributed\n step = max(1, len(pairs) // 
max_pairs)\n pairs = pairs[::step][:max_pairs]\n \n print(f\"Processing {len(pairs)} pairs...\")\n \n # Load images in smaller size\n print(f\"Loading {len(image_paths)} images at {Config.MAST3R_IMAGE_SIZE}x{Config.MAST3R_IMAGE_SIZE}...\")\n images = load_images_for_mast3r(image_paths, size=Config.MAST3R_IMAGE_SIZE)\n \n print(f\"Loaded {len(images)} images\")\n print(\"After loading images:\")\n get_memory_info()\n \n # Create all image pairs at once\n print(f\"Creating {len(pairs)} image pairs...\")\n mast3r_pairs = []\n for idx1, idx2 in tqdm(pairs, desc=\"Preparing pairs\"):\n mast3r_pairs.append((images[idx1], images[idx2]))\n \n print(f\"Running MASt3R inference on {len(mast3r_pairs)} pairs...\")\n \n # Run inference (this returns the dict format we need)\n output = inference(mast3r_pairs, model, device, batch_size=batch_size, verbose=True)\n \n # Clear pairs from memory\n del mast3r_pairs\n clear_memory()\n \n print(\"βœ“ MASt3R inference complete\")\n print(\"After inference:\")\n get_memory_info()\n \n # Global alignment\n print(\"Running global alignment...\")\n scene = global_aligner(\n output, \n device=device, \n mode=GlobalAlignerMode.PointCloudOptimizer\n )\n \n # Clear output after creating scene\n del output\n clear_memory()\n \n print(\"Computing global alignment...\")\n loss = scene.compute_global_alignment(\n init=\"mst\", \n niter=150, # Reduced from 300\n schedule='cosine', \n lr=0.01\n )\n \n print(f\"βœ“ Global alignment complete (final loss: {loss:.6f})\")\n print(\"Final memory state:\")\n get_memory_info()\n \n return scene, images","metadata":{"papermill":{"duration":46.280727,"end_time":"2026-01-20T01:07:23.641872","exception":false,"start_time":"2026-01-20T01:06:37.361145","status":"completed"},"tags":[],"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"# process3 start","metadata":{}},{"cell_type":"code","source":"# ============================================================================\n# COLMAP Conversion (process3_05.py)\n# ============================================================================\n\ndef convert_mast3r_to_colmap(\n scene,\n output_dir: str,\n min_conf_thr: float = 2.0,\n clean_depth: bool = False,\n mask_images: bool = True,\n verbose: bool = True\n) -> str:\n \"\"\"Converts a MASt3R scene to COLMAP format.\"\"\"\n output_path = Path(output_dir)\n output_path.mkdir(parents=True, exist_ok=True)\n\n sparse_dir = output_path / \"sparse\" / \"0\"\n sparse_dir.mkdir(parents=True, exist_ok=True)\n\n images_dir = output_path / \"images\"\n images_dir.mkdir(parents=True, exist_ok=True)\n\n depth_dir = output_path / \"stereo\" / \"depth_maps\"\n depth_dir.mkdir(parents=True, exist_ok=True)\n\n normal_dir = output_path / \"stereo\" / \"normal_maps\"\n normal_dir.mkdir(parents=True, exist_ok=True)\n\n mask_dir = None\n if mask_images:\n mask_dir = output_path / \"stereo\" / \"confidence_maps\"\n mask_dir.mkdir(parents=True, exist_ok=True)\n\n if verbose:\n print(f\"Converting MASt3R scene to COLMAP format...\")\n print(f\"Output directory: {output_dir}\")\n\n cameras, images_data, points3D = extract_scene_data(scene, min_conf_thr, verbose)\n\n if verbose:\n print(f\"Extracted {len(cameras)} cameras\")\n print(f\"Extracted {len(images_data)} images\")\n print(f\"Extracted {len(points3D)} 3D points\")\n\n save_image_data(scene, images_dir, depth_dir, normal_dir, mask_dir, min_conf_thr, verbose)\n\n 
write_cameras_binary(cameras, sparse_dir / \"cameras.bin\")\n write_images_binary(images_data, sparse_dir / \"images.bin\")\n write_points3d_binary(points3D, sparse_dir / \"points3D.bin\")\n\n if verbose:\n print(f\"βœ“ COLMAP conversion completed\")\n print(f\" Sparse model: {sparse_dir}\")\n print(f\" Images: {images_dir}\")\n print(f\" Depth maps: {depth_dir}\")\n print(f\" Normal maps: {normal_dir}\")\n\n return str(output_path)\n\n\ndef extract_scene_data(scene, min_conf_thr: float, verbose: bool):\n \"\"\"Extracts cameras, images, and 3D points from a MASt3R scene.\"\"\"\n cameras = {}\n images_data = {}\n\n num_images = len(scene.imgs)\n\n all_confidences = scene.get_conf()\n all_pts3d = scene.get_pts3d()\n\n for idx in range(num_images):\n img = scene.imgs[idx]\n h, w = img.shape[:2]\n\n camera_id = 1\n\n if camera_id not in cameras:\n # Simplified pinhole model\n focal_length = max(w, h) * 1.2\n cx = w / 2.0\n cy = h / 2.0\n\n cameras[camera_id] = {\n 'id': camera_id,\n 'model': 'PINHOLE',\n 'width': w,\n 'height': h,\n 'params': np.array([focal_length, focal_length, cx, cy])\n }\n\n pts3d = all_pts3d[idx]\n confidence = all_confidences[idx]\n\n pose = estimate_camera_pose(pts3d, confidence, min_conf_thr)\n qvec, tvec = matrix_to_quaternion_translation(pose)\n\n image_name = f\"image_{idx:04d}.jpg\"\n\n images_data[idx + 1] = {\n 'id': idx + 1,\n 'qvec': qvec,\n 'tvec': tvec,\n 'camera_id': camera_id,\n 'name': image_name,\n 'xys': np.array([]),\n 'point3D_ids': np.array([])\n }\n\n points3D = extract_3d_points(scene, min_conf_thr, verbose)\n\n return cameras, images_data, points3D\n\n\ndef estimate_camera_pose(pts3d: np.ndarray, confidence: np.ndarray, min_conf_thr: float) -> np.ndarray:\n \"\"\"Estimates camera pose from 3D points.\"\"\"\n if hasattr(pts3d, 'cpu'):\n pts3d = pts3d.detach().cpu().numpy()\n if hasattr(confidence, 'cpu'):\n confidence = confidence.detach().cpu().numpy()\n\n h, w = pts3d.shape[:2]\n pts_flat = pts3d.reshape(-1, 3)\n conf_flat = confidence.reshape(-1)\n\n mask = conf_flat > min_conf_thr\n valid_pts = pts_flat[mask]\n\n if len(valid_pts) < 4:\n return np.eye(4)\n\n # Use median to determine center (translation)\n center = np.median(valid_pts, axis=0)\n pose = np.eye(4)\n pose[:3, 3] = -center\n\n return pose\n\n\ndef matrix_to_quaternion_translation(matrix: np.ndarray):\n \"\"\"Converts a 4x4 transformation matrix to a quaternion and translation vector.\"\"\"\n R = matrix[:3, :3]\n t = matrix[:3, 3]\n\n qw = np.sqrt(1.0 + R[0, 0] + R[1, 1] + R[2, 2]) / 2.0\n qx = (R[2, 1] - R[1, 2]) / (4.0 * qw)\n qy = (R[0, 2] - R[2, 0]) / (4.0 * qw)\n qz = (R[1, 0] - R[0, 1]) / (4.0 * qw)\n\n qvec = np.array([qw, qx, qy, qz])\n\n return qvec, t\n\n\ndef extract_3d_points(scene, min_conf_thr: float, verbose: bool):\n \"\"\"Extracts 3D points from the scene.\"\"\"\n points3D = {}\n point_id = 1\n\n num_images = len(scene.imgs)\n all_confidences = scene.get_conf()\n all_pts3d = scene.get_pts3d()\n\n for idx in range(num_images):\n pts3d = all_pts3d[idx]\n confidence = all_confidences[idx]\n img = scene.imgs[idx]\n\n if hasattr(pts3d, 'cpu'):\n pts3d = pts3d.detach().cpu().numpy()\n if hasattr(confidence, 'cpu'):\n confidence = confidence.detach().cpu().numpy()\n if hasattr(img, 'cpu'):\n img = img.detach().cpu().numpy()\n\n h, w = pts3d.shape[:2]\n pts_flat = pts3d.reshape(-1, 3)\n conf_flat = confidence.reshape(-1)\n\n # Fix: Correct extraction of color information\n if len(img.shape) == 3:\n colors = img.reshape(-1, 3)\n # Convert to 0-255 if normalized\n if 
colors.max() <= 1.0:\n colors = (colors * 255).astype(np.uint8)\n else:\n colors = colors.astype(np.uint8)\n else:\n # Grayscale case\n gray = img.reshape(-1)\n if gray.max() <= 1.0:\n gray = (gray * 255).astype(np.uint8)\n else:\n gray = gray.astype(np.uint8)\n colors = np.stack([gray] * 3, axis=1)\n\n mask = conf_flat > min_conf_thr\n\n # Limit points per image to maintain performance\n if mask.sum() > 10000:\n indices = np.where(mask)[0]\n sampled_indices = np.random.choice(indices, size=10000, replace=False)\n mask = np.zeros_like(mask, dtype=bool)\n mask[sampled_indices] = True\n\n valid_pts = pts_flat[mask]\n valid_colors = colors[mask]\n\n for pt, color in zip(valid_pts, valid_colors):\n points3D[point_id] = {\n 'id': point_id,\n 'xyz': pt,\n 'rgb': color.astype(np.uint8),\n 'error': 0.0,\n 'image_ids': np.array([idx + 1]),\n 'point2D_idxs': np.array([0])\n }\n point_id += 1\n\n if verbose:\n print(f\"Extracted {len(points3D)} 3D points\")\n\n return points3D\n\n\ndef save_image_data(scene, images_dir, depth_dir, normal_dir, mask_dir, min_conf_thr, verbose):\n \"\"\"\n Saves images, depth maps, normal maps, and confidence masks.\n Important: Uses existing color images if they already exist in colmap/images.\n \"\"\"\n import cv2\n import shutil\n from PIL import Image as PILImage\n \n num_images = len(scene.imgs)\n all_confidences = scene.get_conf()\n all_pts3d = scene.get_pts3d()\n\n # Important: Use existing images in colmap/images to preserve original color\n existing_images = sorted(images_dir.glob(\"*.jpg\")) + sorted(images_dir.glob(\"*.jpeg\"))\n use_existing = len(existing_images) == num_images\n \n if use_existing and verbose:\n print(f\"β˜… Using existing images from colmap/images (preserving original color)\")\n\n for idx in range(num_images):\n image_name = f\"image_{idx:04d}.jpg\"\n\n if use_existing and idx < len(existing_images):\n # Copy existing image to preserve original colors\n existing_img_path = existing_images[idx]\n target_path = images_dir / image_name\n if existing_img_path != target_path:\n shutil.copy(existing_img_path, target_path)\n if verbose and idx == 0:\n print(f\" Using existing image: {existing_img_path.name} -> {image_name}\")\n else:\n # Save image from MASt3R scene\n img = scene.imgs[idx]\n if hasattr(img, 'cpu'):\n img = img.detach().cpu().numpy()\n\n if img.dtype != np.uint8:\n if img.max() <= 1.0:\n img = (img * 255).astype(np.uint8)\n else:\n img = img.astype(np.uint8)\n \n if len(img.shape) == 3 and img.shape[2] == 3:\n cv2.imwrite(str(images_dir / image_name), cv2.cvtColor(img, cv2.COLOR_RGB2BGR))\n else:\n cv2.imwrite(str(images_dir / image_name), img)\n\n pts3d = all_pts3d[idx]\n if hasattr(pts3d, 'cpu'):\n pts3d = pts3d.detach().cpu().numpy()\n\n # Generate depth map\n depth = np.linalg.norm(pts3d, axis=2)\n depth_name = image_name.replace('.jpg', '.geometric.bin')\n save_depth_map(depth, depth_dir / depth_name)\n\n # Generate normal map\n normals = compute_normals_from_depth(pts3d)\n normal_name = image_name.replace('.jpg', '.geometric.bin')\n save_normal_map(normals, normal_dir / normal_name)\n\n # Generate confidence mask if requested\n if mask_dir is not None:\n confidence = all_confidences[idx]\n if hasattr(confidence, 'cpu'):\n confidence = confidence.detach().cpu().numpy()\n\n mask = (confidence > min_conf_thr).astype(np.uint8) * 255\n mask_name = image_name.replace('.jpg', '.png')\n cv2.imwrite(str(mask_dir / mask_name), mask)\n\n if verbose:\n print(f\"Saved {num_images} images with depth/normal maps\")\n\n\ndef 
compute_normals_from_depth(pts3d: np.ndarray) -> np.ndarray:\n \"\"\"Computes surface normals from 3D points.\"\"\"\n h, w = pts3d.shape[:2]\n normals = np.zeros_like(pts3d)\n\n for i in range(1, h - 1):\n for j in range(1, w - 1):\n px = pts3d[i, j + 1] - pts3d[i, j - 1]\n py = pts3d[i + 1, j] - pts3d[i - 1, j]\n normal = np.cross(px, py)\n norm = np.linalg.norm(normal)\n if norm > 0:\n normals[i, j] = normal / norm\n\n return normals\n\n\ndef save_depth_map(depth: np.ndarray, path: Path):\n \"\"\"Saves depth map in COLMAP binary format.\"\"\"\n h, w = depth.shape\n\n with open(path, 'wb') as f:\n f.write(struct.pack('i', w))\n f.write(struct.pack('i', h))\n f.write(struct.pack('i', 1))\n depth_flat = depth.astype(np.float32).flatten()\n f.write(depth_flat.tobytes())\n\n\ndef save_normal_map(normals: np.ndarray, path: Path):\n \"\"\"Saves normal map in COLMAP binary format.\"\"\"\n h, w = normals.shape[:2]\n\n with open(path, 'wb') as f:\n f.write(struct.pack('i', w))\n f.write(struct.pack('i', h))\n f.write(struct.pack('i', 3))\n normals_flat = normals.astype(np.float32).reshape(-1)\n f.write(normals_flat.tobytes())\n\n\ndef write_cameras_binary(cameras, path):\n \"\"\"Writes cameras.bin in COLMAP binary format.\"\"\"\n with open(path, 'wb') as f:\n f.write(struct.pack('Q', len(cameras)))\n\n for camera in cameras.values():\n f.write(struct.pack('i', camera['id']))\n f.write(struct.pack('i', 1)) # PINHOLE = 1\n f.write(struct.pack('Q', camera['width']))\n f.write(struct.pack('Q', camera['height']))\n\n for param in camera['params']:\n f.write(struct.pack('d', param))\n\n\ndef write_images_binary(images, path):\n \"\"\"Writes images.bin in COLMAP binary format.\"\"\"\n with open(path, 'wb') as f:\n f.write(struct.pack('Q', len(images)))\n\n for img in images.values():\n f.write(struct.pack('i', img['id']))\n\n for q in img['qvec']:\n f.write(struct.pack('d', q))\n\n for t in img['tvec']:\n f.write(struct.pack('d', t))\n\n f.write(struct.pack('i', img['camera_id']))\n\n name_bytes = img['name'].encode('utf-8') + b'\\x00'\n f.write(name_bytes)\n\n f.write(struct.pack('Q', len(img['xys'])))\n for xy, p3d_id in zip(img['xys'], img['point3D_ids']):\n f.write(struct.pack('dd', xy[0], xy[1]))\n f.write(struct.pack('Q', p3d_id))\n\n\ndef write_points3d_binary(points3D, path):\n \"\"\"Writes points3D.bin in COLMAP binary format.\"\"\"\n with open(path, 'wb') as f:\n f.write(struct.pack('Q', len(points3D)))\n\n for pt in points3D.values():\n f.write(struct.pack('Q', pt['id']))\n\n for coord in pt['xyz']:\n f.write(struct.pack('d', coord))\n\n for c in pt['rgb']:\n f.write(struct.pack('B', c))\n\n f.write(struct.pack('d', pt['error']))\n\n f.write(struct.pack('Q', len(pt['image_ids'])))\n for img_id, pt2d_idx in zip(pt['image_ids'], pt['point2D_idxs']):\n f.write(struct.pack('i', img_id))\n f.write(struct.pack('i', pt2d_idx))","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"# process3 end","metadata":{}},{"cell_type":"code","source":"","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"# main_pipeline","metadata":{}},{"cell_type":"code","source":"image_dir = \"/kaggle/input/two-dogs/bike15\"\noutput_dir = \"/kaggle/working/output\"\n\nsquare_size=1024 \niterations=1000 \nmax_images=30\nmax_pairs=1000 \nmax_points=1000000 ","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"os.makedirs(output_dir, exist_ok=True)\nprocessed_image_dir = os.path.join(output_dir, 
\"processed_images\")\n\n# Get original images first\noriginal_image_paths = sorted([\n os.path.join(image_dir, f)\n for f in os.listdir(image_dir)\n if f.lower().endswith(('.jpg', '.jpeg', '.png'))\n])","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"# Process the temp directory\nnormalize_image_sizes_biplet(\n input_dir=image_dir,\n output_dir=processed_image_dir,\n size=square_size\n)","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"!ls /kaggle/working/output/processed_images","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"# Get processed image paths\nimage_paths = sorted([\n os.path.join(processed_image_dir, f)\n for f in os.listdir(processed_image_dir)\n if f.lower().endswith(('.jpg', '.jpeg', '.png'))\n])\n\nprint(f\"\\nπŸ“Έ Processing {len(image_paths)} images (after biplet-square)\")\nprint(f\"⚠️ Will use maximum {max_pairs} pairs to save memory\")","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"# Step 2: DINO-based pair selection\nprint(\"\\n\" + \"=\"*70)\nprint(\"Step 2: DINO Pair Selection\")\nprint(\"=\"*70)\n\npairs = get_image_pairs_dino(image_paths, max_pairs=max_pairs)\nclear_memory()\n\nprint(f\"βœ“ Using {len(pairs)} pairs for reconstruction\")","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"processed_image_dir = '/kaggle/working/output/processed_images'\nprocessed_image_paths = sorted([\n os.path.join(processed_image_dir, f) \n for f in os.listdir(processed_image_dir) \n if f.endswith(('.jpg', '.jpeg', '.png'))\n])\n\nprint(f\"Found {len(processed_image_paths)} processed images\")","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"print(len(image_paths))\nprint(len(processed_image_paths))","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"# Step 3: MASt3R reconstruction\nprint(\"\\n\" + \"=\"*70)\nprint(\"Step 3: MASt3R Reconstruction\")\nprint(\"=\"*70)\n\ndevice = Config.DEVICE\nmodel = load_mast3r_model(device)\n\nscene, mast3r_images = run_mast3r_pairs(\n model, processed_image_paths, pairs, device,\n max_pairs=None # Already limited in get_image_pairs_dino\n)\n\n# Clear model from memory\ndel model\nclear_memory()","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"colmap_dir = convert_mast3r_to_colmap(\n scene=scene,\n output_dir='/kaggle/working/output/colmap',\n min_conf_thr=2.0,\n clean_depth=False,\n mask_images=True,\n verbose=True\n)\n\nprint(f\"COLMAP data created at: {colmap_dir}\")\nprint(f\" - Sparse model: {colmap_dir}/sparse/0/\")\nprint(f\" - Images: {colmap_dir}/images/\")\nprint(f\" - Depth maps: {colmap_dir}/stereo/depth_maps/\")","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"!pip install open3d","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"def create_point_cloud_visualization(colmap_dir, output_path):\n \"\"\"\n Creates a lightweight 3D point cloud visualization from COLMAP data (CPU compatible).\n \"\"\"\n print(\"\\n=== Creating Point Cloud Visualization (CPU) ===\")\n \n import struct\n 
import open3d as o3d\n import numpy as np\n import os\n \n # Path to points3D.bin\n points3d_path = os.path.join(colmap_dir, 'sparse', '0', 'points3D.bin')\n \n points = []\n colors = []\n \n # Read the COLMAP binary file\n with open(points3d_path, 'rb') as f:\n # Read the number of points (uint64)\n num_points = struct.unpack('Q', f.read(8))[0]\n \n for _ in range(num_points):\n # point_id (uint64), xyz (3 * float64), rgb (3 * uint8), error (float64)\n point_id = struct.unpack('Q', f.read(8))[0]\n xyz = struct.unpack('ddd', f.read(24))\n rgb = struct.unpack('BBB', f.read(3))\n error = struct.unpack('d', f.read(8))[0]\n \n # Skip track information (image_id and point2D_idx pairs)\n track_length = struct.unpack('Q', f.read(8))[0]\n f.read(track_length * 8) # Each pair is 2 * int32 = 8 bytes\n \n points.append(xyz)\n # Normalize RGB to [0, 1] for Open3D\n colors.append([c / 255.0 for c in rgb])\n \n # Create Open3D PointCloud object\n pcd = o3d.geometry.PointCloud()\n pcd.points = o3d.utility.Vector3dVector(np.array(points))\n pcd.colors = o3d.utility.Vector3dVector(np.array(colors))\n \n # Save as PLY format\n o3d.io.write_point_cloud(output_path, pcd)\n \n print(f\"✓ Point cloud saved to: {output_path}\")\n print(f\" Total points: {len(points)}\")\n \n return output_path\n\n# Example Usage\noutput_ply = '/kaggle/working/output/point_cloud.ply'\ncreate_point_cloud_visualization(colmap_dir, output_ply)","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"# open3d was only imported inside create_point_cloud_visualization, so import it here as well\nimport open3d as o3d\npcd = o3d.io.read_point_cloud(output_ply)\no3d.visualization.draw_geometries([pcd])","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"","metadata":{"trusted":true},"outputs":[],"execution_count":null}]}
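
Note that the notebook's final cell uses `o3d.visualization.draw_geometries`, which opens a native viewer window and will generally fail in a headless Kaggle session. Below is a minimal sketch for sanity-checking the exported point cloud without a display; it assumes the `/kaggle/working/output/point_cloud.ply` path written by `create_point_cloud_visualization()` above and the `plyfile` package installed by `setup_base_environment()`.

```python
# Headless sanity check of the PLY written by create_point_cloud_visualization().
# Assumes the notebook's default output path; adjust if it was changed.
import numpy as np
from plyfile import PlyData

output_ply = "/kaggle/working/output/point_cloud.ply"

ply = PlyData.read(output_ply)
vertex = ply["vertex"]

# Stack the x/y/z columns into an (N, 3) array and report basic statistics.
xyz = np.stack([vertex["x"], vertex["y"], vertex["z"]], axis=1)
print(f"points: {xyz.shape[0]}")
print(f"bbox min: {xyz.min(axis=0)}")
print(f"bbox max: {xyz.max(axis=0)}")
```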