{"metadata":{"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.12.12"},"accelerator":"GPU","colab":{"gpuType":"T4","provenance":[],"machine_shape":"hm"},"kaggle":{"accelerator":"none","dataSources":[{"sourceId":14705005,"sourceType":"datasetVersion","datasetId":1429416}],"isInternetEnabled":true,"language":"python","sourceType":"notebook","isGpuEnabled":false},"papermill":{"default_parameters":{},"duration":967.270978,"end_time":"2026-01-20T01:22:34.649213","environment_variables":{},"exception":null,"input_path":"__notebook__.ipynb","output_path":"__notebook__.ipynb","parameters":{},"start_time":"2026-01-20T01:06:27.378235","version":"2.6.0"},"widgets":{"application/vnd.jupyter.widget-state+json":{"71659c5eb8704c428eb984e9dd6fca41":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_157b8fcb5f564a1f83feb30af2412dbc","IPY_MODEL_ab27d947283b4dda80bc0267ac0950d1","IPY_MODEL_dfb970745845470892fcef2a793fa722"],"layout":"IPY_MODEL_f2bb371554334d4bbee1839c7b3c5b6e"}},"157b8fcb5f564a1f83feb30af2412dbc":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_b9643bbd15964e4a90b496fab872f754","placeholder":"​","style":"IPY_MODEL_817e2eaa5eb341f29c86a73c715c476a","value":"preprocessor_config.json: 100%"}},"ab27d947283b4dda80bc0267ac0950d1":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_335528a65ea74708bee26d778aac70b5","max":436,"min":0,"orientation":"horizontal","style":"IPY_MODEL_2912e19431b845a988f6617668ceecf8","value":436}},"dfb970745845470892fcef2a793fa722":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_5db5e4aeb52848728293e9d082e4940c","placeholder":"​","style":"IPY_MODEL_d78328ca525d4bceab3890d57fa34ae5","value":" 436/436 [00:00&lt;00:00, 
54.5kB/s]"}},"f2bb371554334d4bbee1839c7b3c5b6e":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"b9643bbd15964e4a90b496fab872f754":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"817e2eaa5eb341f29c86a73c715c476a":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"335528a65ea74708bee26d778aac70b5":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"2912e19431b845a988
f6617668ceecf8":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"5db5e4aeb52848728293e9d082e4940c":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"d78328ca525d4bceab3890d57fa34ae5":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"627c1695bcb04598a36dd72c8a69e7f5":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_c6ffcf6df8764f3faf59420f3373939d","IPY_MODEL_00cf6f11d4314130a64df95396b892ec","IPY_MODEL_ea5e39d79d7f4acfb8f3063d72311462"],"layout":"IPY_MODEL_4fce934bd4bf46baa7be4fb33e74d16f"}},"c6ffcf6df8764f3faf59420f3373939d":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_bd527001905b4329bbe4f58446b72d37","placeholder":"​","style":"IPY_MODEL_b807318a18134af7ae1ae18a4c9f8a13","value":"config.json: 
100%"}},"00cf6f11d4314130a64df95396b892ec":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_3781299ab1244523931232163602cb44","max":548,"min":0,"orientation":"horizontal","style":"IPY_MODEL_932b36d8d6324e13ad1bfad07f93e57e","value":548}},"ea5e39d79d7f4acfb8f3063d72311462":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_37cb90e7e8d7424fb02e171ad3350c7c","placeholder":"​","style":"IPY_MODEL_9732ccda34774836a11d81da1520ac99","value":" 548/548 [00:00&lt;00:00, 68.8kB/s]"}},"4fce934bd4bf46baa7be4fb33e74d16f":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"bd527001905b4329bbe4f58446b72d37":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"b807318a18134af7ae1ae18a4c9f8a13":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_mode
l_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"3781299ab1244523931232163602cb44":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"932b36d8d6324e13ad1bfad07f93e57e":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"37cb90e7e8d7424fb02e171ad3350c7c":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"9732ccda34774836a11d81da1520ac99":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"69af90d36da248978f28892084155d27":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_789dc5db84b84e7299d7cd6a513500a3","IPY_MODEL_acc6ca7ddbba4ca
6bf2a6a2c217d566b","IPY_MODEL_b7e774e19f264f738e8b365d93423b3d"],"layout":"IPY_MODEL_d1cb7996322e4b1e8124857839e305db"}},"789dc5db84b84e7299d7cd6a513500a3":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_90a22edc704c4768ac3b1511d2e629ea","placeholder":"​","style":"IPY_MODEL_392c7092c9214557b0f26209ad32fcf1","value":"model.safetensors: 100%"}},"acc6ca7ddbba4ca6bf2a6a2c217d566b":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_74c7eb4966a14d65916259589ff1dc2e","max":346345912,"min":0,"orientation":"horizontal","style":"IPY_MODEL_45e0866962b0460e87c8ce180a96115f","value":346345912}},"b7e774e19f264f738e8b365d93423b3d":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_4b9c40e6d61d4b3aaaa6df96e695e831","placeholder":"​","style":"IPY_MODEL_732b1fca8bab4e3e98e5b80317fa9198","value":" 346M/346M [00:01&lt;00:00, 
362MB/s]"}},"d1cb7996322e4b1e8124857839e305db":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"90a22edc704c4768ac3b1511d2e629ea":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"392c7092c9214557b0f26209ad32fcf1":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"74c7eb4966a14d65916259589ff1dc2e":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"45e0866962b0460e87c
8ce180a96115f":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"4b9c40e6d61d4b3aaaa6df96e695e831":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"732b1fca8bab4e3e98e5b80317fa9198":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"fc6e0d42af74464da586a89397b74752":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_f00b37c9901a4dde85ca44bf93aad7c4","IPY_MODEL_777478352a84439eb393cb5b6ea3fa1a","IPY_MODEL_81fd0c78a76244b48c99983a9cf66660"],"layout":"IPY_MODEL_fe164865bb9d49ecb4caf9389baa7975"}},"f00b37c9901a4dde85ca44bf93aad7c4":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_b3ba20d7739847a8ba8a7a19150cab5f","placeholder":"​","style":"IPY_MODEL_f22284b9edc04fb8937ae8c434f98cd7","value":"Loading weights: 
100%"}},"777478352a84439eb393cb5b6ea3fa1a":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_dff1208989d34ad88b517c4ca788c6b2","max":223,"min":0,"orientation":"horizontal","style":"IPY_MODEL_bfdd2325fc234667980928f3871d04ca","value":223}},"81fd0c78a76244b48c99983a9cf66660":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_bccce2558d5a4e01840a388a39e0104e","placeholder":"​","style":"IPY_MODEL_e867a871e17c4f46a4959900d066f540","value":" 223/223 [00:00&lt;00:00, 1154.74it/s, Materializing param=layernorm.weight]"}},"fe164865bb9d49ecb4caf9389baa7975":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"b3ba20d7739847a8ba8a7a19150cab5f":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"f22284b9edc04fb8937ae8c434f98cd7":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/control
s","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"dff1208989d34ad88b517c4ca788c6b2":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"bfdd2325fc234667980928f3871d04ca":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"bccce2558d5a4e01840a388a39e0104e":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"e867a871e17c4f46a4959900d066f540":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}}}}},"nbformat_minor":4,"nbformat":4,"cells":[{"cell_type":"code","source":"","metadata":{"id":"ukeu2UQAGpBV"},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"# 
**biplet_dino_mast3r_ps3_gs_kg**\n\n","metadata":{"id":"qDQLX3PArmh8","papermill":{"duration":0.003504,"end_time":"2026-01-20T01:06:31.022336","exception":false,"start_time":"2026-01-20T01:06:31.018832","status":"completed"},"tags":[]}},{"cell_type":"markdown","source":"# setup","metadata":{"id":"M4iuU0WnGpBY"}},{"cell_type":"code","source":"from google.colab import drive\ndrive.mount('/content/drive')","metadata":{"id":"vfqsbwoqlO7r","outputId":"da254904-082f-472d-8bf6-2117530cdd7f"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"# MASt3R-based Gaussian Splatting Pipeline\n# Preserves: DINO pair selection + Biplet-Square Normalization\n# Replaces: ALIKED/LightGlue/COLMAP with MASt3R\n\nimport os\nimport sys\nimport gc\nimport h5py\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom tqdm import tqdm\nfrom pathlib import Path\nimport subprocess\nfrom PIL import Image, ImageFilter\nimport struct\n\n# Transformers for DINO\nfrom transformers import AutoImageProcessor, AutoModel\n\n# ============================================================================\n# Configuration\n# ============================================================================\nclass Config:\n    # Feature extraction\n    N_KEYPOINTS = 8192\n    IMAGE_SIZE = 1024\n\n    # Pair selection - CRITICAL for memory\n    GLOBAL_TOPK = 20  # Reduced from 50 - each image pairs with top 20\n    MIN_MATCHES = 10\n    RATIO_THR = 1.2\n\n    # Paths\n    DINO_MODEL = \"facebook/dinov2-base\"\n\n    # MASt3R - Reduced size for memory\n    MAST3R_MODEL = \"/content/mast3r/checkpoints/MASt3R_ViTLarge_BaseDecoder_512_catmlpdpt_metric.pth\"\n    MAST3R_IMAGE_SIZE = 224  # Small size to save memory\n\n    # Device\n    DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n# ============================================================================\n# Memory Management Utilities\n# ============================================================================\n\ndef clear_memory():\n    \"\"\"Aggressively clear GPU and CPU memory\"\"\"\n    gc.collect()\n    if torch.cuda.is_available():\n        torch.cuda.empty_cache()\n        torch.cuda.synchronize()\n\ndef get_memory_info():\n    \"\"\"Get current memory usage\"\"\"\n    if torch.cuda.is_available():\n        allocated = torch.cuda.memory_allocated() / 1024**3\n        reserved = torch.cuda.memory_reserved() / 1024**3\n        print(f\"GPU Memory - Allocated: {allocated:.2f}GB, Reserved: {reserved:.2f}GB\")\n\n    import psutil\n    cpu_mem = psutil.virtual_memory().percent\n    print(f\"CPU Memory Usage: {cpu_mem:.1f}%\")\n\n# ============================================================================\n# Environment Setup\n# ============================================================================\n\ndef run_cmd(cmd, check=True, capture=False):\n    \"\"\"Run command with better error handling\"\"\"\n    print(f\"Running: {' '.join(cmd)}\")\n    result = subprocess.run(\n        cmd,\n        capture_output=capture,\n        text=True,\n        check=False\n    )\n    if check and result.returncode != 0:\n        print(f\"❌ Command failed with code {result.returncode}\")\n        if capture:\n            print(f\"STDOUT: {result.stdout}\")\n            print(f\"STDERR: {result.stderr}\")\n    return result\n\n\ndef setup_base_environment():\n    \"\"\"Setup base Python environment\"\"\"\n    print(\"\\n=== Setting up Base Environment ===\")\n\n    # NumPy fix for Python 3.12\n    print(\"\\nπŸ“¦ Fixing NumPy...\")\n   
 run_cmd([sys.executable, \"-m\", \"pip\", \"uninstall\", \"-y\", \"numpy\"])\n    run_cmd([sys.executable, \"-m\", \"pip\", \"install\", \"numpy==1.26.4\"])\n\n    # PyTorch\n    print(\"\\nπŸ“¦ Installing PyTorch...\")\n    run_cmd([\n        sys.executable, \"-m\", \"pip\", \"install\",\n        \"torch\", \"torchvision\", \"torchaudio\"\n    ])\n\n    # Core utilities\n    print(\"\\nπŸ“¦ Installing core utilities...\")\n    run_cmd([\n        sys.executable, \"-m\", \"pip\", \"install\",\n        \"opencv-python\",\n        \"pillow\",\n        \"imageio\",\n        \"imageio-ffmpeg\",\n        \"plyfile\",\n        \"tqdm\",\n        \"tensorboard\",\n        \"scipy\",  # for rotation conversions and image resizing\n        \"psutil\"  # for memory monitoring\n    ])\n\n    # Transformers for DINO\n    print(\"\\nπŸ“¦ Installing transformers...\")\n    run_cmd([\n        sys.executable, \"-m\", \"pip\", \"install\",\n        \"transformers>=4.45.0\"\n    ])\n\n    # pycolmap for COLMAP format\n    print(\"\\nπŸ“¦ Installing pycolmap...\")\n    run_cmd([sys.executable, \"-m\", \"pip\", \"install\", \"pycolmap\"])\n\n    print(\"βœ“ Base environment setup complete!\")\n\n\ndef setup_mast3r():\n    \"\"\"Install and setup MASt3R\"\"\"\n    print(\"\\n=== Setting up MASt3R ===\")\n\n    os.chdir('/content')\n\n    # Remove existing installation\n    if os.path.exists('mast3r'):\n        print(\"Removing existing MASt3R installation...\")\n        os.system('rm -rf mast3r')\n\n    # Clone repository\n    print(\"Cloning MASt3R repository...\")\n    os.system('git clone --recursive https://github.com/naver/mast3r')\n    os.chdir('/content/mast3r')\n\n    # Check dust3r directory\n    print(\"Checking dust3r structure...\")\n    os.system('ls -la dust3r/')\n\n    # Install dust3r\n    print(\"Installing dust3r...\")\n    os.system('cd dust3r && python -m pip install -e .')\n\n    # Install croco\n    print(\"Installing croco...\")\n    os.system('cd dust3r/croco && python -m pip install -e .')\n\n    # Install requirements\n    print(\"Installing MASt3R requirements...\")\n    os.system('pip install -r requirements.txt')\n\n    # Download model weights\n    print(\"Downloading model weights...\")\n    os.system('mkdir -p checkpoints')\n    os.system('wget -P checkpoints/ https://download.europe.naverlabs.com/ComputerVision/MASt3R/MASt3R_ViTLarge_BaseDecoder_512_catmlpdpt_metric.pth')\n\n    # Install additional dependencies\n    print(\"Installing additional dependencies...\")\n    os.system('pip install trimesh matplotlib roma')\n\n    # Add to path\n    sys.path.insert(0, '/content/mast3r')\n    sys.path.insert(0, '/content/mast3r/dust3r')\n\n    # Verification\n    print(\"\\nπŸ” Verifying MASt3R installation...\")\n    try:\n        from mast3r.model import AsymmetricMASt3R\n        print(\"  βœ“ MASt3R import: OK\")\n    except Exception as e:\n        print(f\"  ❌ MASt3R import failed: {e}\")\n        raise\n\n    print(\"βœ“ MASt3R setup complete!\")\n\ndef setup_gaussian_splatting():\n    \"\"\"Setup Gaussian Splatting\"\"\"\n    print(\"\\n=== Setting up Gaussian Splatting ===\")\n\n    os.chdir('/content')\n\n    WORK_DIR = \"gaussian-splatting\"\n\n    if not os.path.exists(WORK_DIR):\n        print(\"Cloning Gaussian Splatting repository...\")\n        run_cmd([\n            \"git\", \"clone\", \"--recursive\",\n            \"https://github.com/graphdeco-inria/gaussian-splatting.git\",\n            WORK_DIR\n        ])\n    else:\n        print(\"βœ“ Repository already 
exists\")\n\n    os.chdir(WORK_DIR)\n\n    # Install requirements\n    print(\"Installing Gaussian Splatting requirements...\")\n    run_cmd([sys.executable, \"-m\", \"pip\", \"install\", \"-r\", \"requirements.txt\"])\n\n    # Build submodules\n    print(\"\\nπŸ“¦ Building Gaussian Splatting submodules...\")\n\n    submodules = {\n        \"diff-gaussian-rasterization\":\n            \"https://github.com/graphdeco-inria/diff-gaussian-rasterization.git\",\n        \"simple-knn\":\n            \"https://github.com/camenduru/simple-knn.git\"\n    }\n\n    for name, repo in submodules.items():\n        print(f\"\\nπŸ“¦ Installing {name}...\")\n        path = os.path.join(\"submodules\", name)\n        if not os.path.exists(path):\n            run_cmd([\"git\", \"clone\", repo, path])\n        run_cmd([sys.executable, \"-m\", \"pip\", \"install\", path])\n\n    print(\"βœ“ Gaussian Splatting setup complete!\")\n","metadata":{"execution":{"iopub.status.busy":"2026-02-02T08:53:09.950151Z","iopub.execute_input":"2026-02-02T08:53:09.950445Z","iopub.status.idle":"2026-02-02T08:53:09.967045Z","shell.execute_reply.started":"2026-02-02T08:53:09.95042Z","shell.execute_reply":"2026-02-02T08:53:09.966479Z"},"papermill":{"duration":46.280727,"end_time":"2026-01-20T01:07:23.641872","exception":false,"start_time":"2026-01-20T01:06:37.361145","status":"completed"},"tags":[],"trusted":true,"_kg_hide-output":true,"id":"hGX7IYJ6GpBZ"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"\nsetup_base_environment()\nclear_memory()\n\nsetup_mast3r()\nclear_memory()\n\nsetup_gaussian_splatting()\nclear_memory()","metadata":{"trusted":true,"_kg_hide-output":true,"execution":{"iopub.status.busy":"2026-02-02T08:53:09.968021Z","iopub.execute_input":"2026-02-02T08:53:09.968253Z","iopub.status.idle":"2026-02-02T08:56:35.635976Z","shell.execute_reply.started":"2026-02-02T08:53:09.968233Z","shell.execute_reply":"2026-02-02T08:56:35.635328Z"},"id":"sIf3UgDZGpBa","outputId":"0a1eb888-aba1-4990-ab52-28584e9104ec"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"","metadata":{"trusted":true,"id":"38nn_QqcGpBa"},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"# dino & mast3r","metadata":{"id":"L6OBEO0zGpBa"}},{"cell_type":"code","source":"# ============================================================================\n# Step 0: Biplet-Square Normalization (PRESERVED FROM ORIGINAL)\n# ============================================================================\ndef normalize_image_sizes_biplet(input_dir, output_dir=None, size=1024, max_images=None):\n    \"\"\"\n    Generates two square crops (Left & Right or Top & Bottom)\n    from each image in a directory and returns the output directory\n    and the list of generated file paths.\n\n    Args:\n        input_dir: Input directory containing source images\n        output_dir: Output directory for processed images\n        size: Target square size (default: 1024)\n        max_images: Maximum number of SOURCE images to process (default: None = all images)\n    \"\"\"\n    if output_dir is None:\n        output_dir = 'output/images_biplet'\n    os.makedirs(output_dir, exist_ok=True)\n\n    print(f\"--- Step 1: Biplet-Square Normalization ---\")\n    print(f\"Generating 2 cropped squares (Left/Right or Top/Bottom) for each image...\")\n    print()\n\n    generated_paths = []\n    converted_count = 0\n    size_stats = {}\n\n    # Sort for consistent processing order\n    image_files = sorted([f for f in os.listdir(input_dir)\n    
                     if f.lower().endswith(('.jpg', '.jpeg', '.png'))])\n\n    # ★ Limit the number of source images via max_images\n    if max_images is not None:\n        image_files = image_files[:max_images]\n        print(f\"Processing limited to {max_images} source images (will generate {max_images * 2} cropped images)\")\n\n    for img_file in image_files:\n        input_path = os.path.join(input_dir, img_file)\n        try:\n            img = Image.open(input_path)\n            original_size = img.size\n\n            # Tracking original aspect ratios\n            size_key = f\"{original_size[0]}x{original_size[1]}\"\n            size_stats[size_key] = size_stats.get(size_key, 0) + 1\n\n            # Generate 2 crops using the helper function\n            crops = generate_two_crops(img, size)\n            base_name, ext = os.path.splitext(img_file)\n\n            for mode, cropped_img in crops.items():\n                output_path = os.path.join(output_dir, f\"{base_name}_{mode}{ext}\")\n                cropped_img.save(output_path, quality=95)\n                generated_paths.append(output_path)\n\n            converted_count += 1\n            print(f\"  ✓ {img_file}: {original_size} → 2 square images generated\")\n\n        except Exception as e:\n            print(f\"  ✗ Error processing {img_file}: {e}\")\n\n    print(f\"\\nProcessing complete: {converted_count} source images processed\")\n    print(f\"Total output images: {len(generated_paths)}\")\n    print(f\"Original size distribution: {size_stats}\")\n\n    return output_dir, generated_paths\n\n\ndef generate_two_crops(img, size):\n    \"\"\"\n    Crops the image into a square and returns 2 variations\n    (Left/Right for landscape, Top/Bottom for portrait).\n    \"\"\"\n    width, height = img.size\n    crop_size = min(width, height)\n    crops = {}\n\n    if width > height:\n        # Landscape → Left & Right\n        positions = {\n            'left': 0,\n            'right': width - crop_size\n        }\n        for mode, x_offset in positions.items():\n            box = (x_offset, 0, x_offset + crop_size, crop_size)\n            crops[mode] = img.crop(box).resize(\n                (size, size),\n                Image.Resampling.LANCZOS\n            )\n\n    else:\n        # Portrait or Square → Top & Bottom\n        positions = {\n            'top': 0,\n            'bottom': height - crop_size\n        }\n        for mode, y_offset in positions.items():\n            box = (0, y_offset, crop_size, y_offset + crop_size)\n            crops[mode] = img.crop(box).resize(\n                (size, size),\n                Image.Resampling.LANCZOS\n            )\n\n    return crops\n\n# ============================================================================\n# Step 1: DINO-based Pair Selection (PRESERVED FROM ORIGINAL)\n# ============================================================================\n\ndef load_torch_image(fname, device):\n    \"\"\"Load image as torch tensor\"\"\"\n    import torchvision.transforms as T\n\n    img = Image.open(fname).convert('RGB')\n    transform = T.Compose([\n        T.ToTensor(),\n    ])\n    return transform(img).unsqueeze(0).to(device)\n\ndef extract_dino_global(image_paths, model_path, device):\n    \"\"\"Extract DINO global descriptors with memory management\"\"\"\n    print(\"\\n=== Extracting DINO Global Features ===\")\n    print(\"Initial memory state:\")\n    get_memory_info()\n\n    processor = AutoImageProcessor.from_pretrained(model_path)\n    model = 
AutoModel.from_pretrained(model_path).eval().to(device)\n\n    global_descs = []\n    batch_size = 1  # Small batch to save memory\n\n    for i in tqdm(range(0, len(image_paths), batch_size)):\n        batch_paths = image_paths[i:i+batch_size]\n        batch_imgs = []\n\n        for img_path in batch_paths:\n            img = load_torch_image(img_path, device)\n            batch_imgs.append(img)\n\n        batch_tensor = torch.cat(batch_imgs, dim=0)\n\n        with torch.no_grad():\n            inputs = processor(images=batch_tensor, return_tensors=\"pt\", do_rescale=False).to(device)\n            outputs = model(**inputs)\n            desc = F.normalize(outputs.last_hidden_state[:, 1:].max(dim=1)[0], dim=1, p=2)\n            global_descs.append(desc.cpu())\n\n        # Clear batch memory\n        del batch_tensor, inputs, outputs, desc\n        clear_memory()\n\n    global_descs = torch.cat(global_descs, dim=0)\n\n    del model, processor\n    clear_memory()\n\n    print(\"After DINO extraction:\")\n    get_memory_info()\n\n    return global_descs\n\ndef build_topk_pairs(global_feats, k, device):\n    \"\"\"Build top-k similar pairs from global features\"\"\"\n    g = global_feats.to(device)\n    sim = g @ g.T\n    sim.fill_diagonal_(-1)\n\n    N = sim.size(0)\n    k = min(k, N - 1)\n\n    topk_indices = torch.topk(sim, k, dim=1).indices.cpu()\n\n    pairs = []\n    for i in range(N):\n        for j in topk_indices[i]:\n            j = j.item()\n            if i < j:\n                pairs.append((i, j))\n\n    # Remove duplicates\n    pairs = list(set(pairs))\n\n    return pairs\n\ndef select_diverse_pairs(pairs, max_pairs, num_images):\n    \"\"\"\n    Select diverse pairs to ensure good image coverage\n    Strategy: Select pairs that maximize image coverage\n    \"\"\"\n    import random\n    random.seed(42)\n\n    if len(pairs) <= max_pairs:\n        return pairs\n\n    print(f\"Selecting {max_pairs} diverse pairs from {len(pairs)} candidates...\")\n\n    # Count how many times each image appears in pairs\n    image_counts = {i: 0 for i in range(num_images)}\n    for i, j in pairs:\n        image_counts[i] += 1\n        image_counts[j] += 1\n\n    # Sort pairs by: prefer pairs with less-connected images\n    def pair_score(pair):\n        i, j = pair\n        # Lower score = images appear in fewer pairs = more diverse\n        return image_counts[i] + image_counts[j]\n\n    pairs_scored = [(pair, pair_score(pair)) for pair in pairs]\n    pairs_scored.sort(key=lambda x: x[1])\n\n    # Select pairs greedily to maximize coverage\n    selected = []\n    selected_images = set()\n\n    # Phase 1: Select pairs that add new images (greedy coverage)\n    for pair, score in pairs_scored:\n        if len(selected) >= max_pairs:\n            break\n        i, j = pair\n        # Prefer pairs that include new images\n        if i not in selected_images or j not in selected_images:\n            selected.append(pair)\n            selected_images.add(i)\n            selected_images.add(j)\n\n    # Phase 2: Fill remaining slots with high-similarity pairs\n    if len(selected) < max_pairs:\n        remaining = [p for p, s in pairs_scored if p not in selected]\n        random.shuffle(remaining)\n        selected.extend(remaining[:max_pairs - len(selected)])\n\n    print(f\"Selected pairs cover {len(selected_images)} / {num_images} images ({100*len(selected_images)/num_images:.1f}%)\")\n\n    return selected\n\ndef get_image_pairs_dino(image_paths, max_pairs=None):\n    \"\"\"DINO-based pair selection 
with intelligent limiting\"\"\"\n    device = Config.DEVICE\n\n    # DINO global features\n    global_feats = extract_dino_global(image_paths, Config.DINO_MODEL, device)\n    pairs = build_topk_pairs(global_feats, Config.GLOBAL_TOPK, device)\n\n    print(f\"Initial pairs from DINO: {len(pairs)}\")\n\n    # Apply intelligent pair selection if limit specified\n    if max_pairs and len(pairs) > max_pairs:\n        pairs = select_diverse_pairs(pairs, max_pairs, len(image_paths))\n\n    return pairs\n\n# ============================================================================\n# Step 2: MASt3R Reconstruction (REPLACES ALIKED/LIGHTGLUE/COLMAP)\n# ============================================================================\n\ndef load_mast3r_model(device='cuda'):\n    \"\"\"Load MASt3R model\"\"\"\n    from mast3r.model import AsymmetricMASt3R\n\n    model = AsymmetricMASt3R.from_pretrained(Config.MAST3R_MODEL).to(device)\n    model.eval()\n\n    print(f\"βœ“ MASt3R model loaded on {device}\")\n    return model\n\ndef load_images_for_mast3r(image_paths, size=224):\n    \"\"\"Load images using DUSt3R's format with reduced size\"\"\"\n    print(f\"\\n=== Loading images for MASt3R (size={size}) ===\")\n\n    from dust3r.utils.image import load_images\n\n    # Load images using DUSt3R's loader with reduced size\n    images = load_images(image_paths, size=size, verbose=True)\n\n    return images\n\ndef run_mast3r_pairs(model, image_paths, pairs, device='cuda', batch_size=1, max_pairs=None):\n    \"\"\"Run MASt3R on selected pairs with memory management\"\"\"\n    print(\"\\n=== Running MASt3R Reconstruction ===\")\n    print(\"Initial memory state:\")\n    get_memory_info()\n\n    from dust3r.inference import inference\n    from dust3r.cloud_opt import global_aligner, GlobalAlignerMode\n\n    # Limit number of pairs if specified\n    if max_pairs and len(pairs) > max_pairs:\n        print(f\"Limiting pairs from {len(pairs)} to {max_pairs}\")\n        # Select pairs more evenly distributed\n        step = max(1, len(pairs) // max_pairs)\n        pairs = pairs[::step][:max_pairs]\n\n    print(f\"Processing {len(pairs)} pairs...\")\n\n    # Load images in smaller size\n    print(f\"Loading {len(image_paths)} images at {Config.MAST3R_IMAGE_SIZE}x{Config.MAST3R_IMAGE_SIZE}...\")\n    images = load_images_for_mast3r(image_paths, size=Config.MAST3R_IMAGE_SIZE)\n\n    print(f\"Loaded {len(images)} images\")\n    print(\"After loading images:\")\n    get_memory_info()\n\n    # Create all image pairs at once\n    print(f\"Creating {len(pairs)} image pairs...\")\n    mast3r_pairs = []\n    for idx1, idx2 in tqdm(pairs, desc=\"Preparing pairs\"):\n        mast3r_pairs.append((images[idx1], images[idx2]))\n\n    print(f\"Running MASt3R inference on {len(mast3r_pairs)} pairs...\")\n\n    # Run inference (this returns the dict format we need)\n    output = inference(mast3r_pairs, model, device, batch_size=batch_size, verbose=True)\n\n    # Clear pairs from memory\n    del mast3r_pairs\n    clear_memory()\n\n    print(\"βœ“ MASt3R inference complete\")\n    print(\"After inference:\")\n    get_memory_info()\n\n    # Global alignment\n    print(\"Running global alignment...\")\n    scene = global_aligner(\n        output,\n        device=device,\n        mode=GlobalAlignerMode.PointCloudOptimizer\n    )\n\n    # Clear output after creating scene\n    del output\n    clear_memory()\n\n    print(\"Computing global alignment...\")\n    loss = scene.compute_global_alignment(\n        init=\"mst\",\n        niter=150,  
# Reduced from 300\n        schedule='cosine',\n        lr=0.01\n    )\n\n    print(f\"βœ“ Global alignment complete (final loss: {loss:.6f})\")\n    print(\"Final memory state:\")\n    get_memory_info()\n\n    return scene, images","metadata":{"execution":{"iopub.status.busy":"2026-02-02T08:56:35.637478Z","iopub.execute_input":"2026-02-02T08:56:35.637807Z","iopub.status.idle":"2026-02-02T08:56:35.663238Z","shell.execute_reply.started":"2026-02-02T08:56:35.637784Z","shell.execute_reply":"2026-02-02T08:56:35.66271Z"},"papermill":{"duration":46.280727,"end_time":"2026-01-20T01:07:23.641872","exception":false,"start_time":"2026-01-20T01:06:37.361145","status":"completed"},"tags":[],"trusted":true,"id":"uMBRydm7GpBa"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"","metadata":{"trusted":true,"id":"_tTGwERoGpBb"},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"# process3","metadata":{"id":"1FcMgB48GpBb"}},{"cell_type":"code","source":"# ============================================================================\n# COLMAP Conversion (process3_11.py) - COMPLETE FIXED VERSION - ply success\n# ============================================================================\n\nimport numpy as np\nimport cv2\nfrom pathlib import Path\nimport struct\nfrom scipy.spatial.transform import Rotation\nimport torch\nfrom PIL import Image\n\n\ndef write_next_bytes(fid, data, format_str):\n    \"\"\"Helper function to write bytes to file\"\"\"\n    if isinstance(data, (list, tuple, np.ndarray)):\n        fid.write(struct.pack(\"<\" + format_str, *data))\n    else:\n        fid.write(struct.pack(\"<\" + format_str, data))\n\n\ndef matrix_to_quaternion_translation(matrix: np.ndarray):\n    \"\"\"Robust conversion of 4x4 transformation matrix to quaternion and translation.\"\"\"\n    R = matrix[:3, :3]\n    t = matrix[:3, 3]\n\n    # Use scipy for robust quaternion conversion\n    rot = Rotation.from_matrix(R)\n    quat = rot.as_quat()  # Returns [x, y, z, w]\n\n    # COLMAP format is [w, x, y, z]\n    qvec = np.array([quat[3], quat[0], quat[1], quat[2]])\n\n    return qvec, t\n\n\ndef write_cameras_binary(cameras, path_to_model_file):\n    \"\"\"Write COLMAP cameras.bin file\"\"\"\n    with open(path_to_model_file, \"wb\") as fid:\n        write_next_bytes(fid, len(cameras), \"Q\")\n        for camera_id, cam in cameras.items():\n            model_id = 1  # PINHOLE\n            write_next_bytes(fid, camera_id, \"I\")\n            write_next_bytes(fid, model_id, \"I\")\n            write_next_bytes(fid, cam['width'], \"Q\")\n            write_next_bytes(fid, cam['height'], \"Q\")\n            for p in cam['params']:\n                write_next_bytes(fid, float(p), \"d\")\n\n\ndef write_images_binary(images, path_to_model_file):\n    \"\"\"Write COLMAP images.bin file\"\"\"\n    with open(path_to_model_file, \"wb\") as fid:\n        write_next_bytes(fid, len(images), \"Q\")\n        for image_id, img in images.items():\n            write_next_bytes(fid, image_id, \"I\")\n            write_next_bytes(fid, img['qvec'], \"dddd\")\n            write_next_bytes(fid, img['tvec'], \"ddd\")\n            write_next_bytes(fid, img['camera_id'], \"I\")\n\n            # Write image name\n            for char in img['name']:\n                write_next_bytes(fid, char.encode(\"utf-8\"), \"c\")\n            write_next_bytes(fid, b\"\\x00\", \"c\")\n\n            # Write 2D points\n            write_next_bytes(fid, len(img['xys']), \"Q\")\n            for xy, point3D_id in zip(img['xys'], 
img['point3D_ids']):\n                write_next_bytes(fid, xy, \"dd\")\n                write_next_bytes(fid, point3D_id, \"Q\")\n\n\ndef write_points3d_binary(points3D, path_to_model_file):\n    \"\"\"\n    Write COLMAP points3D.bin file\n\n    Args:\n        points3D: list or dict of 3D point data\n        path_to_model_file: path to points3D.bin\n    \"\"\"\n    with open(path_to_model_file, \"wb\") as fid:\n        # Write number of points\n        if isinstance(points3D, dict):\n            write_next_bytes(fid, len(points3D), \"Q\")\n            points_iter = points3D.values()\n        else:\n            write_next_bytes(fid, len(points3D), \"Q\")\n            points_iter = points3D\n\n        # Write each point\n        for point_id, point in enumerate(points_iter):\n            # Handle both dict with 'id' key and list with index\n            if isinstance(point, dict) and 'id' in point:\n                pid = point['id']\n            else:\n                pid = point_id\n\n            write_next_bytes(fid, pid, \"Q\")\n            write_next_bytes(fid, point['xyz'], \"ddd\")\n            write_next_bytes(fid, point['rgb'], \"BBB\")\n            write_next_bytes(fid, point['error'], \"d\")\n\n            # Write track\n            track_length = len(point['image_ids'])\n            write_next_bytes(fid, track_length, \"Q\")\n            for image_id, point2D_idx in zip(point['image_ids'], point['point2D_idxs']):\n                write_next_bytes(fid, int(image_id), \"I\")\n                write_next_bytes(fid, int(point2D_idx), \"I\")\n\n\ndef save_image_data(scene, images_dir, depth_dir, normal_dir, mask_dir, min_conf_thr, verbose, processed_image_paths=None):\n    \"\"\"Save RGB images, depth maps, normal maps, and masks\"\"\"\n    if verbose:\n        print(\"\\nSaving image data...\")\n\n    # Ensure directories exist\n    images_dir.mkdir(parents=True, exist_ok=True)\n    depth_dir.mkdir(parents=True, exist_ok=True)\n    normal_dir.mkdir(parents=True, exist_ok=True)\n    mask_dir.mkdir(parents=True, exist_ok=True)\n\n    # Get the number of views\n    if hasattr(scene, 'imgs'):\n        num_views = len(scene.imgs)\n        imgs = scene.imgs\n    elif hasattr(scene, 'views'):\n        num_views = len(scene.views)\n        imgs = scene.views\n    else:\n        if verbose:\n            print(\"  Warning: Cannot access views\")\n        return\n\n    # Use processed images if provided\n    if processed_image_paths is not None and len(processed_image_paths) > 0:\n        if verbose:\n            print(f\"  Using {len(processed_image_paths)} processed images\")\n\n        import shutil\n        for idx, src_path in enumerate(processed_image_paths):\n            if idx >= num_views:\n                break\n\n            try:\n                # Copy processed images\n                dst_path = images_dir / f'image_{idx:04d}.jpg'\n                shutil.copy2(src_path, dst_path)\n\n                if verbose and idx < 3:\n                    print(f\"  Copied image {idx}: {Path(src_path).name}\")\n            except Exception as e:\n                if verbose:\n                    print(f\"  Error copying image {idx}: {e}\")\n    else:\n        # If no processed images, extract images from the scene\n        if verbose:\n            print(\"  No processed images provided, extracting from scene...\")\n\n        for idx in range(num_views):\n            try:\n                # Save RGB images\n                img_path = images_dir / f'image_{idx:04d}.jpg'\n\n                # Retrieve 
image data\n                if hasattr(imgs[idx], 'img'):\n                    img = imgs[idx].img\n                elif hasattr(imgs[idx], 'image'):\n                    img = imgs[idx].image\n                else:\n                    img = imgs[idx]\n\n                # Convert tensor to numpy array\n                if isinstance(img, torch.Tensor):\n                    img = img.detach().cpu().numpy()\n\n                # Convert image to correct format\n                if isinstance(img, np.ndarray):\n                    # Convert (C, H, W) -> (H, W, C)\n                    if img.ndim == 3 and img.shape[0] in [1, 3, 4]:\n                        img = np.transpose(img, (1, 2, 0))\n\n                    # Normalize values to [0, 255] range\n                    if img.max() <= 1.0:\n                        img = (img * 255).astype(np.uint8)\n                    else:\n                        img = img.astype(np.uint8)\n\n                    # Convert grayscale to RGB\n                    if img.ndim == 2:\n                        img = np.stack([img, img, img], axis=-1)\n                    elif img.shape[-1] == 1:\n                        img = np.repeat(img, 3, axis=-1)\n\n                    # Save the image\n                    Image.fromarray(img).save(img_path)\n\n                    if verbose and idx < 3:\n                        print(f\"  Saved image {idx}: {img_path}\")\n            except Exception as e:\n                if verbose:\n                    print(f\"  Error saving image {idx}: {e}\")\n\n    # Save depth maps\n    try:\n        if hasattr(scene, 'get_depthmaps'):\n            depthmaps = scene.get_depthmaps()\n            if depthmaps is not None:\n                for idx in range(min(num_views, len(depthmaps))):\n                    depth = depthmaps[idx]\n                    if isinstance(depth, torch.Tensor):\n                        depth = depth.detach().cpu().numpy()\n\n                    if isinstance(depth, np.ndarray):\n                        depth_path = depth_dir / f'depth_{idx:04d}.npy'\n                        np.save(depth_path, depth)\n\n                        if verbose and idx < 3:\n                            print(f\"  Saved depth {idx}: {depth_path}\")\n    except Exception as e:\n        if verbose:\n            print(f\"  Note: Could not save depth maps: {e}\")\n\n    # Save masks\n    try:\n        if hasattr(scene, 'get_masks'):\n            masks = scene.get_masks()\n            if masks is not None:\n                for idx in range(min(num_views, len(masks))):\n                    mask = masks[idx]\n                    if isinstance(mask, torch.Tensor):\n                        mask = mask.detach().cpu().numpy()\n\n                    if isinstance(mask, np.ndarray):\n                        mask_path = mask_dir / f'mask_{idx:04d}.png'\n                        mask_img = (mask * 255).astype(np.uint8)\n                        Image.fromarray(mask_img).save(mask_path)\n\n                        if verbose and idx < 3:\n                            print(f\"  Saved mask {idx}: {mask_path}\")\n    except Exception as e:\n        if verbose:\n            print(f\"  Note: Could not save masks: {e}\")\n\n    if verbose:\n        print(f\"  Completed saving {num_views} images\")\n\n\ndef extract_scene_data(scene, min_conf_thr, verbose):\n    \"\"\"Extract cameras, images, and 3D points from MASt3R scene\"\"\"\n    cameras = {}\n    images_data = {}\n    points3D = []\n\n    if verbose:\n        print(\"\\nExtracting scene data...\")\n\n    # 
Check scene structure\n    if hasattr(scene, 'imgs'):\n        num_views = len(scene.imgs)\n        imgs = scene.imgs\n    elif hasattr(scene, 'views'):\n        num_views = len(scene.views)\n        imgs = scene.views\n    else:\n        num_views = 0\n        imgs = []\n\n    if verbose:\n        print(f\"Number of views: {num_views}\")\n\n    # Extract camera parameters and poses\n    for idx in range(num_views):\n        # Get image size\n        if hasattr(scene, 'imshapes') and idx < len(scene.imshapes):\n            height, width = scene.imshapes[idx]\n        else:\n            height, width = 192, 256\n\n        # Get intrinsics\n        fx = fy = 260.0\n        cx = width / 2.0\n        cy = height / 2.0\n\n        try:\n            if hasattr(scene, 'get_intrinsics'):\n                K = scene.get_intrinsics()\n                if K is not None:\n                    if isinstance(K, torch.Tensor):\n                        K = K.detach().cpu().numpy()\n                    if K.ndim >= 2:\n                        K_view = K[idx] if K.ndim == 3 else K\n                        if K_view.shape[0] >= 3 and K_view.shape[1] >= 3:\n                            fx = float(K_view[0, 0])\n                            fy = float(K_view[1, 1])\n                            cx = float(K_view[0, 2])\n                            cy = float(K_view[1, 2])\n        except:\n            pass\n\n        cameras[idx] = {\n            'model': 'PINHOLE',\n            'width': int(width),\n            'height': int(height),\n            'params': [fx, fy, cx, cy]\n        }\n\n        # Get pose\n        qvec = np.array([1.0, 0.0, 0.0, 0.0])\n        tvec = np.array([0.0, 0.0, 0.0])\n\n        try:\n            if hasattr(scene, 'get_im_poses'):\n                poses = scene.get_im_poses()\n                if poses is not None and idx < len(poses):\n                    pose = poses[idx]\n                    if isinstance(pose, torch.Tensor):\n                        pose = pose.detach().cpu().numpy()\n\n                    if isinstance(pose, np.ndarray) and pose.ndim == 2 and pose.shape == (4, 4):\n                        det = np.linalg.det(pose)\n                        if abs(det) > 1e-10:\n                            pose_inv = np.linalg.inv(pose)\n                            qvec, tvec = matrix_to_quaternion_translation(pose_inv)\n        except:\n            pass\n\n        images_data[idx + 1] = {\n            'qvec': qvec,\n            'tvec': tvec,\n            'camera_id': idx,\n            'name': f'image_{idx:04d}.jpg',\n            'xys': np.array([]),\n            'point3D_ids': np.array([])\n        }\n\n    # Extract 3D points WITH COLORS\n    if verbose:\n        print(\"\\nExtracting 3D points with colors...\")\n\n    try:\n        if hasattr(scene, 'get_pts3d'):\n            pts3d = scene.get_pts3d()\n\n            if pts3d is not None:\n                # Handle list of arrays\n                if isinstance(pts3d, list):\n                    all_points = []\n                    all_colors = []\n\n                    for view_idx, pts in enumerate(pts3d):\n                        if isinstance(pts, torch.Tensor):\n                            pts = pts.detach().cpu().numpy()\n                        if isinstance(pts, np.ndarray):\n                            all_points.append(pts.reshape(-1, 3))\n\n                            # Extract colors from corresponding image\n                            if view_idx < len(imgs):\n                                img = imgs[view_idx]\n             
                   if isinstance(img, torch.Tensor):\n                                    img = img.detach().cpu().numpy()\n\n                                # Convert image format\n                                if img.ndim == 3:\n                                    # (C, H, W) -> (H, W, C)\n                                    if img.shape[0] in [1, 3, 4]:\n                                        img = np.transpose(img, (1, 2, 0))\n\n                                # Normalize to 0-255\n                                if img.max() <= 1.0:\n                                    img = (img * 255).astype(np.uint8)\n                                else:\n                                    img = img.astype(np.uint8)\n\n                                # Handle grayscale\n                                if img.ndim == 2 or img.shape[-1] == 1:\n                                    img = np.stack([img.squeeze()] * 3, axis=-1)\n\n                                # Reshape to match points\n                                img_flat = img.reshape(-1, 3)\n                                all_colors.append(img_flat)\n                            else:\n                                # Default gray if no image available\n                                n_pts = pts.reshape(-1, 3).shape[0]\n                                all_colors.append(np.full((n_pts, 3), 128, dtype=np.uint8))\n\n                    pts3d_combined = np.vstack(all_points) if all_points else None\n                    colors_combined = np.vstack(all_colors) if all_colors else None\n\n                elif isinstance(pts3d, torch.Tensor):\n                    pts3d_combined = pts3d.detach().cpu().numpy().reshape(-1, 3)\n\n                    # Extract colors from first image\n                    if len(imgs) > 0:\n                        img = imgs[0]\n                        if isinstance(img, torch.Tensor):\n                            img = img.detach().cpu().numpy()\n\n                        if img.ndim == 3 and img.shape[0] in [1, 3, 4]:\n                            img = np.transpose(img, (1, 2, 0))\n\n                        if img.max() <= 1.0:\n                            img = (img * 255).astype(np.uint8)\n                        else:\n                            img = img.astype(np.uint8)\n\n                        if img.ndim == 2 or img.shape[-1] == 1:\n                            img = np.stack([img.squeeze()] * 3, axis=-1)\n\n                        colors_combined = img.reshape(-1, 3)\n                    else:\n                        colors_combined = None\n\n                elif isinstance(pts3d, np.ndarray):\n                    pts3d_combined = pts3d.reshape(-1, 3)\n\n                    # Extract colors from first image\n                    if len(imgs) > 0:\n                        img = imgs[0]\n                        if isinstance(img, torch.Tensor):\n                            img = img.detach().cpu().numpy()\n\n                        if img.ndim == 3 and img.shape[0] in [1, 3, 4]:\n                            img = np.transpose(img, (1, 2, 0))\n\n                        if img.max() <= 1.0:\n                            img = (img * 255).astype(np.uint8)\n                        else:\n                            img = img.astype(np.uint8)\n\n                        if img.ndim == 2 or img.shape[-1] == 1:\n                            img = np.stack([img.squeeze()] * 3, axis=-1)\n\n                        colors_combined = img.reshape(-1, 3)\n                    else:\n                        colors_combined = 
None\n                else:\n                    pts3d_combined = None\n                    colors_combined = None\n\n                if pts3d_combined is not None and len(pts3d_combined) > 0:\n                    # Get confidence\n                    conf_combined = None\n                    if hasattr(scene, 'get_conf'):\n                        conf = scene.get_conf()\n                        if conf is not None:\n                            if isinstance(conf, list):\n                                all_conf = []\n                                for c in conf:\n                                    if isinstance(c, torch.Tensor):\n                                        c = c.detach().cpu().numpy()\n                                    all_conf.append(c.flatten())\n                                conf_combined = np.concatenate(all_conf) if all_conf else None\n                            elif isinstance(conf, torch.Tensor):\n                                conf_combined = conf.detach().cpu().numpy().flatten()\n                            elif isinstance(conf, np.ndarray):\n                                conf_combined = conf.flatten()\n\n                    # Ensure all arrays have the same size\n                    min_size = len(pts3d_combined)\n                    if colors_combined is not None:\n                        min_size = min(min_size, len(colors_combined))\n                    if conf_combined is not None:\n                        min_size = min(min_size, len(conf_combined))\n\n                    pts3d_combined = pts3d_combined[:min_size]\n                    if colors_combined is not None:\n                        colors_combined = colors_combined[:min_size]\n                    else:\n                        colors_combined = np.full((min_size, 3), 128, dtype=np.uint8)\n\n                    # Filter by confidence\n                    if conf_combined is not None and len(conf_combined) > 0:\n                        conf_combined = conf_combined[:min_size]\n                        mask = conf_combined >= min_conf_thr\n                        pts3d_filtered = pts3d_combined[mask]\n                        colors_filtered = colors_combined[mask]\n                    else:\n                        pts3d_filtered = pts3d_combined\n                        colors_filtered = colors_combined\n\n                    # Create point cloud with colors\n                    for pt, color in zip(pts3d_filtered, colors_filtered):\n                        if np.all(np.isfinite(pt)):\n                            points3D.append({\n                                'xyz': pt,\n                                'rgb': color.astype(np.uint8),  #use actual color\n                                'error': 0.0,\n                                'image_ids': np.array([]),\n                                'point2D_idxs': np.array([])\n                            })\n\n                    if verbose:\n                        print(f\"  Extracted {len(points3D)} 3D points with colors\")\n                        print(f\"  Sample colors: {[p['rgb'].tolist() for p in points3D[:3]]}\")\n    except Exception as e:\n        if verbose:\n            print(f\"  Error extracting 3D points: {e}\")\n        import traceback\n        traceback.print_exc()\n\n    if verbose:\n        print(f\"\\nTotal: {len(cameras)} cameras, {len(images_data)} images, {len(points3D)} points\")\n\n    return cameras, images_data, points3D\n\n\n\n\ndef convert_mast3r_to_colmap(scene, output_dir, min_conf_thr=1.5, clean_depth=True,\n        
                    mask_images=True, verbose=True, processed_image_paths=None,\n                            max_points=100000):\n    \"\"\"\n    Convert MASt3R scene to COLMAP format\n\n    Args:\n        scene: MASt3R optimized scene\n        output_dir: Output directory path\n        min_conf_thr: Minimum confidence threshold for 3D points\n        clean_depth: Whether to clean depth maps\n        mask_images: Whether to apply masks\n        verbose: Print verbose output\n        processed_image_paths: List of paths to processed (square) images\n    \"\"\"\n\n    output_dir = Path(output_dir)\n    sparse_dir = output_dir / \"sparse\" / \"0\"\n    images_dir = output_dir / \"images\"\n    depth_dir = output_dir / \"depth\"\n    normal_dir = output_dir / \"normal\"\n    mask_dir = output_dir / \"mask\"\n\n    # Create directories\n    sparse_dir.mkdir(parents=True, exist_ok=True)\n    images_dir.mkdir(parents=True, exist_ok=True)\n    depth_dir.mkdir(parents=True, exist_ok=True)\n    normal_dir.mkdir(parents=True, exist_ok=True)\n    mask_dir.mkdir(parents=True, exist_ok=True)\n\n    if verbose:\n        print(\"\\n\" + \"=\"*70)\n        print(\"Converting MASt3R scene to COLMAP format\")\n        print(\"=\"*70)\n        print(f\"Output directory: {output_dir}\")\n\n    cameras, images_data, points3D = extract_scene_data(scene, min_conf_thr, verbose)\n\n    #----------------------------down sampling\n    if max_points is not None and len(points3D) > max_points:\n        print(f\"\\nDownsampling 3D points from {len(points3D)} to {max_points}...\")\n\n        if isinstance(points3D, dict):\n            all_ids = list(points3D.keys())\n            sampled_ids = np.random.choice(all_ids, max_points, replace=False)\n            points3D = {idx: points3D[idx] for idx in sampled_ids}\n        elif isinstance(points3D, list):\n            sampled_indices = np.random.choice(len(points3D), max_points, replace=False)\n            points3D = [points3D[i] for i in sampled_indices]\n        else:\n            raise TypeError(f\"points3D must be dict or list, got {type(points3D)}\")\n    #----------------------------down sampling\n\n    save_image_data(scene, images_dir, depth_dir, normal_dir, mask_dir,\n                    min_conf_thr, verbose, processed_image_paths=processed_image_paths)\n\n    if verbose:\n        print(\"\\nWriting COLMAP binary files...\")\n\n    write_cameras_binary(cameras, sparse_dir / \"cameras.bin\")\n    if verbose:\n        print(f\"  βœ“ cameras.bin ({len(cameras)} cameras)\")\n\n    write_images_binary(images_data, sparse_dir / \"images.bin\")\n    if verbose:\n        print(f\"  βœ“ images.bin ({len(images_data)} images)\")\n\n    write_points3d_binary(points3D, sparse_dir / \"points3D.bin\")\n    if verbose:\n        print(f\"  βœ“ points3D.bin ({len(points3D)} points)\")\n\n    if verbose:\n        print(\"\\n\" + \"=\"*70)\n        print(\"βœ“ COLMAP conversion complete!\")\n        print(\"=\"*70)\n\n    return output_dir","metadata":{"id":"X2ZCB9Kt3C1w"},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"# gaussian splat","metadata":{"id":"KUHtLjgsGpBc"}},{"cell_type":"code","source":"# ==========================================\n# Gaussian Splatting Training Function\n# ==========================================\ndef train_gaussian_splatting(colmap_dir, output_dir, iterations=7000):\n    \"\"\"\n    Train a Gaussian Splatting model using COLMAP data.\n\n    Args:\n        colmap_dir: Root directory of COLMAP data (contains sparse/0/*.bin)\n    
    output_dir: Target directory for Gaussian Splatting output\n        iterations: Number of training iterations\n\n    Returns:\n        output_dir: The path where the trained model is saved\n    \"\"\"\n    import subprocess\n    import os\n    import shutil\n    from pathlib import Path\n\n    print(\"======================================================================\")\n    print(\"Step 5: Gaussian Splatting Training\")\n    print(\"======================================================================\")\n    print(f\"Input COLMAP directory (Root): {colmap_dir}\")\n    print(f\"Output directory: {output_dir}\")\n    print(f\"Iterations: {iterations}\")\n\n    # --- Fix: Set correct search path for COLMAP binaries ---\n    # MASt3R output is located in colmap_dir/sparse/0/*.bin\n    colmap_sparse_src = os.path.join(colmap_dir, \"sparse\", \"0\")\n    required_files = ['cameras.bin', 'images.bin', 'points3D.bin']\n\n    # Pre-flight check\n    print(\"\\n[1/4] Checking COLMAP files...\")\n    for filename in required_files:\n        filepath = os.path.join(colmap_sparse_src, filename)\n        if not os.path.exists(filepath):\n            raise FileNotFoundError(\n                f\"Required COLMAP file not found: {filepath}\\n\"\n                f\"Verify if Step 4 correctly saved files to {colmap_sparse_src}\"\n            )\n        print(f\"  βœ“ Found {filename}\")\n\n    # Verify Gaussian Splatting repository\n    gs_repo = \"/content/gaussian-splatting\"\n    if not os.path.exists(gs_repo):\n        raise FileNotFoundError(f\"Gaussian Splatting repository not found: {gs_repo}\")\n\n    # --- Prepare Directory Structure ---\n    # The GS train.py expects the following structure:\n    # output_dir/\n    # β”œβ”€β”€ images/\n    # └── sparse/0/*.bin\n\n    print(\"\\n[2/4] Preparing directory structure...\")\n    images_dst_dir = os.path.join(output_dir, 'images')\n    sparse_dst_dir = os.path.join(output_dir, 'sparse', '0')\n    os.makedirs(images_dst_dir, exist_ok=True)\n    os.makedirs(sparse_dst_dir, exist_ok=True)\n    print(f\"  βœ“ Created {images_dst_dir}\")\n    print(f\"  βœ“ Created {sparse_dst_dir}\")\n\n    # --- Copy Images (Processed/Split images) ---\n    # Retrieve images from 'processed_images' located alongside the colmap_dir\n    print(\"\\n[3/4] Copying processed images...\")\n    processed_images_src = os.path.join(os.path.dirname(colmap_dir), 'processed_images')\n\n    if not os.path.exists(processed_images_src):\n        raise FileNotFoundError(\n            f\"Processed images directory not found: {processed_images_src}\\n\"\n            f\"Expected location: {os.path.dirname(colmap_dir)}/processed_images\"\n        )\n\n    # Copy image files and keep a count\n    copied_count = 0\n    image_extensions = ('.jpg', '.jpeg', '.png', '.JPG', '.JPEG', '.PNG')\n\n    for img in sorted(os.listdir(processed_images_src)):\n        if img.lower().endswith(image_extensions):\n            src = os.path.join(processed_images_src, img)\n            dst = os.path.join(images_dst_dir, img)\n            shutil.copy2(src, dst)\n            copied_count += 1\n\n    if copied_count == 0:\n        raise RuntimeError(f\"No images found in {processed_images_src}\")\n\n    print(f\"  βœ“ Copied {copied_count} images from {processed_images_src}\")\n    print(f\"  βœ“ Images prepared in {images_dst_dir}\")\n\n    # --- Copy COLMAP Binaries ---\n    print(\"\\n[4/4] Copying COLMAP sparse reconstruction...\")\n    for filename in required_files:\n        src = 
os.path.join(colmap_sparse_src, filename)\n        dst = os.path.join(sparse_dst_dir, filename)\n        # Avoid error if src and dst are the same path\n        if os.path.abspath(src) != os.path.abspath(dst):\n            shutil.copy2(src, dst)\n            file_size = os.path.getsize(dst)\n            print(f\"  βœ“ Copied {filename} ({file_size:,} bytes)\")\n\n    print(f\"  βœ“ COLMAP files prepared in {sparse_dst_dir}\")\n\n    # --- Construct Execution Command ---\n    # Set the parent directory (containing 'images' and 'sparse/0') as the source (-s)\n    print(\"\\n\" + \"=\"*70)\n    print(\"Starting Gaussian Splatting Training...\")\n    print(\"=\"*70)\n\n    cmd = [\n        \"python\", os.path.join(gs_repo, \"train.py\"),\n        \"-s\", output_dir,  # Use prepared directory as source\n        \"-m\", output_dir,  # Output training results to the same directory\n        \"--iterations\", str(iterations),\n        \"--test_iterations\", \"-1\",\n        \"--save_iterations\", str(iterations),  # Save only the final result\n        \"--checkpoint_iterations\", \"-1\",\n        \"--quiet\"\n    ]\n\n    print(f\"Command: {' '.join(cmd)}\\n\")\n\n    # Execute training\n    result = subprocess.run(cmd, capture_output=True, text=True)\n\n    if result.returncode != 0:\n        print(\"\\n\" + \"=\"*70)\n        print(\"❌ Training failed!\")\n        print(\"=\"*70)\n        print(\"\\n--- STDOUT ---\")\n        print(result.stdout)\n        print(\"\\n--- STDERR ---\")\n        print(result.stderr)\n        print(\"=\"*70)\n        raise RuntimeError(\"Gaussian Splatting training failed\")\n\n    print(\"\\n\" + \"=\"*70)\n    print(\"βœ“ Training complete!\")\n    print(\"=\"*70)\n    print(f\"Model saved to: {output_dir}\")\n    print(f\"Point cloud: {os.path.join(output_dir, 'point_cloud', f'iteration_{iterations}')}\")\n\n    return output_dir","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-02-02T08:56:35.733855Z","iopub.execute_input":"2026-02-02T08:56:35.734036Z","iopub.status.idle":"2026-02-02T08:56:35.751392Z","shell.execute_reply.started":"2026-02-02T08:56:35.734019Z","shell.execute_reply":"2026-02-02T08:56:35.75091Z"},"id":"HTSHOx23GpBc"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"","metadata":{"trusted":true,"id":"ElzSgpJ6GpBc"},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"# main_pipeline","metadata":{"id":"wnNtrHf7GpBc"}},{"cell_type":"code","source":"def main_pipeline(image_dir, output_dir, square_size=1024, iterations=7000,\n                  max_images=None, max_pairs=None, max_points=1000000):\n    \"\"\"\n    Complete Process3 Pipeline:\n    Biplet β†’ DINO β†’ MASt3R β†’ COLMAP β†’ Gaussian Splatting\n    \"\"\"\n    import os\n    import torch\n\n    os.makedirs(output_dir, exist_ok=True)\n\n    # ==========================================\n    # Step 1: Biplet-Square Normalization\n    # ==========================================\n    print(\"\\n\" + \"=\"*70)\n    print(\"Step 1: Biplet-Square Normalization\")\n    print(\"=\"*70)\n\n    processed_dir, image_paths = normalize_image_sizes_biplet(\n        input_dir=image_dir,\n        output_dir=os.path.join(output_dir, 'processed_images'),\n        size=square_size,\n    )\n\n    # ==========================================\n    # Step 2: DINO Pair Selection\n    # ==========================================\n    print(\"\\n\" + \"=\"*70)\n    print(\"Step 2: DINO Pair Selection\")\n    print(\"=\"*70)\n\n    pairs = get_image_pairs_dino(\n     
   image_paths=image_paths,\n        max_pairs=max_pairs\n    )\n\n    # ==========================================\n    # Step 3: MASt3R Reconstruction\n    # ==========================================\n    print(\"\\n\" + \"=\"*70)\n    print(\"Step 3: MASt3R Reconstruction\")\n    print(\"=\"*70)\n\n    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n    model = load_mast3r_model(device)\n\n    scene, mast3r_images = run_mast3r_pairs(\n        model=model,\n        image_paths=image_paths,\n        pairs=pairs, device=device,\n        max_pairs=max_pairs\n    )\n\n    # Clean up model\n    del model\n    clear_memory()\n\n    # ==========================================\n    # Step 4: Convert to COLMAP Format\n    # ==========================================\n    print(\"\\n\" + \"=\"*70)\n    print(\"Step 4: COLMAP Conversion\")\n    print(\"=\"*70)\n\n    colmap_dir = convert_mast3r_to_colmap(\n        scene=scene,\n        output_dir=os.path.join(output_dir, 'colmap'),\n        min_conf_thr=1.5, max_points=max_points\n    )\n\n    #---------------\n    # Copy the renamed images (image_XXXX.jpg) into the Gaussian Splatting images\n    # folder so the file names referenced in images.bin can be resolved.\n    import shutil\n\n    src_dir = os.path.join(output_dir, 'colmap', 'images')\n    dst_dir = os.path.join(output_dir, 'gaussian_splatting', 'images')\n\n    os.makedirs(dst_dir, exist_ok=True)\n\n    copied = 0\n    for f in sorted(os.listdir(src_dir)):\n        if f.startswith('image_') and f.endswith('.jpg'):\n            src_path = os.path.join(src_dir, f)\n            dst_path = os.path.join(dst_dir, f)\n\n            if not os.path.exists(dst_path):\n                shutil.copy2(src_path, dst_path)\n            copied += 1\n\n    print(f\"Prepared {copied} image files in {dst_dir}\")\n\n    #-----------------\n\n    # ==========================================\n    # Step 5: Gaussian Splatting Training\n    # ==========================================\n    print(\"\\n\" + \"=\"*70)\n    print(\"Step 5: Gaussian Splatting Training\")\n    print(\"=\"*70)\n\n    # 'colmap_dir' is the Path returned by convert_mast3r_to_colmap\n    # (i.e. output_dir/colmap); it contains the generated 'sparse/0/*.bin' files.\n    colmap_root = str(colmap_dir)\n\n    # Define the output directory for Gaussian Splatting\n    gs_output_dir = os.path.join(output_dir, 'gaussian_splatting')\n\n    # Call the existing 'train_gaussian_splatting' function.\n    # Standard GS practice is to pass the parent directory containing the 'sparse' folder.\n    gs_output = train_gaussian_splatting(\n        colmap_dir=colmap_root,  # root directory that holds sparse/0\n        output_dir=gs_output_dir,\n        iterations=iterations\n    )\n\n    return gs_output","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-02-02T09:19:47.375104Z","iopub.execute_input":"2026-02-02T09:19:47.375866Z","iopub.status.idle":"2026-02-02T09:19:47.38652Z","shell.execute_reply.started":"2026-02-02T09:19:47.375831Z","shell.execute_reply":"2026-02-02T09:19:47.38583Z"},"id":"GcNTYU67GpBc"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"","metadata":{"trusted":true,"id":"JmsYT2hVGpBc"},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"# execute","metadata":{"id":"Ce5fNJCEGpBc"}},{"cell_type":"code","source":"if __name__ == \"__main__\":\n\n    IMAGE_DIR = \"/content/drive/MyDrive/your_folder/fountain100\"\n    OUTPUT_DIR = \"/content/output\"\n\n    gs_output = main_pipeline(\n        image_dir=IMAGE_DIR,\n        output_dir=OUTPUT_DIR,\n        square_size=1024,\n        iterations=3000,\n        max_images=30,  # note: accepted but not currently used inside main_pipeline\n        max_pairs=1000,\n        max_points=100000\n    
)\n\n    print(f\"\\n{'='*70}\")\n    print(\"Pipeline completed successfully!\")\n    print(f\"{'='*70}\")\n    print(f\"Gaussian Splatting output: {gs_output}\")","metadata":{"execution":{"iopub.status.busy":"2026-02-02T09:19:51.510722Z","iopub.execute_input":"2026-02-02T09:19:51.51143Z","iopub.status.idle":"2026-02-02T09:23:11.510434Z","shell.execute_reply.started":"2026-02-02T09:19:51.511399Z","shell.execute_reply":"2026-02-02T09:23:11.509646Z"},"papermill":{"duration":905.62414,"end_time":"2026-01-20T01:22:29.355023","exception":false,"start_time":"2026-01-20T01:07:23.730883","status":"completed"},"tags":[],"_kg_hide-output":true,"trusted":true,"id":"xgMLHPPpGpBd","outputId":"f3637d4f-b7f6-4325-806e-39326a1ba02a"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"","metadata":{"trusted":true,"id":"DPfmJdNvGpBd"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"!apt-get install -y tree > /dev/null","metadata":{"id":"VQsLeKY8Rl8Y","papermill":{"duration":0.154679,"end_time":"2026-01-20T01:22:29.976313","exception":false,"start_time":"2026-01-20T01:22:29.821634","status":"completed"},"tags":[],"trusted":true,"execution":{"iopub.status.busy":"2026-02-02T09:10:56.433222Z","iopub.execute_input":"2026-02-02T09:10:56.433994Z","iopub.status.idle":"2026-02-02T09:11:06.952416Z","shell.execute_reply.started":"2026-02-02T09:10:56.433963Z","shell.execute_reply":"2026-02-02T09:11:06.95163Z"}},"outputs":[],"execution_count":null},{"cell_type":"code","source":"!tree /content/output","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-02-02T09:17:58.224254Z","iopub.execute_input":"2026-02-02T09:17:58.225096Z","iopub.status.idle":"2026-02-02T09:17:58.5013Z","shell.execute_reply.started":"2026-02-02T09:17:58.225063Z","shell.execute_reply":"2026-02-02T09:17:58.500415Z"},"id":"O3F9m-VjGpBd","outputId":"814a6a56-23ca-4138-b69b-bbef1f9e262e"},"outputs":[],"execution_count":null},
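{"cell_type":"markdown","source":"# sanity check (optional)\n\nA minimal sketch, not part of the original pipeline, that reads back the entry counts of the COLMAP binaries written by write_cameras_binary / write_images_binary / write_points3d_binary above. It assumes the default OUTPUT_DIR of /content/output; adjust the path if you changed it.","metadata":{}},{"cell_type":"code","source":"# Optional sanity check: read back the entry counts of the exported COLMAP binaries.\n# Assumption: the default OUTPUT_DIR ('/content/output') was used; adjust colmap_sparse if not.\nimport os\nimport struct\n\ncolmap_sparse = '/content/output/colmap/sparse/0'\n\nfor name in ['cameras.bin', 'images.bin', 'points3D.bin']:\n    path = os.path.join(colmap_sparse, name)\n    if not os.path.exists(path):\n        print(f'missing: {path}')\n        continue\n    with open(path, 'rb') as f:\n        # Each file starts with the number of entries as a little-endian uint64,\n        # matching the write_next_bytes(fid, len(...), \"Q\") calls used when writing.\n        (count,) = struct.unpack('<Q', f.read(8))\n    size = os.path.getsize(path)\n    print(f'{name}: {count} entries, {size:,} bytes')","metadata":{"trusted":true},"outputs":[],"execution_count":null}]}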