File size: 67,221 Bytes
2266195
1
{"metadata":{"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"name":"python"},"colab":{"provenance":[],"gpuType":"T4"},"accelerator":"GPU","widgets":{"application/vnd.jupyter.widget-state+json":{"d54a3ca373d748c3b6063f7944d320f5":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_bef87068a4d94bef9dfce33a9d23e089","IPY_MODEL_8f3ae5baabbf44f3bf925d8cd3d6a664","IPY_MODEL_ab45b6e07fa74763abee8739860ef802"],"layout":"IPY_MODEL_91cf3d40894944d2a6e5731c38ebc6d8"}},"bef87068a4d94bef9dfce33a9d23e089":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_3ff85810885447dd916ab049d2f71bc1","placeholder":"โ€‹","style":"IPY_MODEL_d7cf74fecf3c4688a72ce6553c022b47","value":"preprocessor_config.json:โ€‡100%"}},"8f3ae5baabbf44f3bf925d8cd3d6a664":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_d7c893483ed64098af4bf9fb3d52511f","max":436,"min":0,"orientation":"horizontal","style":"IPY_MODEL_b4372bf488d842dd9d41633d838e97fe","value":436}},"ab45b6e07fa74763abee8739860ef802":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_5501ecdc0c3e4920a04879ffc8335449","placeholder":"โ€‹","style":"IPY_MODEL_7b082beb8b304620b20cca2b117d3d19","value":"โ€‡436/436โ€‡[00:00&lt;00:00,โ€‡21.4kB/s]"}},"91cf3d40894944d2a6e5731c38ebc6d8":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"3ff85810885447dd916ab049d2f71bc1":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"d7cf74fecf3c4688a72ce6553c022b47":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"d7c893483ed64098af4bf9fb3d52511f":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"b4372bf488d842dd9d41633d838e97fe":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"5501ecdc0c3e4920a04879ffc8335449":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"7b082beb8b304620b20cca2b117d3d19":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"3caf6ce3be86419eab4b3a5cc92c0f3e":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_f84dd43b90ff4f5badcb411ef170713a","IPY_MODEL_def430e09227485d854321e94fb17549","IPY_MODEL_f94ec42f81084a399ee425790cb3cf8a"],"layout":"IPY_MODEL_d8d50fd791fd4098ab1691b3a3a23c50"}},"f84dd43b90ff4f5badcb411ef170713a":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_e479418ea8144971873f7e2c195d6002","placeholder":"โ€‹","style":"IPY_MODEL_ae4a9989d81b446299751cb1f1d68f97","value":"config.json:โ€‡100%"}},"def430e09227485d854321e94fb17549":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_6f7ac7e4c90645dfae815d4a3c54459b","max":548,"min":0,"orientation":"horizontal","style":"IPY_MODEL_0a82e9b8ab35464bbdcae65f2bf35d5a","value":548}},"f94ec42f81084a399ee425790cb3cf8a":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_a61c105a4ed546fd907041026dc87beb","placeholder":"โ€‹","style":"IPY_MODEL_9e55321fa5cb4ef8a170e7ec6a9aaa9f","value":"โ€‡548/548โ€‡[00:00&lt;00:00,โ€‡17.7kB/s]"}},"d8d50fd791fd4098ab1691b3a3a23c50":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"e479418ea8144971873f7e2c195d6002":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"ae4a9989d81b446299751cb1f1d68f97":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"6f7ac7e4c90645dfae815d4a3c54459b":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"0a82e9b8ab35464bbdcae65f2bf35d5a":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"a61c105a4ed546fd907041026dc87beb":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"9e55321fa5cb4ef8a170e7ec6a9aaa9f":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"884613eba51d462b89ee37461de1af2d":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_80d18b82487548818e2844f1708ff058","IPY_MODEL_f3a4ec7390064c848a377a85d8aa09de","IPY_MODEL_783b7fa88d30476ca3b75e43fb33dbf7"],"layout":"IPY_MODEL_6f5989c42d6746519c6bde617ed0f8e7"}},"80d18b82487548818e2844f1708ff058":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_87341d9784a34784b9a7209189bc2581","placeholder":"โ€‹","style":"IPY_MODEL_46eae6702f3c4614a1e444d3b3f3da18","value":"model.safetensors:โ€‡100%"}},"f3a4ec7390064c848a377a85d8aa09de":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_dbf5c8875e9f46ccb647b4204fa429a5","max":346345912,"min":0,"orientation":"horizontal","style":"IPY_MODEL_22523fb1c2964a049a65b0e2b0f3b5ec","value":346345912}},"783b7fa88d30476ca3b75e43fb33dbf7":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_8dfdb681aad14507b2709740f2e58362","placeholder":"โ€‹","style":"IPY_MODEL_0412c710886045a6a9bbc752582202f3","value":"โ€‡346M/346Mโ€‡[00:03&lt;00:00,โ€‡143MB/s]"}},"6f5989c42d6746519c6bde617ed0f8e7":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"87341d9784a34784b9a7209189bc2581":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"46eae6702f3c4614a1e444d3b3f3da18":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"dbf5c8875e9f46ccb647b4204fa429a5":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"22523fb1c2964a049a65b0e2b0f3b5ec":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"8dfdb681aad14507b2709740f2e58362":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"0412c710886045a6a9bbc752582202f3":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"f788806a652747f7a20dbcdd51b09f6a":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_f29a9fccd0fc434baa33de09787f6a23","IPY_MODEL_2e1519a3c5604327b0dba766b4991e6c","IPY_MODEL_db7c7b60e2934671a2fab26567da72f7"],"layout":"IPY_MODEL_60de62d02d6742f8ade69790686576e5"}},"f29a9fccd0fc434baa33de09787f6a23":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_894372b0803243448de8c28636d6a699","placeholder":"โ€‹","style":"IPY_MODEL_70c0f25009a442a7a7d391ddbac4a69e","value":"Loadingโ€‡weights:โ€‡100%"}},"2e1519a3c5604327b0dba766b4991e6c":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_5eed7b16dfe2443cac3a0601e438be41","max":223,"min":0,"orientation":"horizontal","style":"IPY_MODEL_248829e8205e47f1a2f316055002e0d4","value":223}},"db7c7b60e2934671a2fab26567da72f7":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_fa00b8691718403ca9a2c7d23d06bab6","placeholder":"โ€‹","style":"IPY_MODEL_ea24828291cd41e7b6fa5946b55d426e","value":"โ€‡223/223โ€‡[00:00&lt;00:00,โ€‡443.22it/s,โ€‡Materializingโ€‡param=layernorm.weight]"}},"60de62d02d6742f8ade69790686576e5":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"894372b0803243448de8c28636d6a699":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"70c0f25009a442a7a7d391ddbac4a69e":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"5eed7b16dfe2443cac3a0601e438be41":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"248829e8205e47f1a2f316055002e0d4":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"fa00b8691718403ca9a2c7d23d06bab6":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"ea24828291cd41e7b6fa5946b55d426e":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}}}},"kaggle":{"accelerator":"none","dataSources":[],"isInternetEnabled":true,"language":"python","sourceType":"notebook","isGpuEnabled":false}},"nbformat_minor":4,"nbformat":4,"cells":[{"cell_type":"markdown","source":"# **biplet-dino-lightglue-colmap-gs**\n","metadata":{"id":"qDQLX3PArmh8"}},{"cell_type":"markdown","source":"# **setup**","metadata":{"id":"vXt8y7QyyRn9"}},{"cell_type":"code","source":"from google.colab import drive\ndrive.mount('/content/drive')","metadata":{"id":"t9kAhlZHTIqC","outputId":"875640ed-fd53-4ab2-e6cb-d623898f925d"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"import os\nimport sys\nimport subprocess\nfrom PIL import Image\n\ndef run_cmd(cmd, check=True, capture=False):\n    \"\"\"Run command with better error handling\"\"\"\n    print(f\"Running: {' '.join(cmd)}\")\n    result = subprocess.run(\n        cmd,\n        capture_output=capture,\n        text=True,\n        check=False\n    )\n    if check and result.returncode != 0:\n        print(f\"โŒ Command failed with code {result.returncode}\")\n        if capture:\n            print(f\"STDOUT: {result.stdout}\")\n            print(f\"STDERR: {result.stderr}\")\n    return result\n\ndef setup_environment():\n    \"\"\"\n    Colab environment setup for Gaussian Splatting + LightGlue + pycolmap\n    Python 3.12 compatible version (v8)\n    \"\"\"\n\n    print(\"๐Ÿš€ Setting up COLAB environment (v8 - Python 3.12 compatible)\")\n\n    WORK_DIR = \"/content/gaussian-splatting\"\n\n    # =====================================================================\n    # STEP 0: NumPy FIX (Python 3.12 compatible)\n    # =====================================================================\n    print(\"\\n\" + \"=\"*70)\n    print(\"STEP 0: Fix NumPy (Python 3.12 compatible)\")\n    print(\"=\"*70)\n\n    # Python 3.12 requires numpy >= 1.26\n    #run_cmd([sys.executable, \"-m\", \"pip\", \"uninstall\", \"-y\", \"numpy\"])\n    #run_cmd([sys.executable, \"-m\", \"pip\", \"install\", \"numpy\"]) #no need to be 1.26.4\n\n    # sanity check\n    #run_cmd([sys.executable, \"-c\", \"import numpy; print('NumPy:', numpy.__version__)\"])\n\n    # =====================================================================\n    # STEP 1: System packages (Colab)\n    # =====================================================================\n    print(\"\\n\" + \"=\"*70)\n    print(\"STEP 1: System packages\")\n    print(\"=\"*70)\n\n    run_cmd([\"apt-get\", \"update\", \"-qq\"])\n    run_cmd([\n        \"apt-get\", \"install\", \"-y\", \"-qq\",\n        \"colmap\",\n        \"build-essential\",\n        \"cmake\",\n        \"git\",\n        \"libopenblas-dev\",\n        \"xvfb\"\n    ])\n\n    # virtual display (COLMAP / OpenCV safety)\n    os.environ[\"QT_QPA_PLATFORM\"] = \"offscreen\"\n    os.environ[\"DISPLAY\"] = \":99\"\n    subprocess.Popen(\n        [\"Xvfb\", \":99\", \"-screen\", \"0\", \"1024x768x24\"],\n        stdout=subprocess.DEVNULL,\n        stderr=subprocess.DEVNULL\n    )\n\n    # =====================================================================\n    # STEP 2: Clone Gaussian Splatting\n    # =====================================================================\n    print(\"\\n\" + \"=\"*70)\n    print(\"STEP 2: Clone Gaussian Splatting\")\n    print(\"=\"*70)\n\n    if not os.path.exists(WORK_DIR):\n        run_cmd([\n            \"git\", \"clone\", \"--recursive\",\n            \"https://github.com/graphdeco-inria/gaussian-splatting.git\",\n            WORK_DIR\n        ])\n    else:\n        print(\"โœ“ Repository already exists\")\n\n    # =====================================================================\n    # STEP 3: Python packages (FIXED ORDER & VERSIONS)\n    # =====================================================================\n    print(\"\\n\" + \"=\"*70)\n    print(\"STEP 3: Python packages (VERBOSE MODE)\")\n    print(\"=\"*70)\n\n    # ---- PyTorch (Colab CUDAๅฏพๅฟœ) ----\n    print(\"\\n๐Ÿ“ฆ Installing PyTorch...\")\n    run_cmd([\n        sys.executable, \"-m\", \"pip\", \"install\",\n        \"torch\", \"torchvision\", \"torchaudio\"\n    ])\n\n    # ---- Core utils ----\n    print(\"\\n๐Ÿ“ฆ Installing core utilities...\")\n    run_cmd([\n        sys.executable, \"-m\", \"pip\", \"install\",\n        \"opencv-python\",\n        \"pillow\",\n        \"imageio\",\n        \"imageio-ffmpeg\",\n        \"plyfile\",\n        \"tqdm\",\n        \"tensorboard\"\n    ])\n\n    # ---- transformers (NumPy 1.26 compatible) ----\n    print(\"\\n๐Ÿ“ฆ Installing transformers...\")\n    # Install transformers with proper dependencies\n    run_cmd([\n        sys.executable, \"-m\", \"pip\", \"install\",\n        \"transformers>=4.45.0\"\n    ])\n\n    # ---- LightGlue stack (GITHUB INSTALL) ----\n    print(\"\\n๐Ÿ“ฆ Installing LightGlue stack...\")\n\n    # Install kornia first\n    run_cmd([sys.executable, \"-m\", \"pip\", \"install\", \"kornia\"])\n\n    # Install h5py (sometimes needed)\n    run_cmd([sys.executable, \"-m\", \"pip\", \"install\", \"h5py\"])\n\n    # Install matplotlib (LightGlue dependency)\n    run_cmd([sys.executable, \"-m\", \"pip\", \"install\", \"matplotlib\"])\n\n    # Install LightGlue directly from GitHub (more reliable)\n    print(\"  Installing LightGlue from GitHub...\")\n    run_cmd([sys.executable, \"-m\", \"pip\", \"install\",\n            \"git+https://github.com/cvg/LightGlue.git\"])\n\n    # Install pycolmap\n    run_cmd([sys.executable, \"-m\", \"pip\", \"install\", \"pycolmap\"])\n\n    # =====================================================================\n    # STEP 4: Build GS submodules\n    # =====================================================================\n    print(\"\\n\" + \"=\"*70)\n    print(\"STEP 4: Build Gaussian Splatting submodules\")\n    print(\"=\"*70)\n\n    submodules = {\n        \"diff-gaussian-rasterization\":\n            \"https://github.com/graphdeco-inria/diff-gaussian-rasterization.git\",\n        \"simple-knn\":\n            \"https://github.com/camenduru/simple-knn.git\"\n    }\n\n    for name, repo in submodules.items():\n        print(f\"\\n๐Ÿ“ฆ Installing {name}...\")\n        path = os.path.join(WORK_DIR, \"submodules\", name)\n        if not os.path.exists(path):\n            run_cmd([\"git\", \"clone\", repo, path])\n        run_cmd([sys.executable, \"-m\", \"pip\", \"install\", path])\n\n    # =====================================================================\n    # STEP 5: Detailed Verification\n    # =====================================================================\n    print(\"\\n\" + \"=\"*70)\n    print(\"STEP 5: Detailed Verification\")\n    print(\"=\"*70)\n\n    # NumPy (verify version first)\n    print(\"\\n๐Ÿ” Testing NumPy...\")\n    try:\n        import numpy as np\n        print(f\"  โœ“ NumPy: {np.__version__}\")\n    except Exception as e:\n        print(f\"  โŒ NumPy failed: {e}\")\n\n    # PyTorch\n    print(\"\\n๐Ÿ” Testing PyTorch...\")\n    try:\n        import torch\n        print(f\"  โœ“ PyTorch: {torch.__version__}\")\n        print(f\"  โœ“ CUDA available: {torch.cuda.is_available()}\")\n        if torch.cuda.is_available():\n            print(f\"  โœ“ CUDA version: {torch.version.cuda}\")\n    except Exception as e:\n        print(f\"  โŒ PyTorch failed: {e}\")\n\n    # transformers\n    print(\"\\n๐Ÿ” Testing transformers...\")\n    try:\n        import transformers\n        print(f\"  โœ“ transformers version: {transformers.__version__}\")\n        from transformers import AutoModel\n        print(f\"  โœ“ AutoModel import: OK\")\n    except Exception as e:\n        print(f\"  โŒ transformers failed: {e}\")\n        print(f\"  Attempting detailed diagnosis...\")\n        result = run_cmd([\n            sys.executable, \"-c\",\n            \"import transformers; print(transformers.__version__)\"\n        ], capture=True)\n        print(f\"  Output: {result.stdout}\")\n        print(f\"  Error: {result.stderr}\")\n\n    # LightGlue\n    print(\"\\n๐Ÿ” Testing LightGlue...\")\n    try:\n        from lightglue import LightGlue, ALIKED\n        print(f\"  โœ“ LightGlue: OK\")\n        print(f\"  โœ“ ALIKED: OK\")\n    except Exception as e:\n        print(f\"  โŒ LightGlue failed: {e}\")\n        print(f\"  Attempting detailed diagnosis...\")\n        result = run_cmd([\n            sys.executable, \"-c\",\n            \"from lightglue import LightGlue\"\n        ], capture=True)\n        print(f\"  Output: {result.stdout}\")\n        print(f\"  Error: {result.stderr}\")\n\n    # pycolmap\n    print(\"\\n๐Ÿ” Testing pycolmap...\")\n    try:\n        import pycolmap\n        print(f\"  โœ“ pycolmap: OK\")\n    except Exception as e:\n        print(f\"  โŒ pycolmap failed: {e}\")\n\n    # kornia\n    print(\"\\n๐Ÿ” Testing kornia...\")\n    try:\n        import kornia\n        print(f\"  โœ“ kornia: {kornia.__version__}\")\n    except Exception as e:\n        print(f\"  โŒ kornia failed: {e}\")\n\n    print(\"\\n\" + \"=\"*70)\n    print(\"โœ… SETUP COMPLETE\")\n    print(\"=\"*70)\n    print(f\"Working dir: {WORK_DIR}\")\n\n\n\n    print()\n    print(\"-------!pip show numpy | grep Version--------\")\n    !pip show numpy | grep Version\n\n\n    return WORK_DIR\n\n\nif __name__ == \"__main__\":\n    setup_environment()","metadata":{"id":"z6cBHbABzZ0F","outputId":"17806e8d-1dad-4882-f56d-7c3276e0c170"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"import os\n\n%cd /content/gaussian-splatting\n\nfiles = ['database.py', 'h5_to_db.py', 'metric.py']\nbase_url = 'https://huggingface.co/stpete2/imc25_utils/resolve/main/'\n\nfor file in files:\n    if not os.path.exists(file):\n        !wget -q {base_url + file}\n        print(f\"โœ“ {file} download complete\")\n    else:\n        print(f\"โœ“ {file} already exists\")\n","metadata":{"id":"eJrkKiCLzt1G","outputId":"850f6c00-2873-4d47-e341-b4f20dfb902a"},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"# **install libraries**","metadata":{"id":"DwyCRLt4yYfx"}},{"cell_type":"code","source":"","metadata":{"id":"uznATYrilO-N"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"from database import COLMAPDatabase, image_ids_to_pair_id\nfrom h5_to_db import add_keypoints, add_matches\nfrom metric import *","metadata":{"id":"WVr8ggyVuq6q"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"\"\"\"\nGaussian Splatting Pipeline\nSimple and robust pipeline: LightGlue โ†’ COLMAP โ†’ Gaussian Splatting\n\"\"\"\n\nimport os\nimport sys\nimport gc\nimport h5py\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom tqdm import tqdm\nfrom pathlib import Path\nimport subprocess\n\n# LightGlue\nfrom lightglue import ALIKED, LightGlue\nfrom lightglue.utils import load_image\n\n# Transformers for DINO\nfrom transformers import AutoImageProcessor, AutoModel\n\n\n# ============================================================================\n# Configuration\n# ============================================================================\nclass Config:\n    # Feature extraction\n    N_KEYPOINTS = 8192\n    IMAGE_SIZE = 1024\n\n    # Pair selection\n    GLOBAL_TOPK = 200\n    MIN_MATCHES = 10\n    RATIO_THR = 1.2\n\n    # Paths\n    DINO_MODEL = \"facebook/dinov2-base\"  # Change if using local path\n\n    # Device\n    DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')","metadata":{"id":"7NfrJdMvrPZn"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"\ndef normalize_image_sizes_biplet(input_dir, output_dir=None, size=1024, max_images=None):\n    \"\"\"\n    Generates two square crops (Left & Right or Top & Bottom)\n    from each image in a directory and returns the output directory\n    and the list of generated file paths.\n\n    Args:\n        input_dir: Input directory containing source images\n        output_dir: Output directory for processed images\n        size: Target square size (default: 1024)\n        max_images: Maximum number of SOURCE images to process (default: None = all images)\n    \"\"\"\n    if output_dir is None:\n        output_dir = 'output/images_biplet'\n    os.makedirs(output_dir, exist_ok=True)\n\n    print(f\"--- Step 1: Biplet-Square Normalization ---\")\n    print(f\"Generating 2 cropped squares (Left/Right or Top/Bottom) for each image...\")\n    print()\n\n    generated_paths = []\n    converted_count = 0\n    size_stats = {}\n\n    # Sort for consistent processing order\n    image_files = sorted([f for f in os.listdir(input_dir)\n                         if f.lower().endswith(('.jpg', '.jpeg', '.png'))])\n\n    # โ˜… max_images ใงๅ…ƒ็”ปๅƒๆ•ฐใ‚’ๅˆถ้™\n    if max_images is not None:\n        image_files = image_files[:max_images]\n        print(f\"Processing limited to {max_images} source images (will generate {max_images * 2} cropped images)\")\n\n    for img_file in image_files:\n        input_path = os.path.join(input_dir, img_file)\n        try:\n            img = Image.open(input_path)\n            original_size = img.size\n\n            # Tracking original aspect ratios\n            size_key = f\"{original_size[0]}x{original_size[1]}\"\n            size_stats[size_key] = size_stats.get(size_key, 0) + 1\n\n            # Generate 2 crops using the helper function\n            crops = generate_two_crops(img, size)\n            base_name, ext = os.path.splitext(img_file)\n\n            for mode, cropped_img in crops.items():\n                output_path = os.path.join(output_dir, f\"{base_name}_{mode}{ext}\")\n                cropped_img.save(output_path, quality=95)\n                generated_paths.append(output_path)\n\n            converted_count += 1\n            print(f\"  โœ“ {img_file}: {original_size} โ†’ 2 square images generated\")\n\n        except Exception as e:\n            print(f\"  โœ— Error processing {img_file}: {e}\")\n\n    print(f\"\\nProcessing complete: {converted_count} source images processed\")\n    print(f\"Total output images: {len(generated_paths)}\")\n    print(f\"Original size distribution: {size_stats}\")\n\n    return output_dir, generated_paths\n\n\n\ndef generate_two_crops(img, size):\n    \"\"\"\n    Crops the image into a square and returns 2 variations\n    (Left/Right for landscape, Top/Bottom for portrait).\n    \"\"\"\n    width, height = img.size\n    crop_size = min(width, height)\n    crops = {}\n\n    if width > height:\n        # Landscape โ†’ Left & Right\n        positions = {\n            'left': 0,\n            'right': width - crop_size\n        }\n        for mode, x_offset in positions.items():\n            box = (x_offset, 0, x_offset + crop_size, crop_size)\n            crops[mode] = img.crop(box).resize(\n                (size, size),\n                Image.Resampling.LANCZOS\n            )\n\n    else:\n        # Portrait or Square โ†’ Top & Bottom\n        positions = {\n            'top': 0,\n            'bottom': height - crop_size\n        }\n        for mode, y_offset in positions.items():\n            box = (0, y_offset, crop_size, y_offset + crop_size)\n            crops[mode] = img.crop(box).resize(\n                (size, size),\n                Image.Resampling.LANCZOS\n            )\n\n    return crops","metadata":{"id":"A6smO9X0el3d"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"# ============================================================================\n# Step 1: Image Pair Selection (DINO + ALIKED local verify)\n# ============================================================================\n\ndef load_torch_image(fname, device):\n    \"\"\"Load image as torch tensor\"\"\"\n    from PIL import Image\n    import torchvision.transforms as T\n\n    img = Image.open(fname).convert('RGB')\n    transform = T.Compose([\n        T.ToTensor(),\n    ])\n    return transform(img).unsqueeze(0).to(device)\n\ndef extract_dino_global(image_paths, model_path, device):\n    \"\"\"Extract DINO global descriptors\"\"\"\n    print(\"\\n=== Extracting DINO Global Features ===\")\n\n    processor = AutoImageProcessor.from_pretrained(model_path)\n    model = AutoModel.from_pretrained(model_path).eval().to(device)\n\n    global_descs = []\n    for img_path in tqdm(image_paths):\n        img = load_torch_image(img_path, device)\n        with torch.no_grad():\n            inputs = processor(images=img, return_tensors=\"pt\", do_rescale=False).to(device)\n            outputs = model(**inputs)\n            desc = F.normalize(outputs.last_hidden_state[:, 1:].max(dim=1)[0], dim=1, p=2)\n            global_descs.append(desc.cpu())\n\n    global_descs = torch.cat(global_descs, dim=0)\n\n    del model\n    torch.cuda.empty_cache()\n    gc.collect()\n\n    return global_descs\n\ndef build_topk_pairs(global_feats, k, device):\n    \"\"\"Build top-k similar pairs from global features\"\"\"\n    g = global_feats.to(device)\n    sim = g @ g.T\n    sim.fill_diagonal_(-1)\n\n    N = sim.size(0)\n    k = min(k, N - 1)\n\n    topk_indices = torch.topk(sim, k, dim=1).indices.cpu()\n\n    pairs = []\n    for i in range(N):\n        for j in topk_indices[i]:\n            j = j.item()\n            if i < j:\n                pairs.append((i, j))\n\n    return list(set(pairs))\n\ndef extract_aliked_features(image_paths, device):\n    \"\"\"Extract ALIKED local features\"\"\"\n    print(\"\\n=== Extracting ALIKED Local Features ===\")\n\n    extractor = ALIKED(\n        model_name=\"aliked-n16\",\n        max_num_keypoints=Config.N_KEYPOINTS,\n        detection_threshold=0.01,\n        resize=Config.IMAGE_SIZE\n    ).eval().to(device)\n\n    features = []\n    for img_path in tqdm(image_paths):\n        img = load_torch_image(img_path, device)\n        with torch.no_grad():\n            feats = extractor.extract(img)\n            kpts = feats['keypoints'].reshape(-1, 2).cpu()\n            descs = feats['descriptors'].reshape(len(kpts), -1).cpu()\n            features.append({'keypoints': kpts, 'descriptors': descs})\n\n    del extractor\n    torch.cuda.empty_cache()\n    gc.collect()\n\n    return features\n\ndef verify_pairs_locally(pairs, features, device, threshold=Config.MIN_MATCHES):\n    \"\"\"Verify pairs using local descriptor matching\"\"\"\n    print(\"\\n=== Verifying Pairs with Local Features ===\")\n\n    verified = []\n    for i, j in tqdm(pairs):\n        desc1 = features[i]['descriptors'].to(device)\n        desc2 = features[j]['descriptors'].to(device)\n\n        if len(desc1) == 0 or len(desc2) == 0:\n            continue\n\n        # Simple mutual nearest neighbor\n        dist = torch.cdist(desc1, desc2, p=2)\n        min_dist = dist.min(dim=1)[0]\n        n_matches = (min_dist < Config.RATIO_THR).sum().item()\n\n        if n_matches >= threshold:\n            verified.append((i, j))\n\n    return verified\n\ndef get_image_pairs(image_paths):\n    \"\"\"Main pair selection pipeline\"\"\"\n    device = Config.DEVICE\n\n    # 1. DINO global\n    global_feats = extract_dino_global(image_paths, Config.DINO_MODEL, device)\n    pairs = build_topk_pairs(global_feats, Config.GLOBAL_TOPK, device)\n\n    print(f\"Initial pairs from global features: {len(pairs)}\")\n\n    # 2. ALIKED local\n    features = extract_aliked_features(image_paths, device)\n\n    # 3. Local verification\n    verified_pairs = verify_pairs_locally(pairs, features, device)\n\n    print(f\"Verified pairs: {len(verified_pairs)}\")\n\n    return verified_pairs, features","metadata":{"id":"FNjFURfYmVcL"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"# ============================================================================\n# Step 2: Feature Matching (ALIKED + LightGlue)\n# ============================================================================\n\ndef match_pairs_lightglue(image_paths, pairs, features, output_dir):\n    \"\"\"\n    Match image pairs using LightGlue\n    \"\"\"\n    print(\"\\n=== Matching with LightGlue ===\")\n\n    os.makedirs(output_dir, exist_ok=True)\n    keypoints_path = os.path.join(output_dir, 'keypoints.h5')\n    matches_path = os.path.join(output_dir, 'matches.h5')\n\n    if os.path.exists(keypoints_path):\n        os.remove(keypoints_path)\n    if os.path.exists(matches_path):\n        os.remove(matches_path)\n\n    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n    extractor = ALIKED(max_num_keypoints=4096, detection_threshold=0.2, nms_radius=2).eval().to(device)\n    matcher = LightGlue(features='aliked').eval().to(device)\n\n    if isinstance(features, dict):\n        all_keypoints = features['keypoints']\n        all_descriptors = features['descriptors']\n    elif isinstance(features, list):\n        all_keypoints = [f['keypoints'] for f in features]\n        all_descriptors = [f['descriptors'] for f in features]\n    else:\n        raise ValueError(f\"Unsupported features type: {type(features)}\")\n\n    with h5py.File(keypoints_path, 'w') as f_kp:\n        for idx, img_path in enumerate(tqdm(image_paths, desc=\"Saving keypoints\")):\n            img_name = os.path.splitext(os.path.basename(img_path))[0]\n\n            kp = all_keypoints[idx]\n            if torch.is_tensor(kp):\n                kp = kp.cpu().numpy()\n            f_kp.create_dataset(img_name, data=kp)\n\n    # Match pairs\n    with h5py.File(matches_path, 'w') as f_match:\n        for idx1, idx2 in tqdm(pairs, desc=\"Matching\"):\n            with torch.no_grad():\n                kp0 = all_keypoints[idx1]\n                kp1 = all_keypoints[idx2]\n                desc0 = all_descriptors[idx1]\n                desc1 = all_descriptors[idx2]\n\n                if isinstance(kp0, np.ndarray):\n                    kp0 = torch.from_numpy(kp0).float().to(device)\n                    kp1 = torch.from_numpy(kp1).float().to(device)\n                    desc0 = torch.from_numpy(desc0).float().to(device)\n                    desc1 = torch.from_numpy(desc1).float().to(device)\n                else:\n                    kp0 = kp0.float().to(device)\n                    kp1 = kp1.float().to(device)\n                    desc0 = desc0.float().to(device)\n                    desc1 = desc1.float().to(device)\n\n                feats0 = {\n                    'keypoints': kp0.unsqueeze(0) if kp0.dim() == 2 else kp0,\n                    'descriptors': desc0.unsqueeze(0) if desc0.dim() == 2 else desc0,\n                }\n                feats1 = {\n                    'keypoints': kp1.unsqueeze(0) if kp1.dim() == 2 else kp1,\n                    'descriptors': desc1.unsqueeze(0) if desc1.dim() == 2 else desc1,\n                }\n\n                matches01 = matcher({'image0': feats0, 'image1': feats1})\n\n                if 'matches0' in matches01:\n                    matches0 = matches01['matches0'].cpu().numpy()\n                    if matches0.ndim > 1:\n                        matches0 = matches0[0]\n                    valid = matches0 > -1\n                    matches = np.stack([np.where(valid)[0], matches0[valid]], axis=1)\n                elif 'matches' in matches01:\n                    m = matches01['matches']\n                    if isinstance(m, list):\n                        matches = np.array(m)\n                    elif hasattr(m, 'cpu'):\n                        matches = m.cpu().numpy()\n                    else:\n                        matches = np.array(m)\n                else:\n                    continue\n\n                if len(matches) > 0:\n                    img_name1 = os.path.splitext(os.path.basename(image_paths[idx1]))[0]\n                    img_name2 = os.path.splitext(os.path.basename(image_paths[idx2]))[0]\n                    pair_key = f\"{img_name1}_{img_name2}\"\n                    f_match.create_dataset(pair_key, data=matches)\n\n    print(f\"โœ“ Matches saved to {matches_path}\")\n","metadata":{"id":"X-PKgmdwmVcL"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"# ============================================================================\n# Step 3: Import to COLMAP\n# ============================================================================\n\ndef import_to_colmap(image_dir, feature_dir, database_path, single_camera=True):\n    \"\"\"\n    Import features and matches to COLMAP database\n\n    Args:\n            image_dir (str): Directory containing the images.\n            feature_dir (str): Directory to save/load extracted features.\n            database_path (str): Path to the database file.\n            single_camera (bool): Set to True if all images have the same dimensions (e.g., pre-resized).\n    \"\"\"\n    print(\"\\n=== Creating COLMAP Database ===\")\n\n    if os.path.exists(database_path):\n        os.remove(database_path)\n        print(f\"โœ“ Removed existing database\")\n\n    db = COLMAPDatabase.connect(database_path)\n    db.create_tables()\n\n    print(f\"Single camera mode: {single_camera}\")\n\n    image_files = [f for f in os.listdir(image_dir)\n                   if f.lower().endswith(('.jpg', '.jpeg', '.png'))]\n    if not image_files:\n        raise ValueError(f\"No images found in {image_dir}\")\n\n    first_image = sorted(image_files)[0]\n    img_ext = os.path.splitext(first_image)[1]\n    print(f\"Detected image extension: '{img_ext}'\")\n\n    fname_to_id = add_keypoints(\n        db,\n        feature_dir,\n        image_dir,\n        img_ext,\n        'PINHOLE',\n        single_camera=single_camera\n    )\n\n    add_matches(db, feature_dir, fname_to_id)\n    db.commit()\n    db.close()\n\n    print(f\"โœ“ Database created: {database_path}\")\n\n# ============================================================================\n# Step 4: Run COLMAP Mapper\n# ============================================================================\n\ndef run_colmap_mapper(database_path, image_dir, output_dir):\n    \"\"\"\n    Run COLMAP mapper with verbose output\n    \"\"\"\n    print(\"\\n=== Running COLMAP Reconstruction ===\")\n    os.makedirs(output_dir, exist_ok=True)\n    cmd = [\n        'colmap', 'mapper',\n        '--database_path', database_path,\n        '--image_path', image_dir,\n        '--output_path', output_dir,\n        '--Mapper.ba_refine_focal_length', '0',\n        '--Mapper.ba_refine_principal_point', '0',\n        '--Mapper.ba_refine_extra_params', '0',\n        '--Mapper.min_num_matches', '15',\n        '--Mapper.init_min_num_inliers', '50',\n        '--Mapper.max_num_models', '1',\n        '--Mapper.num_threads', '16',\n    ]\n    print(f\"Command: {' '.join(cmd)}\\n\")\n\n    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)\n    for line in process.stdout:\n        print(line, end='')\n    process.wait()\n    if process.returncode == 0:\n        model_dir = os.path.join(output_dir, '0')\n        if os.path.exists(model_dir):\n            print(f\"\\nโœ“ COLMAP reconstruction complete: {model_dir}\")\n            return model_dir\n    raise RuntimeError(\"COLMAP reconstruction failed\")","metadata":{"id":"NJedFruCmVcL"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"# ============================================================================\n# Step 5: Convert to Gaussian Splatting Format (if needed)\n# ============================================================================\n\ndef convert_to_gs_format(colmap_model_dir, output_dir):\n    \"\"\"\n    Verify COLMAP output and prepare paths for Gaussian Splatting.\n\n    Args:\n        colmap_model_dir (str): Path to the COLMAP sparse/0 directory.\n                                Example: /content/output/colmap/sparse/0\n        output_dir (str): Base output directory.\n\n    Returns:\n        colmap_parent_dir (str): The path to be passed to Gaussian Splatting.\n                                 Example: /content/output/colmap (Parent directory containing 'sparse/')\n    \"\"\"\n    print(\"\\n=== Verifying COLMAP Model for Gaussian Splatting ===\")\n\n    import pycolmap\n    reconstruction = pycolmap.Reconstruction(colmap_model_dir)\n\n    print(f\"Registered images: {len(reconstruction.images)}\")\n    print(f\"3D points: {len(reconstruction.points3D)}\")\n\n    # Check for files required by Gaussian Splatting\n    required_files = ['cameras.bin', 'images.bin', 'points3D.bin']\n    for file in required_files:\n        file_path = os.path.join(colmap_model_dir, file)\n        if not os.path.exists(file_path):\n            raise FileNotFoundError(f\"Required file not found: {file}\")\n        print(f\"  โœ“ {file}\")\n\n    # Return the grandparent directory of sparse/0\n    # /content/output/colmap/sparse/0 -> /content/output/colmap\n    colmap_parent_dir = os.path.dirname(os.path.dirname(colmap_model_dir))\n\n    print(f\"\\nโœ“ COLMAP model ready for Gaussian Splatting\")\n    print(f\"  Source path: {colmap_parent_dir}\")\n\n    return colmap_parent_dir","metadata":{"id":"4IioqnC1mVcM"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"def train_gaussian_splatting(colmap_dir, image_dir, output_dir, iterations=30000):\n    \"\"\"\n    Train a Gaussian Splatting model.\n\n    Args:\n        colmap_dir (str): COLMAP parent directory (the directory containing 'sparse/').\n                          Example: /content/output/colmap\n        image_dir (str): Directory containing training images.\n                         Example: /content/output/processed_images\n        output_dir (str): Base directory for Gaussian Splatting output.\n        iterations (int): Total number of training iterations.\n\n    Returns:\n        gs_output_dir (str): Path to the generated Gaussian Splatting output.\n    \"\"\"\n    print(\"\\n=== Training Gaussian Splatting ===\")\n\n    gs_output_dir = os.path.join(output_dir, 'gs_output')\n    os.makedirs(gs_output_dir, exist_ok=True)\n\n    # Verify the Gaussian Splatting directory structure\n    sparse_dir = os.path.join(colmap_dir, 'sparse', '0')\n    if not os.path.exists(sparse_dir):\n        raise FileNotFoundError(f\"COLMAP sparse directory not found: {sparse_dir}\")\n\n    print(f\"COLMAP sparse model: {sparse_dir}\")\n    print(f\"Training images: {image_dir}\")\n    print(f\"Output: {gs_output_dir}\")\n    print(f\"Iterations: {iterations}\")\n\n    # Gaussian Splatting command\n    cmd = [\n        'python', 'train.py',\n        '-s', colmap_dir,            # Source directory (must contain 'sparse/')\n        '--images', image_dir,       # Explicitly specify the images directory\n        '-m', gs_output_dir,          # Model output directory\n        '--iterations', str(iterations),\n        '--test_iterations', str(iterations//2), str(iterations),\n        '--save_iterations', str(iterations//2), str(iterations),\n    ]\n\n    print(f\"\\nCommand: {' '.join(cmd)}\\n\")\n\n    result = subprocess.run(cmd, capture_output=True, text=True)\n\n    print(result.stdout)\n    if result.stderr:\n        print(\"STDERR:\", result.stderr)\n\n    if result.returncode != 0:\n        raise RuntimeError(\"Gaussian Splatting training failed\")\n\n    # Check for the existence of the generated PLY file\n    ply_path = os.path.join(gs_output_dir, 'point_cloud', f'iteration_{iterations}', 'point_cloud.ply')\n    if os.path.exists(ply_path):\n        size_mb = os.path.getsize(ply_path) / (1024 * 1024)\n        print(f\"\\nโœ“ Training complete!\")\n        print(f\"  PLY file: {ply_path}\")\n        print(f\"  Size: {size_mb:.2f} MB\")\n    else:\n        print(f\"โš ๏ธ  Warning: PLY file not found at the expected location\")\n\n    return gs_output_dir","metadata":{"id":"EiHoRSfzQ01b"},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"# **main**","metadata":{"id":"IqNcsheVywit"}},{"cell_type":"code","source":"# NumPyไบ’ๆ›ๆ€งใฎๅ•้กŒใ‚’ไฟฎๆญฃ\nimport numpy as np\n\nprint()\nprint(\"-------!pip show numpy | grep Version--------\")\n!pip show numpy | grep Version\n\n# array_to_blob้–ขๆ•ฐใ‚’ใƒขใƒณใ‚ญใƒผใƒ‘ใƒƒใƒ\ndef array_to_blob_fixed(array):\n    return array.tobytes()\n\n# databaseใƒขใ‚ธใƒฅใƒผใƒซๅ†…ใฎ้–ขๆ•ฐใ‚’็ฝฎใๆ›ใˆ\nimport sys\nif 'database' in sys.modules:\n    sys.modules['database'].array_to_blob = array_to_blob_fixed","metadata":{"id":"oKi5O7rTvdea","outputId":"41728e81-c9a9-4076-a18e-05ab7b5803ab"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"def main_pipeline(image_dir, output_dir, square_size=1024,max_images=None):\n    \"\"\"\n    Complete pipeline: Images โ†’ Square Processing โ†’ COLMAP โ†’ Gaussian Splatting\n    \"\"\"\n    print(\"=\"*70)\n    print(\"Gaussian Splatting Preparation Pipeline\")\n    print(\"=\"*70)\n\n    # Step 0: Standardize images to square format\n    #processed_dir = os.path.join(output_dir, 'processed_images')\n    #processed_image_dir = preprocess_images_square(image_dir, processed_dir, size=square_size)\n\n    processed_image_dir = os.path.join(output_dir, \"processed_images\")\n\n    normalize_image_sizes_biplet(\n    input_dir=image_dir,\n    output_dir=processed_image_dir,\n    size=square_size,\n    max_images=max_images\n)\n\n    # Setup paths\n    feature_dir = os.path.join(output_dir, 'features')\n    colmap_dir = os.path.join(output_dir, 'colmap')\n    database_path = os.path.join(colmap_dir, 'database.db')\n    sparse_dir = os.path.join(colmap_dir, 'sparse')\n\n    os.makedirs(output_dir, exist_ok=True)\n    os.makedirs(colmap_dir, exist_ok=True)\n\n    # Get image paths\n    image_paths = sorted([\n        os.path.join(processed_image_dir, f)\n        for f in os.listdir(processed_image_dir)\n        if f.lower().endswith(('.jpg', '.jpeg', '.png'))\n    ])\n\n    print(f\"\\n๐Ÿ“ธ Found {len(image_paths)} images\")\n\n    # Step 1: Generate image pairs\n    pairs, features = get_image_pairs(image_paths)\n\n    # Step 2: Feature matching with LightGlue\n    match_pairs_lightglue(image_paths, pairs, features, feature_dir)\n\n    # Step 3: Import data into COLMAP\n    # (single_camera=True assumes uniform image dimensions)\n    import_to_colmap(processed_image_dir, feature_dir, database_path, single_camera=True)\n\n    # Step 4: Run COLMAP Sparse Reconstruction\n    model_dir = run_colmap_mapper(database_path, processed_image_dir, sparse_dir)\n\n    # Step 5: Verify and prepare for Gaussian Splatting\n    colmap_parent = convert_to_gs_format(model_dir, output_dir)\n\n    # Step 6: Train Gaussian Splatting model\n    gs_output = train_gaussian_splatting(\n        colmap_dir=colmap_parent,\n        image_dir=processed_image_dir,\n        output_dir=output_dir,\n        iterations=3000\n    )\n\n    print(\"\\n\" + \"=\"*70)\n    print(\"โœ… Full Pipeline Successfully Completed!\")\n    print(\"=\"*70)\n    print(f\"\\nGaussian Splatting model saved at: {gs_output}\")\n\n    return gs_output\n\n\n# Example usage\nif __name__ == \"__main__\":\n    # Example: Tourist photos with varying resolutions/aspect ratios\n    IMAGE_DIR = \"/content/drive/MyDrive/your_folder/fountain100\"\n    OUTPUT_DIR = \"/content/output\"\n\n    gs_output = main_pipeline(IMAGE_DIR, OUTPUT_DIR, square_size=1024,max_images=30)","metadata":{"id":"5-_UvgTtRiC_","outputId":"0f6a7562-16b8-4d99-b4e2-fcc616000067"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"import torch\ntorch.cuda.empty_cache()","metadata":{"id":"SZzD2-K6islN"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"print('Congratulation! Successsfully Completed!')","metadata":{"id":"8jhKKtTqwv7O","outputId":"ff3285b8-a065-46fe-cd8a-7fc73c29ffb4"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"","metadata":{"id":"VQsLeKY8Rl8Y"},"outputs":[],"execution_count":null}]}