Upload biplet_asmk_mast3r_ps2_gs_kg_32_colab_04xx.ipynb
Browse files
biplet_asmk_mast3r_ps2_gs_kg_32_colab_04xx.ipynb
ADDED
|
@@ -0,0 +1,2007 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"metadata": {
|
| 3 |
+
"kernelspec": {
|
| 4 |
+
"name": "python3",
|
| 5 |
+
"display_name": "Python 3",
|
| 6 |
+
"language": "python"
|
| 7 |
+
},
|
| 8 |
+
"language_info": {
|
| 9 |
+
"name": "python",
|
| 10 |
+
"version": "3.12.12",
|
| 11 |
+
"mimetype": "text/x-python",
|
| 12 |
+
"codemirror_mode": {
|
| 13 |
+
"name": "ipython",
|
| 14 |
+
"version": 3
|
| 15 |
+
},
|
| 16 |
+
"pygments_lexer": "ipython3",
|
| 17 |
+
"nbconvert_exporter": "python",
|
| 18 |
+
"file_extension": ".py"
|
| 19 |
+
},
|
| 20 |
+
"colab": {
|
| 21 |
+
"provenance": [],
|
| 22 |
+
"gpuType": "T4"
|
| 23 |
+
},
|
| 24 |
+
"accelerator": "GPU",
|
| 25 |
+
"kaggle": {
|
| 26 |
+
"accelerator": "nvidiaTeslaT4",
|
| 27 |
+
"dataSources": [
|
| 28 |
+
{
|
| 29 |
+
"sourceId": 14571475,
|
| 30 |
+
"sourceType": "datasetVersion",
|
| 31 |
+
"datasetId": 1429416
|
| 32 |
+
}
|
| 33 |
+
],
|
| 34 |
+
"dockerImageVersionId": 31260,
|
| 35 |
+
"isInternetEnabled": true,
|
| 36 |
+
"language": "python",
|
| 37 |
+
"sourceType": "notebook",
|
| 38 |
+
"isGpuEnabled": true
|
| 39 |
+
}
|
| 40 |
+
},
|
| 41 |
+
"nbformat_minor": 0,
|
| 42 |
+
"nbformat": 4,
|
| 43 |
+
"cells": [
|
| 44 |
+
{
|
| 45 |
+
"cell_type": "markdown",
|
| 46 |
+
"source": [
|
| 47 |
+
"# **biplet-asmk-mast3r-ps2-gs-kg-32-colab**\n",
|
| 48 |
+
"\n"
|
| 49 |
+
],
|
| 50 |
+
"metadata": {
|
| 51 |
+
"id": "qDQLX3PArmh8"
|
| 52 |
+
}
|
| 53 |
+
},
|
| 54 |
+
{
|
| 55 |
+
"cell_type": "markdown",
|
| 56 |
+
"source": [
|
| 57 |
+
"https://huggingface.co/datasets/stpete2/ipynb/blob/main/biplet-asmk-mast3r-ps2-gs-kg-32.ipynb"
|
| 58 |
+
],
|
| 59 |
+
"metadata": {
|
| 60 |
+
"id": "Yhla_oBUjLmD"
|
| 61 |
+
}
|
| 62 |
+
},
|
| 63 |
+
{
|
| 64 |
+
"cell_type": "code",
|
| 65 |
+
"source": [
|
| 66 |
+
"#これを元にcolab化 2025/01/22 16:00"
|
| 67 |
+
],
|
| 68 |
+
"metadata": {
|
| 69 |
+
"id": "UyF0gaG8jOXu"
|
| 70 |
+
},
|
| 71 |
+
"execution_count": 1,
|
| 72 |
+
"outputs": []
|
| 73 |
+
},
|
| 74 |
+
{
|
| 75 |
+
"cell_type": "markdown",
|
| 76 |
+
"source": [
|
| 77 |
+
"v.32 全面見直し"
|
| 78 |
+
],
|
| 79 |
+
"metadata": {
|
| 80 |
+
"id": "uNZNREeejLmD"
|
| 81 |
+
}
|
| 82 |
+
},
|
| 83 |
+
{
|
| 84 |
+
"cell_type": "code",
|
| 85 |
+
"source": [],
|
| 86 |
+
"metadata": {
|
| 87 |
+
"trusted": true,
|
| 88 |
+
"id": "yH63Q7yCjLmE"
|
| 89 |
+
},
|
| 90 |
+
"outputs": [],
|
| 91 |
+
"execution_count": 1
|
| 92 |
+
},
|
| 93 |
+
{
|
| 94 |
+
"cell_type": "code",
|
| 95 |
+
"source": [
|
| 96 |
+
"# =====================================================================\n",
|
| 97 |
+
"# CELL 1: Install Dependencies\n",
|
| 98 |
+
"# =====================================================================\n",
|
| 99 |
+
"!pip install roma einops timm huggingface_hub\n",
|
| 100 |
+
"!pip install opencv-python pillow tqdm pyaml cython plyfile\n",
|
| 101 |
+
"!pip install pycolmap trimesh\n",
|
| 102 |
+
"!pip uninstall -y numpy scipy\n",
|
| 103 |
+
"!pip install numpy==1.26.4 scipy==1.11.4\n",
|
| 104 |
+
"break"
|
| 105 |
+
],
|
| 106 |
+
"metadata": {
|
| 107 |
+
"trusted": true,
|
| 108 |
+
"id": "h5Exo6FBjLmE",
|
| 109 |
+
"colab": {
|
| 110 |
+
"base_uri": "https://localhost:8080/",
|
| 111 |
+
"height": 1000
|
| 112 |
+
},
|
| 113 |
+
"outputId": "f04a7093-47d3-45e8-8b95-450e8fc351b6"
|
| 114 |
+
},
|
| 115 |
+
"outputs": [
|
| 116 |
+
{
|
| 117 |
+
"output_type": "stream",
|
| 118 |
+
"name": "stdout",
|
| 119 |
+
"text": [
|
| 120 |
+
"Collecting roma\n",
|
| 121 |
+
" Downloading roma-1.5.4-py3-none-any.whl.metadata (5.5 kB)\n",
|
| 122 |
+
"Requirement already satisfied: einops in /usr/local/lib/python3.12/dist-packages (0.8.1)\n",
|
| 123 |
+
"Requirement already satisfied: timm in /usr/local/lib/python3.12/dist-packages (1.0.24)\n",
|
| 124 |
+
"Requirement already satisfied: huggingface_hub in /usr/local/lib/python3.12/dist-packages (0.36.0)\n",
|
| 125 |
+
"Requirement already satisfied: torch in /usr/local/lib/python3.12/dist-packages (from timm) (2.9.0+cu126)\n",
|
| 126 |
+
"Requirement already satisfied: torchvision in /usr/local/lib/python3.12/dist-packages (from timm) (0.24.0+cu126)\n",
|
| 127 |
+
"Requirement already satisfied: pyyaml in /usr/local/lib/python3.12/dist-packages (from timm) (6.0.3)\n",
|
| 128 |
+
"Requirement already satisfied: safetensors in /usr/local/lib/python3.12/dist-packages (from timm) (0.7.0)\n",
|
| 129 |
+
"Requirement already satisfied: filelock in /usr/local/lib/python3.12/dist-packages (from huggingface_hub) (3.20.3)\n",
|
| 130 |
+
"Requirement already satisfied: fsspec>=2023.5.0 in /usr/local/lib/python3.12/dist-packages (from huggingface_hub) (2025.3.0)\n",
|
| 131 |
+
"Requirement already satisfied: packaging>=20.9 in /usr/local/lib/python3.12/dist-packages (from huggingface_hub) (25.0)\n",
|
| 132 |
+
"Requirement already satisfied: requests in /usr/local/lib/python3.12/dist-packages (from huggingface_hub) (2.32.4)\n",
|
| 133 |
+
"Requirement already satisfied: tqdm>=4.42.1 in /usr/local/lib/python3.12/dist-packages (from huggingface_hub) (4.67.1)\n",
|
| 134 |
+
"Requirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.12/dist-packages (from huggingface_hub) (4.15.0)\n",
|
| 135 |
+
"Requirement already satisfied: hf-xet<2.0.0,>=1.1.3 in /usr/local/lib/python3.12/dist-packages (from huggingface_hub) (1.2.0)\n",
|
| 136 |
+
"Requirement already satisfied: charset_normalizer<4,>=2 in /usr/local/lib/python3.12/dist-packages (from requests->huggingface_hub) (3.4.4)\n",
|
| 137 |
+
"Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.12/dist-packages (from requests->huggingface_hub) (3.11)\n",
|
| 138 |
+
"Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.12/dist-packages (from requests->huggingface_hub) (2.5.0)\n",
|
| 139 |
+
"Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.12/dist-packages (from requests->huggingface_hub) (2026.1.4)\n",
|
| 140 |
+
"Requirement already satisfied: setuptools in /usr/local/lib/python3.12/dist-packages (from torch->timm) (75.2.0)\n",
|
| 141 |
+
"Requirement already satisfied: sympy>=1.13.3 in /usr/local/lib/python3.12/dist-packages (from torch->timm) (1.14.0)\n",
|
| 142 |
+
"Requirement already satisfied: networkx>=2.5.1 in /usr/local/lib/python3.12/dist-packages (from torch->timm) (3.6.1)\n",
|
| 143 |
+
"Requirement already satisfied: jinja2 in /usr/local/lib/python3.12/dist-packages (from torch->timm) (3.1.6)\n",
|
| 144 |
+
"Requirement already satisfied: nvidia-cuda-nvrtc-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->timm) (12.6.77)\n",
|
| 145 |
+
"Requirement already satisfied: nvidia-cuda-runtime-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->timm) (12.6.77)\n",
|
| 146 |
+
"Requirement already satisfied: nvidia-cuda-cupti-cu12==12.6.80 in /usr/local/lib/python3.12/dist-packages (from torch->timm) (12.6.80)\n",
|
| 147 |
+
"Requirement already satisfied: nvidia-cudnn-cu12==9.10.2.21 in /usr/local/lib/python3.12/dist-packages (from torch->timm) (9.10.2.21)\n",
|
| 148 |
+
"Requirement already satisfied: nvidia-cublas-cu12==12.6.4.1 in /usr/local/lib/python3.12/dist-packages (from torch->timm) (12.6.4.1)\n",
|
| 149 |
+
"Requirement already satisfied: nvidia-cufft-cu12==11.3.0.4 in /usr/local/lib/python3.12/dist-packages (from torch->timm) (11.3.0.4)\n",
|
| 150 |
+
"Requirement already satisfied: nvidia-curand-cu12==10.3.7.77 in /usr/local/lib/python3.12/dist-packages (from torch->timm) (10.3.7.77)\n",
|
| 151 |
+
"Requirement already satisfied: nvidia-cusolver-cu12==11.7.1.2 in /usr/local/lib/python3.12/dist-packages (from torch->timm) (11.7.1.2)\n",
|
| 152 |
+
"Requirement already satisfied: nvidia-cusparse-cu12==12.5.4.2 in /usr/local/lib/python3.12/dist-packages (from torch->timm) (12.5.4.2)\n",
|
| 153 |
+
"Requirement already satisfied: nvidia-cusparselt-cu12==0.7.1 in /usr/local/lib/python3.12/dist-packages (from torch->timm) (0.7.1)\n",
|
| 154 |
+
"Requirement already satisfied: nvidia-nccl-cu12==2.27.5 in /usr/local/lib/python3.12/dist-packages (from torch->timm) (2.27.5)\n",
|
| 155 |
+
"Requirement already satisfied: nvidia-nvshmem-cu12==3.3.20 in /usr/local/lib/python3.12/dist-packages (from torch->timm) (3.3.20)\n",
|
| 156 |
+
"Requirement already satisfied: nvidia-nvtx-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->timm) (12.6.77)\n",
|
| 157 |
+
"Requirement already satisfied: nvidia-nvjitlink-cu12==12.6.85 in /usr/local/lib/python3.12/dist-packages (from torch->timm) (12.6.85)\n",
|
| 158 |
+
"Requirement already satisfied: nvidia-cufile-cu12==1.11.1.6 in /usr/local/lib/python3.12/dist-packages (from torch->timm) (1.11.1.6)\n",
|
| 159 |
+
"Requirement already satisfied: triton==3.5.0 in /usr/local/lib/python3.12/dist-packages (from torch->timm) (3.5.0)\n",
|
| 160 |
+
"Requirement already satisfied: numpy in /usr/local/lib/python3.12/dist-packages (from torchvision->timm) (2.0.2)\n",
|
| 161 |
+
"Requirement already satisfied: pillow!=8.3.*,>=5.3.0 in /usr/local/lib/python3.12/dist-packages (from torchvision->timm) (11.3.0)\n",
|
| 162 |
+
"Requirement already satisfied: mpmath<1.4,>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from sympy>=1.13.3->torch->timm) (1.3.0)\n",
|
| 163 |
+
"Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.12/dist-packages (from jinja2->torch->timm) (3.0.3)\n",
|
| 164 |
+
"Downloading roma-1.5.4-py3-none-any.whl (25 kB)\n",
|
| 165 |
+
"Installing collected packages: roma\n",
|
| 166 |
+
"Successfully installed roma-1.5.4\n",
|
| 167 |
+
"Requirement already satisfied: opencv-python in /usr/local/lib/python3.12/dist-packages (4.12.0.88)\n",
|
| 168 |
+
"Requirement already satisfied: pillow in /usr/local/lib/python3.12/dist-packages (11.3.0)\n",
|
| 169 |
+
"Requirement already satisfied: tqdm in /usr/local/lib/python3.12/dist-packages (4.67.1)\n",
|
| 170 |
+
"Collecting pyaml\n",
|
| 171 |
+
" Downloading pyaml-25.7.0-py3-none-any.whl.metadata (12 kB)\n",
|
| 172 |
+
"Requirement already satisfied: cython in /usr/local/lib/python3.12/dist-packages (3.0.12)\n",
|
| 173 |
+
"Collecting plyfile\n",
|
| 174 |
+
" Downloading plyfile-1.1.3-py3-none-any.whl.metadata (43 kB)\n",
|
| 175 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m43.3/43.3 kB\u001b[0m \u001b[31m2.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
| 176 |
+
"\u001b[?25hRequirement already satisfied: numpy<2.3.0,>=2 in /usr/local/lib/python3.12/dist-packages (from opencv-python) (2.0.2)\n",
|
| 177 |
+
"Requirement already satisfied: PyYAML in /usr/local/lib/python3.12/dist-packages (from pyaml) (6.0.3)\n",
|
| 178 |
+
"Downloading pyaml-25.7.0-py3-none-any.whl (26 kB)\n",
|
| 179 |
+
"Downloading plyfile-1.1.3-py3-none-any.whl (36 kB)\n",
|
| 180 |
+
"Installing collected packages: pyaml, plyfile\n",
|
| 181 |
+
"Successfully installed plyfile-1.1.3 pyaml-25.7.0\n",
|
| 182 |
+
"Collecting pycolmap\n",
|
| 183 |
+
" Downloading pycolmap-3.13.0-cp312-cp312-manylinux_2_28_x86_64.whl.metadata (10 kB)\n",
|
| 184 |
+
"Collecting trimesh\n",
|
| 185 |
+
" Downloading trimesh-4.11.1-py3-none-any.whl.metadata (13 kB)\n",
|
| 186 |
+
"Requirement already satisfied: numpy in /usr/local/lib/python3.12/dist-packages (from pycolmap) (2.0.2)\n",
|
| 187 |
+
"Downloading pycolmap-3.13.0-cp312-cp312-manylinux_2_28_x86_64.whl (20.3 MB)\n",
|
| 188 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m20.3/20.3 MB\u001b[0m \u001b[31m57.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
| 189 |
+
"\u001b[?25hDownloading trimesh-4.11.1-py3-none-any.whl (740 kB)\n",
|
| 190 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m740.4/740.4 kB\u001b[0m \u001b[31m63.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
| 191 |
+
"\u001b[?25hInstalling collected packages: trimesh, pycolmap\n",
|
| 192 |
+
"Successfully installed pycolmap-3.13.0 trimesh-4.11.1\n",
|
| 193 |
+
"Found existing installation: numpy 2.0.2\n",
|
| 194 |
+
"Uninstalling numpy-2.0.2:\n",
|
| 195 |
+
" Successfully uninstalled numpy-2.0.2\n",
|
| 196 |
+
"Found existing installation: scipy 1.16.3\n",
|
| 197 |
+
"Uninstalling scipy-1.16.3:\n",
|
| 198 |
+
" Successfully uninstalled scipy-1.16.3\n",
|
| 199 |
+
"Collecting numpy==1.26.4\n",
|
| 200 |
+
" Downloading numpy-1.26.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (61 kB)\n",
|
| 201 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m61.0/61.0 kB\u001b[0m \u001b[31m3.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
| 202 |
+
"\u001b[?25hCollecting scipy==1.11.4\n",
|
| 203 |
+
" Downloading scipy-1.11.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (60 kB)\n",
|
| 204 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m60.4/60.4 kB\u001b[0m \u001b[31m6.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
| 205 |
+
"\u001b[?25hDownloading numpy-1.26.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (18.0 MB)\n",
|
| 206 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m18.0/18.0 MB\u001b[0m \u001b[31m128.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
| 207 |
+
"\u001b[?25hDownloading scipy-1.11.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (35.8 MB)\n",
|
| 208 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m35.8/35.8 MB\u001b[0m \u001b[31m18.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
| 209 |
+
"\u001b[?25hInstalling collected packages: numpy, scipy\n",
|
| 210 |
+
"\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\n",
|
| 211 |
+
"shap 0.50.0 requires numpy>=2, but you have numpy 1.26.4 which is incompatible.\n",
|
| 212 |
+
"libpysal 4.14.1 requires scipy>=1.12.0, but you have scipy 1.11.4 which is incompatible.\n",
|
| 213 |
+
"opencv-contrib-python 4.12.0.88 requires numpy<2.3.0,>=2; python_version >= \"3.9\", but you have numpy 1.26.4 which is incompatible.\n",
|
| 214 |
+
"inequality 1.1.2 requires scipy>=1.12, but you have scipy 1.11.4 which is incompatible.\n",
|
| 215 |
+
"spopt 0.7.0 requires scipy>=1.12.0, but you have scipy 1.11.4 which is incompatible.\n",
|
| 216 |
+
"jaxlib 0.7.2 requires numpy>=2.0, but you have numpy 1.26.4 which is incompatible.\n",
|
| 217 |
+
"jaxlib 0.7.2 requires scipy>=1.13, but you have scipy 1.11.4 which is incompatible.\n",
|
| 218 |
+
"pytensor 2.36.3 requires numpy>=2.0, but you have numpy 1.26.4 which is incompatible.\n",
|
| 219 |
+
"opencv-python 4.12.0.88 requires numpy<2.3.0,>=2; python_version >= \"3.9\", but you have numpy 1.26.4 which is incompatible.\n",
|
| 220 |
+
"giddy 2.3.8 requires scipy>=1.12, but you have scipy 1.11.4 which is incompatible.\n",
|
| 221 |
+
"tobler 0.13.0 requires numpy>=2.0, but you have numpy 1.26.4 which is incompatible.\n",
|
| 222 |
+
"tobler 0.13.0 requires scipy>=1.13, but you have scipy 1.11.4 which is incompatible.\n",
|
| 223 |
+
"esda 2.8.1 requires scipy>=1.12, but you have scipy 1.11.4 which is incompatible.\n",
|
| 224 |
+
"tsfresh 0.21.1 requires scipy>=1.14.0; python_version >= \"3.10\", but you have scipy 1.11.4 which is incompatible.\n",
|
| 225 |
+
"access 1.1.10.post3 requires scipy>=1.14.1, but you have scipy 1.11.4 which is incompatible.\n",
|
| 226 |
+
"jax 0.7.2 requires numpy>=2.0, but you have numpy 1.26.4 which is incompatible.\n",
|
| 227 |
+
"jax 0.7.2 requires scipy>=1.13, but you have scipy 1.11.4 which is incompatible.\n",
|
| 228 |
+
"opencv-python-headless 4.12.0.88 requires numpy<2.3.0,>=2; python_version >= \"3.9\", but you have numpy 1.26.4 which is incompatible.\n",
|
| 229 |
+
"rasterio 1.5.0 requires numpy>=2, but you have numpy 1.26.4 which is incompatible.\n",
|
| 230 |
+
"mapclassify 2.10.0 requires scipy>=1.12, but you have scipy 1.11.4 which is incompatible.\u001b[0m\u001b[31m\n",
|
| 231 |
+
"\u001b[0mSuccessfully installed numpy-1.26.4 scipy-1.11.4\n"
|
| 232 |
+
]
|
| 233 |
+
},
|
| 234 |
+
{
|
| 235 |
+
"output_type": "display_data",
|
| 236 |
+
"data": {
|
| 237 |
+
"application/vnd.colab-display-data+json": {
|
| 238 |
+
"pip_warning": {
|
| 239 |
+
"packages": [
|
| 240 |
+
"numpy"
|
| 241 |
+
]
|
| 242 |
+
},
|
| 243 |
+
"id": "eb672074ebc8494ca76a9c18370c3f16"
|
| 244 |
+
}
|
| 245 |
+
},
|
| 246 |
+
"metadata": {}
|
| 247 |
+
},
|
| 248 |
+
{
|
| 249 |
+
"output_type": "error",
|
| 250 |
+
"ename": "SyntaxError",
|
| 251 |
+
"evalue": "'break' outside loop (ipython-input-2884072918.py, line 9)",
|
| 252 |
+
"traceback": [
|
| 253 |
+
"\u001b[0;36m File \u001b[0;32m\"/tmp/ipython-input-2884072918.py\"\u001b[0;36m, line \u001b[0;32m9\u001b[0m\n\u001b[0;31m break\u001b[0m\n\u001b[0m ^\u001b[0m\n\u001b[0;31mSyntaxError\u001b[0m\u001b[0;31m:\u001b[0m 'break' outside loop\n"
|
| 254 |
+
]
|
| 255 |
+
}
|
| 256 |
+
],
|
| 257 |
+
"execution_count": 2
|
| 258 |
+
},
|
| 259 |
+
{
|
| 260 |
+
"cell_type": "code",
|
| 261 |
+
"source": [
|
| 262 |
+
"# =====================================================================\n",
|
| 263 |
+
"# CELL 2: Restart Kernel (Run this after Cell 1)\n",
|
| 264 |
+
"# =====================================================================\n",
|
| 265 |
+
"# Restart kernel, then run from this cell\n",
|
| 266 |
+
"\n",
|
| 267 |
+
"from google.colab import drive\n",
|
| 268 |
+
"drive.mount('/content/drive')\n",
|
| 269 |
+
"\n",
|
| 270 |
+
"# =====================================================================\n",
|
| 271 |
+
"# CELL 3: Verify NumPy Version\n",
|
| 272 |
+
"# =====================================================================\n",
|
| 273 |
+
"import numpy as np\n",
|
| 274 |
+
"print(f\"✓ np: {np.__version__} - {np.__file__}\")\n",
|
| 275 |
+
"!pip show numpy | grep Version\n",
|
| 276 |
+
"\n",
|
| 277 |
+
"# =====================================================================\n",
|
| 278 |
+
"# CELL 4: Verify Roma Installation\n",
|
| 279 |
+
"# =====================================================================\n",
|
| 280 |
+
"try:\n",
|
| 281 |
+
" import roma\n",
|
| 282 |
+
" print(\"✓ roma is installed\")\n",
|
| 283 |
+
"except ModuleNotFoundError:\n",
|
| 284 |
+
" print(\"⚠️ roma not found, installing...\")\n",
|
| 285 |
+
" !pip install roma\n",
|
| 286 |
+
" import roma\n",
|
| 287 |
+
" print(\"✓ roma installed\")"
|
| 288 |
+
],
|
| 289 |
+
"metadata": {
|
| 290 |
+
"trusted": true,
|
| 291 |
+
"id": "XgxGC30cjLmF",
|
| 292 |
+
"colab": {
|
| 293 |
+
"base_uri": "https://localhost:8080/"
|
| 294 |
+
},
|
| 295 |
+
"outputId": "c51cda02-3871-4ea8-fb9d-05b992f6f697"
|
| 296 |
+
},
|
| 297 |
+
"outputs": [
|
| 298 |
+
{
|
| 299 |
+
"output_type": "stream",
|
| 300 |
+
"name": "stdout",
|
| 301 |
+
"text": [
|
| 302 |
+
"Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n",
|
| 303 |
+
"✓ np: 1.26.4 - /usr/local/lib/python3.12/dist-packages/numpy/__init__.py\n",
|
| 304 |
+
"Version: 1.26.4\n",
|
| 305 |
+
"Version 3.1, 31 March 2009\n",
|
| 306 |
+
" Version 3, 29 June 2007\n",
|
| 307 |
+
" 5. Conveying Modified Source Versions.\n",
|
| 308 |
+
" 14. Revised Versions of this License.\n",
|
| 309 |
+
"✓ roma is installed\n"
|
| 310 |
+
]
|
| 311 |
+
}
|
| 312 |
+
],
|
| 313 |
+
"execution_count": 9
|
| 314 |
+
},
|
| 315 |
+
{
|
| 316 |
+
"cell_type": "code",
|
| 317 |
+
"source": [
|
| 318 |
+
"# =====================================================================\n",
|
| 319 |
+
"# CELL 5: Clone Repositories\n",
|
| 320 |
+
"# =====================================================================\n",
|
| 321 |
+
"import os\n",
|
| 322 |
+
"import sys\n",
|
| 323 |
+
"\n",
|
| 324 |
+
"# MASt3Rをクローン\n",
|
| 325 |
+
"if not os.path.exists('/content/mast3r'):\n",
|
| 326 |
+
" print(\"Cloning MASt3R repository...\")\n",
|
| 327 |
+
" !git clone --recursive https://github.com/naver/mast3r.git /content/mast3r\n",
|
| 328 |
+
" print(\"✓ MASt3R cloned\")\n",
|
| 329 |
+
"else:\n",
|
| 330 |
+
" print(\"✓ MASt3R already exists\")\n",
|
| 331 |
+
"\n",
|
| 332 |
+
"# DUSt3Rをクローン(MASt3R内に必要)\n",
|
| 333 |
+
"if not os.path.exists('/content/mast3r/dust3r'):\n",
|
| 334 |
+
" print(\"Cloning DUSt3R repository...\")\n",
|
| 335 |
+
" !git clone --recursive https://github.com/naver/dust3r.git /content/mast3r/dust3r\n",
|
| 336 |
+
" print(\"✓ DUSt3R cloned\")\n",
|
| 337 |
+
"else:\n",
|
| 338 |
+
" print(\"✓ DUSt3R already exists\")\n",
|
| 339 |
+
"\n",
|
| 340 |
+
"# ASMKをクローン\n",
|
| 341 |
+
"if not os.path.exists('/content/asmk'):\n",
|
| 342 |
+
" print(\"Cloning ASMK repository...\")\n",
|
| 343 |
+
" !git clone https://github.com/jenicek/asmk.git /content/asmk\n",
|
| 344 |
+
" print(\"✓ ASMK cloned\")\n",
|
| 345 |
+
"else:\n",
|
| 346 |
+
" print(\"✓ ASMK already exists\")\n",
|
| 347 |
+
"\n",
|
| 348 |
+
"# パスを追加\n",
|
| 349 |
+
"sys.path.insert(0, '/content/mast3r')\n",
|
| 350 |
+
"sys.path.insert(0, '/content/mast3r/dust3r')\n",
|
| 351 |
+
"sys.path.insert(0, '/content/asmk')\n",
|
| 352 |
+
"\n",
|
| 353 |
+
"# 確認\n",
|
| 354 |
+
"try:\n",
|
| 355 |
+
" from dust3r.model import AsymmetricCroCo3DStereo\n",
|
| 356 |
+
" print(\"✓ dust3r.model imported successfully\")\n",
|
| 357 |
+
"except ImportError as e:\n",
|
| 358 |
+
" print(f\"✗ Import error: {e}\")\n",
|
| 359 |
+
"\n",
|
| 360 |
+
"# croco(MASt3Rの依存関係)もクローン\n",
|
| 361 |
+
"if not os.path.exists('/content/mast3r/croco'):\n",
|
| 362 |
+
" print(\"Cloning CroCo repository...\")\n",
|
| 363 |
+
" !git clone --recursive https://github.com/naver/croco.git /content/mast3r/croco\n",
|
| 364 |
+
" print(\"✓ CroCo cloned\")\n",
|
| 365 |
+
"\n",
|
| 366 |
+
"# CroCo v2の依存関係\n",
|
| 367 |
+
"if not os.path.exists('/content/mast3r/croco/models/curope'):\n",
|
| 368 |
+
" print(\"Cloning CuRoPe...\")\n",
|
| 369 |
+
" !git clone --recursive https://github.com/naver/curope.git /content/mast3r/croco/models/curope\n",
|
| 370 |
+
" print(\"✓ CuRoPe cloned\")\n",
|
| 371 |
+
"\n",
|
| 372 |
+
"# =====================================================================\n",
|
| 373 |
+
"# CELL 6: Clone and Build Gaussian Splatting\n",
|
| 374 |
+
"# =====================================================================\n",
|
| 375 |
+
"print(\"\\n\" + \"=\"*70)\n",
|
| 376 |
+
"print(\"STEP 2: Clone Gaussian Splatting\")\n",
|
| 377 |
+
"print(\"=\"*70)\n",
|
| 378 |
+
"WORK_DIR = \"/content/gaussian-splatting\"\n",
|
| 379 |
+
"\n",
|
| 380 |
+
"import subprocess\n",
|
| 381 |
+
"if not os.path.exists(WORK_DIR):\n",
|
| 382 |
+
" subprocess.run([\n",
|
| 383 |
+
" \"git\", \"clone\", \"--recursive\",\n",
|
| 384 |
+
" \"https://github.com/graphdeco-inria/gaussian-splatting.git\",\n",
|
| 385 |
+
" WORK_DIR\n",
|
| 386 |
+
" ], capture_output=True)\n",
|
| 387 |
+
" print(\"✓ Cloned\")\n",
|
| 388 |
+
"else:\n",
|
| 389 |
+
" print(\"✓ Already exists\")\n",
|
| 390 |
+
"\n",
|
| 391 |
+
"# インストールが必要なディレクトリ\n",
|
| 392 |
+
"submodules = [\n",
|
| 393 |
+
" \"/content/gaussian-splatting/submodules/diff-gaussian-rasterization\",\n",
|
| 394 |
+
" \"/content/gaussian-splatting/submodules/simple-knn\"\n",
|
| 395 |
+
"]\n",
|
| 396 |
+
"\n",
|
| 397 |
+
"for path in submodules:\n",
|
| 398 |
+
" print(f\"Installing {path}...\")\n",
|
| 399 |
+
" subprocess.run([\"pip\", \"install\", path], check=True)\n",
|
| 400 |
+
"\n",
|
| 401 |
+
"print(\"✓ Custom CUDA modules installed.\")\n",
|
| 402 |
+
"\n",
|
| 403 |
+
"# =====================================================================\n",
|
| 404 |
+
"# CELL 7: Verify NumPy Again\n",
|
| 405 |
+
"# =====================================================================\n",
|
| 406 |
+
"import numpy as np\n",
|
| 407 |
+
"print(f\"✓ np: {np.__version__} - {np.__file__}\")\n",
|
| 408 |
+
"!pip show numpy | grep Version"
|
| 409 |
+
],
|
| 410 |
+
"metadata": {
|
| 411 |
+
"trusted": true,
|
| 412 |
+
"id": "EF_Z8VDLjLmF",
|
| 413 |
+
"colab": {
|
| 414 |
+
"base_uri": "https://localhost:8080/"
|
| 415 |
+
},
|
| 416 |
+
"outputId": "b07d4814-f604-4d14-8078-8885526be114"
|
| 417 |
+
},
|
| 418 |
+
"outputs": [
|
| 419 |
+
{
|
| 420 |
+
"output_type": "stream",
|
| 421 |
+
"name": "stdout",
|
| 422 |
+
"text": [
|
| 423 |
+
"✓ MASt3R already exists\n",
|
| 424 |
+
"✓ DUSt3R already exists\n",
|
| 425 |
+
"✓ ASMK already exists\n",
|
| 426 |
+
"✓ dust3r.model imported successfully\n",
|
| 427 |
+
"\n",
|
| 428 |
+
"======================================================================\n",
|
| 429 |
+
"STEP 2: Clone Gaussian Splatting\n",
|
| 430 |
+
"======================================================================\n",
|
| 431 |
+
"✓ Already exists\n",
|
| 432 |
+
"Installing /content/gaussian-splatting/submodules/diff-gaussian-rasterization...\n",
|
| 433 |
+
"Installing /content/gaussian-splatting/submodules/simple-knn...\n",
|
| 434 |
+
"✓ Custom CUDA modules installed.\n",
|
| 435 |
+
"✓ np: 1.26.4 - /usr/local/lib/python3.12/dist-packages/numpy/__init__.py\n",
|
| 436 |
+
"Version: 1.26.4\n",
|
| 437 |
+
"Version 3.1, 31 March 2009\n",
|
| 438 |
+
" Version 3, 29 June 2007\n",
|
| 439 |
+
" 5. Conveying Modified Source Versions.\n",
|
| 440 |
+
" 14. Revised Versions of this License.\n"
|
| 441 |
+
]
|
| 442 |
+
}
|
| 443 |
+
],
|
| 444 |
+
"execution_count": 10
|
| 445 |
+
},
|
| 446 |
+
{
|
| 447 |
+
"cell_type": "code",
|
| 448 |
+
"source": [
|
| 449 |
+
"# =====================================================================\n",
|
| 450 |
+
"# CELL 8: Import Core Libraries and Configure Memory\n",
|
| 451 |
+
"# =====================================================================\n",
|
| 452 |
+
"import os\n",
|
| 453 |
+
"import sys\n",
|
| 454 |
+
"import gc\n",
|
| 455 |
+
"import torch\n",
|
| 456 |
+
"import numpy as np\n",
|
| 457 |
+
"from pathlib import Path\n",
|
| 458 |
+
"from tqdm import tqdm\n",
|
| 459 |
+
"import torch.nn.functional as F\n",
|
| 460 |
+
"import shutil\n",
|
| 461 |
+
"from PIL import Image\n",
|
| 462 |
+
"\n",
|
| 463 |
+
"# MEMORY MANAGEMENT\n",
|
| 464 |
+
"os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'expandable_segments:True'\n",
|
| 465 |
+
"\n",
|
| 466 |
+
"def clear_memory():\n",
|
| 467 |
+
" \"\"\"メモリクリア関数\"\"\"\n",
|
| 468 |
+
" gc.collect()\n",
|
| 469 |
+
" if torch.cuda.is_available():\n",
|
| 470 |
+
" torch.cuda.empty_cache()\n",
|
| 471 |
+
" torch.cuda.synchronize()\n",
|
| 472 |
+
"\n",
|
| 473 |
+
"# CONFIGURATION\n",
|
| 474 |
+
"class Config:\n",
|
| 475 |
+
" DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
|
| 476 |
+
" MAST3R_WEIGHTS = \"naver/MASt3R_ViTLarge_BaseDecoder_512_catmlpdpt_metric\"\n",
|
| 477 |
+
" DUST3R_WEIGHTS = \"naver/DUSt3R_ViTLarge_BaseDecoder_512_dpt\"\n",
|
| 478 |
+
" RETRIEVAL_TOPK = 10\n",
|
| 479 |
+
" IMAGE_SIZE = 224\n",
|
| 480 |
+
"\n",
|
| 481 |
+
"# =====================================================================\n",
|
| 482 |
+
"# CELL 9: Image Preprocessing Functions\n",
|
| 483 |
+
"# =====================================================================\n",
|
| 484 |
+
"def normalize_image_sizes_biplet(input_dir, output_dir=None, size=1024):\n",
|
| 485 |
+
" \"\"\"\n",
|
| 486 |
+
" Generates two square crops (Left & Right or Top & Bottom)\n",
|
| 487 |
+
" from each image in a directory.\n",
|
| 488 |
+
" \"\"\"\n",
|
| 489 |
+
" if output_dir is None:\n",
|
| 490 |
+
" output_dir = input_dir + \"_biplet\"\n",
|
| 491 |
+
"\n",
|
| 492 |
+
" os.makedirs(output_dir, exist_ok=True)\n",
|
| 493 |
+
"\n",
|
| 494 |
+
" print(f\"\\n=== Generating Biplet Crops ({size}x{size}) ===\")\n",
|
| 495 |
+
"\n",
|
| 496 |
+
" converted_count = 0\n",
|
| 497 |
+
" size_stats = {}\n",
|
| 498 |
+
"\n",
|
| 499 |
+
" for img_file in tqdm(sorted(os.listdir(input_dir)), desc=\"Creating biplets\"):\n",
|
| 500 |
+
" if not img_file.lower().endswith(('.jpg', '.jpeg', '.png')):\n",
|
| 501 |
+
" continue\n",
|
| 502 |
+
"\n",
|
| 503 |
+
" input_path = os.path.join(input_dir, img_file)\n",
|
| 504 |
+
"\n",
|
| 505 |
+
" try:\n",
|
| 506 |
+
" img = Image.open(input_path)\n",
|
| 507 |
+
" original_size = img.size\n",
|
| 508 |
+
"\n",
|
| 509 |
+
" size_key = f\"{original_size[0]}x{original_size[1]}\"\n",
|
| 510 |
+
" size_stats[size_key] = size_stats.get(size_key, 0) + 1\n",
|
| 511 |
+
"\n",
|
| 512 |
+
" # Generate 2 crops\n",
|
| 513 |
+
" crops = generate_two_crops(img, size)\n",
|
| 514 |
+
"\n",
|
| 515 |
+
" base_name, ext = os.path.splitext(img_file)\n",
|
| 516 |
+
" for mode, cropped_img in crops.items():\n",
|
| 517 |
+
" output_path = os.path.join(output_dir, f\"{base_name}_{mode}{ext}\")\n",
|
| 518 |
+
" cropped_img.save(output_path, quality=95)\n",
|
| 519 |
+
"\n",
|
| 520 |
+
" converted_count += 1\n",
|
| 521 |
+
"\n",
|
| 522 |
+
" except Exception as e:\n",
|
| 523 |
+
" print(f\" ✗ Error processing {img_file}: {e}\")\n",
|
| 524 |
+
"\n",
|
| 525 |
+
" print(f\"\\n✓ Biplet generation complete:\")\n",
|
| 526 |
+
" print(f\" Source images: {converted_count}\")\n",
|
| 527 |
+
" print(f\" Biplet crops generated: {converted_count * 2}\")\n",
|
| 528 |
+
" print(f\" Original size distribution: {size_stats}\")\n",
|
| 529 |
+
"\n",
|
| 530 |
+
" return output_dir\n",
|
| 531 |
+
"\n",
|
| 532 |
+
"\n",
|
| 533 |
+
"def generate_two_crops(img, size):\n",
|
| 534 |
+
" \"\"\"\n",
|
| 535 |
+
" Crops the image into a square and returns 2 variations\n",
|
| 536 |
+
" \"\"\"\n",
|
| 537 |
+
" width, height = img.size\n",
|
| 538 |
+
" crop_size = min(width, height)\n",
|
| 539 |
+
" crops = {}\n",
|
| 540 |
+
"\n",
|
| 541 |
+
" if width > height:\n",
|
| 542 |
+
" # Landscape → Left & Right\n",
|
| 543 |
+
" positions = {\n",
|
| 544 |
+
" 'left': 0,\n",
|
| 545 |
+
" 'right': width - crop_size\n",
|
| 546 |
+
" }\n",
|
| 547 |
+
" for mode, x_offset in positions.items():\n",
|
| 548 |
+
" box = (x_offset, 0, x_offset + crop_size, crop_size)\n",
|
| 549 |
+
" crops[mode] = img.crop(box).resize(\n",
|
| 550 |
+
" (size, size),\n",
|
| 551 |
+
" Image.Resampling.LANCZOS\n",
|
| 552 |
+
" )\n",
|
| 553 |
+
" else:\n",
|
| 554 |
+
" # Portrait or Square → Top & Bottom\n",
|
| 555 |
+
" positions = {\n",
|
| 556 |
+
" 'top': 0,\n",
|
| 557 |
+
" 'bottom': height - crop_size\n",
|
| 558 |
+
" }\n",
|
| 559 |
+
" for mode, y_offset in positions.items():\n",
|
| 560 |
+
" box = (0, y_offset, crop_size, y_offset + crop_size)\n",
|
| 561 |
+
" crops[mode] = img.crop(box).resize(\n",
|
| 562 |
+
" (size, size),\n",
|
| 563 |
+
" Image.Resampling.LANCZOS\n",
|
| 564 |
+
" )\n",
|
| 565 |
+
"\n",
|
| 566 |
+
" return crops\n",
|
| 567 |
+
"\n",
|
| 568 |
+
"# =====================================================================\n",
|
| 569 |
+
"# CELL 10: Image Loading Function\n",
|
| 570 |
+
"# =====================================================================\n",
|
| 571 |
+
"def load_images_from_directory(image_dir, max_images=200):\n",
|
| 572 |
+
" \"\"\"ディレクトリから画像をロード\"\"\"\n",
|
| 573 |
+
" print(f\"\\nLoading images from: {image_dir}\")\n",
|
| 574 |
+
"\n",
|
| 575 |
+
" valid_extensions = {'.jpg', '.jpeg', '.png', '.bmp'}\n",
|
| 576 |
+
" image_paths = []\n",
|
| 577 |
+
"\n",
|
| 578 |
+
" for ext in valid_extensions:\n",
|
| 579 |
+
" image_paths.extend(sorted(Path(image_dir).glob(f'*{ext}')))\n",
|
| 580 |
+
" image_paths.extend(sorted(Path(image_dir).glob(f'*{ext.upper()}')))\n",
|
| 581 |
+
"\n",
|
| 582 |
+
" image_paths = sorted(set(str(p) for p in image_paths))\n",
|
| 583 |
+
"\n",
|
| 584 |
+
" if len(image_paths) > max_images:\n",
|
| 585 |
+
" print(f\"⚠️ Limiting from {len(image_paths)} to {max_images} images\")\n",
|
| 586 |
+
" image_paths = image_paths[:max_images]\n",
|
| 587 |
+
"\n",
|
| 588 |
+
" print(f\"✓ Found {len(image_paths)} images\")\n",
|
| 589 |
+
" return image_paths"
|
| 590 |
+
],
|
| 591 |
+
"metadata": {
|
| 592 |
+
"trusted": true,
|
| 593 |
+
"id": "_rFAsFGDjLmF"
|
| 594 |
+
},
|
| 595 |
+
"outputs": [],
|
| 596 |
+
"execution_count": 11
|
| 597 |
+
},
|
| 598 |
+
{
|
| 599 |
+
"cell_type": "code",
|
| 600 |
+
"source": [
|
| 601 |
+
"# =====================================================================\n",
|
| 602 |
+
"# CELL 11: MASt3R Model Loading\n",
|
| 603 |
+
"# =====================================================================\n",
|
| 604 |
+
"def load_mast3r_model(device):\n",
|
| 605 |
+
" \"\"\"MASt3Rモデルをロード\"\"\"\n",
|
| 606 |
+
" print(\"\\n=== Loading MASt3R Model ===\")\n",
|
| 607 |
+
"\n",
|
| 608 |
+
" if '/content/mast3r' not in sys.path:\n",
|
| 609 |
+
" sys.path.insert(0, '/content/mast3r')\n",
|
| 610 |
+
" if '/content/mast3r/dust3r' not in sys.path:\n",
|
| 611 |
+
" sys.path.insert(0, '/content/mast3r/dust3r')\n",
|
| 612 |
+
"\n",
|
| 613 |
+
" from dust3r.model import AsymmetricCroCo3DStereo\n",
|
| 614 |
+
"\n",
|
| 615 |
+
" try:\n",
|
| 616 |
+
" print(f\"Attempting to load: {Config.MAST3R_WEIGHTS}\")\n",
|
| 617 |
+
" model = AsymmetricCroCo3DStereo.from_pretrained(Config.MAST3R_WEIGHTS).to(device)\n",
|
| 618 |
+
" print(\"✓ Loaded MASt3R model\")\n",
|
| 619 |
+
" except Exception as e:\n",
|
| 620 |
+
" print(f\"⚠️ Failed to load MASt3R: {e}\")\n",
|
| 621 |
+
" print(f\"Trying DUSt3R instead: {Config.DUST3R_WEIGHTS}\")\n",
|
| 622 |
+
" model = AsymmetricCroCo3DStereo.from_pretrained(Config.DUST3R_WEIGHTS).to(device)\n",
|
| 623 |
+
" print(\"✓ Loaded DUSt3R model as fallback\")\n",
|
| 624 |
+
"\n",
|
| 625 |
+
" model.eval()\n",
|
| 626 |
+
" print(f\"✓ Model loaded on {device}\")\n",
|
| 627 |
+
" return model\n",
|
| 628 |
+
"\n",
|
| 629 |
+
"# =====================================================================\n",
|
| 630 |
+
"# CELL 12: Feature Extraction (FIXED)\n",
|
| 631 |
+
"# =====================================================================\n",
|
| 632 |
+
"def extract_mast3r_features(model, image_paths, device, batch_size=1):\n",
|
| 633 |
+
" \"\"\"MASt3Rモデルを使用して特徴量を抽出(修正版)\"\"\"\n",
|
| 634 |
+
" print(\"\\n=== Extracting MASt3R Features ===\")\n",
|
| 635 |
+
" from dust3r.utils.image import load_images\n",
|
| 636 |
+
" from dust3r.inference import inference\n",
|
| 637 |
+
"\n",
|
| 638 |
+
" all_features = []\n",
|
| 639 |
+
"\n",
|
| 640 |
+
" for i in tqdm(range(len(image_paths)), desc=\"Features\"):\n",
|
| 641 |
+
" img_path = image_paths[i]\n",
|
| 642 |
+
"\n",
|
| 643 |
+
" # 同じ画像を2回ロード(ペアとして)\n",
|
| 644 |
+
" images = load_images([img_path, img_path], size=Config.IMAGE_SIZE)\n",
|
| 645 |
+
" pairs = [(images[0], images[1])]\n",
|
| 646 |
+
"\n",
|
| 647 |
+
" with torch.no_grad():\n",
|
| 648 |
+
" output = inference(pairs, model, device, batch_size=1)\n",
|
| 649 |
+
"\n",
|
| 650 |
+
" try:\n",
|
| 651 |
+
" # outputから特徴量を抽出(修正版)\n",
|
| 652 |
+
" if isinstance(output, dict):\n",
|
| 653 |
+
" if 'pred1' in output:\n",
|
| 654 |
+
" pred1 = output['pred1']\n",
|
| 655 |
+
" if isinstance(pred1, dict):\n",
|
| 656 |
+
" # 'desc'または'conf'を優先的に使用\n",
|
| 657 |
+
" if 'desc' in pred1:\n",
|
| 658 |
+
" desc = pred1['desc']\n",
|
| 659 |
+
" elif 'conf' in pred1:\n",
|
| 660 |
+
" desc = pred1['conf']\n",
|
| 661 |
+
" elif 'pts3d' in pred1:\n",
|
| 662 |
+
" desc = pred1['pts3d']\n",
|
| 663 |
+
" else:\n",
|
| 664 |
+
" desc = list(pred1.values())[0]\n",
|
| 665 |
+
" else:\n",
|
| 666 |
+
" desc = pred1\n",
|
| 667 |
+
" elif 'view1' in output:\n",
|
| 668 |
+
" view1 = output['view1']\n",
|
| 669 |
+
" if isinstance(view1, dict):\n",
|
| 670 |
+
" desc = view1.get('desc', view1.get('conf', view1.get('pts3d', list(view1.values())[0])))\n",
|
| 671 |
+
" else:\n",
|
| 672 |
+
" desc = view1\n",
|
| 673 |
+
" else:\n",
|
| 674 |
+
" desc = list(output.values())[0]\n",
|
| 675 |
+
" elif isinstance(output, tuple) and len(output) == 2:\n",
|
| 676 |
+
" view1, view2 = output\n",
|
| 677 |
+
" if isinstance(view1, dict):\n",
|
| 678 |
+
" desc = view1.get('desc', view1.get('conf', view1.get('pts3d', list(view1.values())[0])))\n",
|
| 679 |
+
" else:\n",
|
| 680 |
+
" desc = view1\n",
|
| 681 |
+
" elif isinstance(output, list):\n",
|
| 682 |
+
" item = output[0]\n",
|
| 683 |
+
" if isinstance(item, dict):\n",
|
| 684 |
+
" desc = item.get('desc', item.get('conf', item.get('pts3d', list(item.values())[0])))\n",
|
| 685 |
+
" else:\n",
|
| 686 |
+
" desc = item\n",
|
| 687 |
+
" else:\n",
|
| 688 |
+
" desc = output\n",
|
| 689 |
+
"\n",
|
| 690 |
+
" # テンソルをCPUに移動して保存\n",
|
| 691 |
+
" if isinstance(desc, torch.Tensor):\n",
|
| 692 |
+
" desc = desc.detach().cpu()\n",
|
| 693 |
+
"\n",
|
| 694 |
+
" # 4次元の場合はbatch次元を削除\n",
|
| 695 |
+
" if desc.dim() == 4:\n",
|
| 696 |
+
" desc = desc.squeeze(0)\n",
|
| 697 |
+
"\n",
|
| 698 |
+
" # 特徴量の次元が小さすぎる場合(RGB画像など)は平均プーリング\n",
|
| 699 |
+
" if desc.shape[-1] < 16:\n",
|
| 700 |
+
" # [H, W, 3] -> [H, W, 64] に拡張\n",
|
| 701 |
+
" desc = desc.unsqueeze(-1).repeat(1, 1, 1, 64 // desc.shape[-1]).reshape(desc.shape[0], desc.shape[1], -1)\n",
|
| 702 |
+
"\n",
|
| 703 |
+
" all_features.append(desc)\n",
|
| 704 |
+
"\n",
|
| 705 |
+
" except Exception as e:\n",
|
| 706 |
+
" print(f\"⚠️ Error extracting features for image {i}: {e}\")\n",
|
| 707 |
+
" # デフォルト特徴量\n",
|
| 708 |
+
" all_features.append(torch.zeros((Config.IMAGE_SIZE, Config.IMAGE_SIZE, 64)))\n",
|
| 709 |
+
"\n",
|
| 710 |
+
" # メモリクリア\n",
|
| 711 |
+
" del output, images, pairs\n",
|
| 712 |
+
" if i % 10 == 0:\n",
|
| 713 |
+
" torch.cuda.empty_cache()\n",
|
| 714 |
+
"\n",
|
| 715 |
+
" print(f\"✓ Extracted features for {len(all_features)} images\")\n",
|
| 716 |
+
" if all_features:\n",
|
| 717 |
+
" first_feat = all_features[0]\n",
|
| 718 |
+
" if isinstance(first_feat, torch.Tensor):\n",
|
| 719 |
+
" print(f\" Feature shape: {first_feat.shape}\")\n",
|
| 720 |
+
"\n",
|
| 721 |
+
" return all_features\n",
|
| 722 |
+
"\n",
|
| 723 |
+
"# =====================================================================\n",
|
| 724 |
+
"# CELL 13: ASMK Similarity Computation (FIXED)\n",
|
| 725 |
+
"# =====================================================================\n",
|
| 726 |
+
"def compute_asmk_similarity(features, codebook=None):\n",
|
| 727 |
+
" \"\"\"ASMKを使用して類似度行列を計算(修正版)\"\"\"\n",
|
| 728 |
+
" print(\"\\n=== Computing ASMK Similarity ===\")\n",
|
| 729 |
+
"\n",
|
| 730 |
+
" n_images = len(features)\n",
|
| 731 |
+
" similarity_matrix = np.zeros((n_images, n_images), dtype=np.float32)\n",
|
| 732 |
+
"\n",
|
| 733 |
+
" # 各特徴量をグローバル記述子に変換\n",
|
| 734 |
+
" global_features = []\n",
|
| 735 |
+
"\n",
|
| 736 |
+
" for feat in features:\n",
|
| 737 |
+
" if isinstance(feat, dict):\n",
|
| 738 |
+
" for key in ['desc', 'conf', 'pts3d']:\n",
|
| 739 |
+
" if key in feat:\n",
|
| 740 |
+
" feat = feat[key]\n",
|
| 741 |
+
" break\n",
|
| 742 |
+
"\n",
|
| 743 |
+
" if isinstance(feat, torch.Tensor):\n",
|
| 744 |
+
" feat = feat.cpu().numpy()\n",
|
| 745 |
+
"\n",
|
| 746 |
+
" if isinstance(feat, np.ndarray):\n",
|
| 747 |
+
" if feat.ndim == 3: # [H, W, C]\n",
|
| 748 |
+
" feat_flat = feat.reshape(-1, feat.shape[-1])\n",
|
| 749 |
+
" elif feat.ndim == 2: # [N, C]\n",
|
| 750 |
+
" feat_flat = feat\n",
|
| 751 |
+
" else:\n",
|
| 752 |
+
" feat_flat = feat.reshape(-1, max(feat.shape))\n",
|
| 753 |
+
"\n",
|
| 754 |
+
" global_desc = np.mean(feat_flat, axis=0)\n",
|
| 755 |
+
" global_features.append(global_desc)\n",
|
| 756 |
+
" else:\n",
|
| 757 |
+
" # ダミー特徴量\n",
|
| 758 |
+
" global_features.append(np.zeros(64))\n",
|
| 759 |
+
"\n",
|
| 760 |
+
" global_features = np.stack(global_features)\n",
|
| 761 |
+
" feature_dim = global_features.shape[1]\n",
|
| 762 |
+
"\n",
|
| 763 |
+
" print(f\"Global features shape: {global_features.shape}\")\n",
|
| 764 |
+
"\n",
|
| 765 |
+
" # コサイン類似度を使用\n",
|
| 766 |
+
" global_features_norm = global_features / (np.linalg.norm(global_features, axis=1, keepdims=True) + 1e-8)\n",
|
| 767 |
+
" similarity_matrix = global_features_norm @ global_features_norm.T\n",
|
| 768 |
+
"\n",
|
| 769 |
+
" np.fill_diagonal(similarity_matrix, -1)\n",
|
| 770 |
+
"\n",
|
| 771 |
+
" print(f\"Similarity matrix shape: {similarity_matrix.shape}\")\n",
|
| 772 |
+
" print(f\"Similarity range: [{similarity_matrix.min():.3f}, {similarity_matrix.max():.3f}]\")\n",
|
| 773 |
+
"\n",
|
| 774 |
+
" return similarity_matrix\n",
|
| 775 |
+
"\n",
|
| 776 |
+
"\n",
|
| 777 |
+
"def build_pairs_from_similarity(similarity_matrix, top_k=10):\n",
|
| 778 |
+
" \"\"\"類似度行列からペアを構築\"\"\"\n",
|
| 779 |
+
" n_images = similarity_matrix.shape[0]\n",
|
| 780 |
+
" pairs = []\n",
|
| 781 |
+
"\n",
|
| 782 |
+
" for i in range(n_images):\n",
|
| 783 |
+
" similarities = similarity_matrix[i]\n",
|
| 784 |
+
" top_indices = np.argsort(similarities)[::-1][:top_k]\n",
|
| 785 |
+
"\n",
|
| 786 |
+
" for j in top_indices:\n",
|
| 787 |
+
" if j > i:\n",
|
| 788 |
+
" pairs.append((i, j))\n",
|
| 789 |
+
"\n",
|
| 790 |
+
" pairs = list(set(pairs))\n",
|
| 791 |
+
" print(f\"✓ Built {len(pairs)} unique pairs\")\n",
|
| 792 |
+
"\n",
|
| 793 |
+
" return pairs\n",
|
| 794 |
+
"\n",
|
| 795 |
+
"\n",
|
| 796 |
+
"def get_image_pairs_asmk(image_paths, max_pairs=100):\n",
|
| 797 |
+
" \"\"\"ASMKを使用して画像ペアを取得\"\"\"\n",
|
| 798 |
+
" print(\"\\n=== Getting Image Pairs with ASMK ===\")\n",
|
| 799 |
+
"\n",
|
| 800 |
+
" device = Config.DEVICE\n",
|
| 801 |
+
" model = load_mast3r_model(device)\n",
|
| 802 |
+
" features = extract_mast3r_features(model, image_paths, device)\n",
|
| 803 |
+
" similarity_matrix = compute_asmk_similarity(features)\n",
|
| 804 |
+
" pairs = build_pairs_from_similarity(similarity_matrix, Config.RETRIEVAL_TOPK)\n",
|
| 805 |
+
"\n",
|
| 806 |
+
" # モデルを解放\n",
|
| 807 |
+
" del model\n",
|
| 808 |
+
" clear_memory()\n",
|
| 809 |
+
"\n",
|
| 810 |
+
" if len(pairs) > max_pairs:\n",
|
| 811 |
+
" pairs = pairs[:max_pairs]\n",
|
| 812 |
+
" print(f\"Limited to {max_pairs} pairs\")\n",
|
| 813 |
+
"\n",
|
| 814 |
+
" return pairs"
|
| 815 |
+
],
|
| 816 |
+
"metadata": {
|
| 817 |
+
"trusted": true,
|
| 818 |
+
"id": "qo0mGj_5jLmG"
|
| 819 |
+
},
|
| 820 |
+
"outputs": [],
|
| 821 |
+
"execution_count": 12
|
| 822 |
+
},
|
| 823 |
+
{
|
| 824 |
+
"cell_type": "code",
|
| 825 |
+
"source": [
|
| 826 |
+
"# =====================================================================\n",
|
| 827 |
+
"# CELL 14: MASt3R Reconstruction\n",
|
| 828 |
+
"# =====================================================================\n",
|
| 829 |
+
"def run_mast3r_pairs(model, image_paths, pairs, device, batch_size=1):\n",
|
| 830 |
+
" \"\"\"MASt3Rでペア画像を処理(メモリ最適化版)\"\"\"\n",
|
| 831 |
+
" print(\"\\n=== Running MASt3R Reconstruction ===\")\n",
|
| 832 |
+
" from dust3r.inference import inference\n",
|
| 833 |
+
" from dust3r.cloud_opt import global_aligner, GlobalAlignerMode\n",
|
| 834 |
+
" from dust3r.utils.image import load_images\n",
|
| 835 |
+
"\n",
|
| 836 |
+
" # ペアを制限\n",
|
| 837 |
+
" max_pairs_for_memory = 50\n",
|
| 838 |
+
" if len(pairs) > max_pairs_for_memory:\n",
|
| 839 |
+
" print(f\"⚠️ Limiting pairs from {len(pairs)} to {max_pairs_for_memory} for memory\")\n",
|
| 840 |
+
" pairs = pairs[:max_pairs_for_memory]\n",
|
| 841 |
+
"\n",
|
| 842 |
+
" # ペアから画像インデックスを取得\n",
|
| 843 |
+
" pair_indices = []\n",
|
| 844 |
+
" for i, j in pairs:\n",
|
| 845 |
+
" pair_indices.extend([i, j])\n",
|
| 846 |
+
" unique_indices = sorted(set(pair_indices))\n",
|
| 847 |
+
"\n",
|
| 848 |
+
" selected_paths = [image_paths[i] for i in unique_indices]\n",
|
| 849 |
+
" print(f\"Selected {len(selected_paths)} unique images from {len(pairs)} pairs\")\n",
|
| 850 |
+
"\n",
|
| 851 |
+
" # 画像をロード\n",
|
| 852 |
+
" images = load_images(selected_paths, size=Config.IMAGE_SIZE)\n",
|
| 853 |
+
" clear_memory()\n",
|
| 854 |
+
"\n",
|
| 855 |
+
" # インデックスマッピング\n",
|
| 856 |
+
" index_map = {old_idx: new_idx for new_idx, old_idx in enumerate(unique_indices)}\n",
|
| 857 |
+
"\n",
|
| 858 |
+
" # ペア画像リストを作成\n",
|
| 859 |
+
" image_pairs = []\n",
|
| 860 |
+
" for i, j in pairs:\n",
|
| 861 |
+
" new_i = index_map[i]\n",
|
| 862 |
+
" new_j = index_map[j]\n",
|
| 863 |
+
" image_pairs.append((images[new_i], images[new_j]))\n",
|
| 864 |
+
"\n",
|
| 865 |
+
" print(f\"Created {len(image_pairs)} image pairs\")\n",
|
| 866 |
+
" clear_memory()\n",
|
| 867 |
+
"\n",
|
| 868 |
+
" # 推論を実行\n",
|
| 869 |
+
" print(f\"Running inference on {len(image_pairs)} pairs...\")\n",
|
| 870 |
+
" with torch.no_grad():\n",
|
| 871 |
+
" output = inference(image_pairs, model, device, batch_size=batch_size)\n",
|
| 872 |
+
"\n",
|
| 873 |
+
" print(f\"✓ Processed {len(output)} predictions\")\n",
|
| 874 |
+
" clear_memory()\n",
|
| 875 |
+
"\n",
|
| 876 |
+
" # Global alignment\n",
|
| 877 |
+
" scene = global_aligner(\n",
|
| 878 |
+
" dust3r_output=output,\n",
|
| 879 |
+
" device=device,\n",
|
| 880 |
+
" mode=GlobalAlignerMode.PointCloudOptimizer,\n",
|
| 881 |
+
" verbose=True\n",
|
| 882 |
+
" )\n",
|
| 883 |
+
"\n",
|
| 884 |
+
" clear_memory()\n",
|
| 885 |
+
"\n",
|
| 886 |
+
" print(\"Running global alignment...\")\n",
|
| 887 |
+
" try:\n",
|
| 888 |
+
" loss = scene.compute_global_alignment(\n",
|
| 889 |
+
" init=\"mst\",\n",
|
| 890 |
+
" niter=50,\n",
|
| 891 |
+
" schedule='cosine',\n",
|
| 892 |
+
" lr=0.01\n",
|
| 893 |
+
" )\n",
|
| 894 |
+
" print(f\"✓ Alignment complete (loss: {loss:.6f})\")\n",
|
| 895 |
+
" except RuntimeError as e:\n",
|
| 896 |
+
" if \"out of memory\" in str(e).lower():\n",
|
| 897 |
+
" print(\"⚠️ OOM during alignment, trying with fewer iterations...\")\n",
|
| 898 |
+
" clear_memory()\n",
|
| 899 |
+
" loss = scene.compute_global_alignment(\n",
|
| 900 |
+
" init=\"mst\",\n",
|
| 901 |
+
" niter=20,\n",
|
| 902 |
+
" schedule='cosine',\n",
|
| 903 |
+
" lr=0.01\n",
|
| 904 |
+
" )\n",
|
| 905 |
+
" print(f\"✓ Alignment complete with reduced iterations (loss: {loss:.6f})\")\n",
|
| 906 |
+
" else:\n",
|
| 907 |
+
" raise\n",
|
| 908 |
+
"\n",
|
| 909 |
+
" clear_memory()\n",
|
| 910 |
+
" return scene, images\n",
|
| 911 |
+
"\n",
|
| 912 |
+
"# =====================================================================\n",
|
| 913 |
+
"# CELL 15: Camera Parameter Extraction\n",
|
| 914 |
+
"# =====================================================================\n",
|
| 915 |
+
"def extract_camera_params_process2(scene, image_paths, conf_threshold=1.5):\n",
|
| 916 |
+
" \"\"\"sceneからカメラパラメータと3D点を抽出\"\"\"\n",
|
| 917 |
+
" print(\"\\n=== Extracting Camera Parameters ===\")\n",
|
| 918 |
+
"\n",
|
| 919 |
+
" cameras_dict = {}\n",
|
| 920 |
+
" all_pts3d = []\n",
|
| 921 |
+
" all_confidence = []\n",
|
| 922 |
+
"\n",
|
| 923 |
+
" try:\n",
|
| 924 |
+
" if hasattr(scene, 'get_im_poses'):\n",
|
| 925 |
+
" poses = scene.get_im_poses()\n",
|
| 926 |
+
" elif hasattr(scene, 'im_poses'):\n",
|
| 927 |
+
" poses = scene.im_poses\n",
|
| 928 |
+
" else:\n",
|
| 929 |
+
" poses = None\n",
|
| 930 |
+
"\n",
|
| 931 |
+
" if hasattr(scene, 'get_focals'):\n",
|
| 932 |
+
" focals = scene.get_focals()\n",
|
| 933 |
+
" elif hasattr(scene, 'im_focals'):\n",
|
| 934 |
+
" focals = scene.im_focals\n",
|
| 935 |
+
" else:\n",
|
| 936 |
+
" focals = None\n",
|
| 937 |
+
"\n",
|
| 938 |
+
" if hasattr(scene, 'get_principal_points'):\n",
|
| 939 |
+
" pps = scene.get_principal_points()\n",
|
| 940 |
+
" elif hasattr(scene, 'im_pp'):\n",
|
| 941 |
+
" pps = scene.im_pp\n",
|
| 942 |
+
" else:\n",
|
| 943 |
+
" pps = None\n",
|
| 944 |
+
" except Exception as e:\n",
|
| 945 |
+
" print(f\"⚠️ Error getting camera parameters: {e}\")\n",
|
| 946 |
+
" poses = None\n",
|
| 947 |
+
" focals = None\n",
|
| 948 |
+
" pps = None\n",
|
| 949 |
+
"\n",
|
| 950 |
+
" n_images = min(len(poses) if poses is not None else len(image_paths), len(image_paths))\n",
|
| 951 |
+
"\n",
|
| 952 |
+
" for idx in range(n_images):\n",
|
| 953 |
+
" img_name = os.path.basename(image_paths[idx])\n",
|
| 954 |
+
"\n",
|
| 955 |
+
" try:\n",
|
| 956 |
+
" # Poseを取得\n",
|
| 957 |
+
" if poses is not None and idx < len(poses):\n",
|
| 958 |
+
" pose = poses[idx]\n",
|
| 959 |
+
" if isinstance(pose, torch.Tensor):\n",
|
| 960 |
+
" pose = pose.detach().cpu().numpy()\n",
|
| 961 |
+
" if not isinstance(pose, np.ndarray) or pose.shape != (4, 4):\n",
|
| 962 |
+
" pose = np.eye(4)\n",
|
| 963 |
+
" else:\n",
|
| 964 |
+
" pose = np.eye(4)\n",
|
| 965 |
+
"\n",
|
| 966 |
+
" # Focalを取得\n",
|
| 967 |
+
" if focals is not None and idx < len(focals):\n",
|
| 968 |
+
" focal = focals[idx]\n",
|
| 969 |
+
" if isinstance(focal, torch.Tensor):\n",
|
| 970 |
+
" focal = focal.detach().cpu().item()\n",
|
| 971 |
+
" else:\n",
|
| 972 |
+
" focal = float(focal)\n",
|
| 973 |
+
" else:\n",
|
| 974 |
+
" focal = 1000.0\n",
|
| 975 |
+
"\n",
|
| 976 |
+
" # Principal pointを取得\n",
|
| 977 |
+
" if pps is not None and idx < len(pps):\n",
|
| 978 |
+
" pp = pps[idx]\n",
|
| 979 |
+
" if isinstance(pp, torch.Tensor):\n",
|
| 980 |
+
" pp = pp.detach().cpu().numpy()\n",
|
| 981 |
+
" else:\n",
|
| 982 |
+
" pp = np.array([112.0, 112.0])\n",
|
| 983 |
+
"\n",
|
| 984 |
+
" # カメラパラメータを保存\n",
|
| 985 |
+
" cameras_dict[img_name] = {\n",
|
| 986 |
+
" 'focal': focal,\n",
|
| 987 |
+
" 'pp': pp,\n",
|
| 988 |
+
" 'pose': pose,\n",
|
| 989 |
+
" 'width': Config.IMAGE_SIZE * 4,\n",
|
| 990 |
+
" 'height': Config.IMAGE_SIZE * 4\n",
|
| 991 |
+
" }\n",
|
| 992 |
+
"\n",
|
| 993 |
+
" # 3D点を取得\n",
|
| 994 |
+
" if hasattr(scene, 'im_pts3d') and idx < len(scene.im_pts3d):\n",
|
| 995 |
+
" pts3d_img = scene.im_pts3d[idx]\n",
|
| 996 |
+
" elif hasattr(scene, 'get_pts3d'):\n",
|
| 997 |
+
" pts3d_all = scene.get_pts3d()\n",
|
| 998 |
+
" if idx < len(pts3d_all):\n",
|
| 999 |
+
" pts3d_img = pts3d_all[idx]\n",
|
| 1000 |
+
" else:\n",
|
| 1001 |
+
" pts3d_img = None\n",
|
| 1002 |
+
" else:\n",
|
| 1003 |
+
" pts3d_img = None\n",
|
| 1004 |
+
"\n",
|
| 1005 |
+
" # Confidenceを取得\n",
|
| 1006 |
+
" if hasattr(scene, 'im_conf') and idx < len(scene.im_conf):\n",
|
| 1007 |
+
" conf_img = scene.im_conf[idx]\n",
|
| 1008 |
+
" elif hasattr(scene, 'get_conf'):\n",
|
| 1009 |
+
" conf_all = scene.get_conf()\n",
|
| 1010 |
+
" if idx < len(conf_all):\n",
|
| 1011 |
+
" conf_img = conf_all[idx]\n",
|
| 1012 |
+
" else:\n",
|
| 1013 |
+
" conf_img = None\n",
|
| 1014 |
+
" else:\n",
|
| 1015 |
+
" conf_img = None\n",
|
| 1016 |
+
"\n",
|
| 1017 |
+
" # 3D点とconfidenceを処理\n",
|
| 1018 |
+
" if pts3d_img is not None:\n",
|
| 1019 |
+
" if isinstance(pts3d_img, torch.Tensor):\n",
|
| 1020 |
+
" pts3d_img = pts3d_img.detach().cpu().numpy()\n",
|
| 1021 |
+
"\n",
|
| 1022 |
+
" if pts3d_img.ndim == 3:\n",
|
| 1023 |
+
" pts3d_flat = pts3d_img.reshape(-1, 3)\n",
|
| 1024 |
+
" else:\n",
|
| 1025 |
+
" pts3d_flat = pts3d_img\n",
|
| 1026 |
+
"\n",
|
| 1027 |
+
" all_pts3d.append(pts3d_flat)\n",
|
| 1028 |
+
"\n",
|
| 1029 |
+
" # confidenceを処理\n",
|
| 1030 |
+
" if conf_img is not None:\n",
|
| 1031 |
+
" if isinstance(conf_img, list):\n",
|
| 1032 |
+
" conf_img = np.array(conf_img)\n",
|
| 1033 |
+
" elif isinstance(conf_img, torch.Tensor):\n",
|
| 1034 |
+
" conf_img = conf_img.detach().cpu().numpy()\n",
|
| 1035 |
+
"\n",
|
| 1036 |
+
" if conf_img.ndim > 1:\n",
|
| 1037 |
+
" conf_flat = conf_img.reshape(-1)\n",
|
| 1038 |
+
" else:\n",
|
| 1039 |
+
" conf_flat = conf_img\n",
|
| 1040 |
+
"\n",
|
| 1041 |
+
" if len(conf_flat) != len(pts3d_flat):\n",
|
| 1042 |
+
" conf_flat = np.ones(len(pts3d_flat))\n",
|
| 1043 |
+
"\n",
|
| 1044 |
+
" all_confidence.append(conf_flat)\n",
|
| 1045 |
+
" else:\n",
|
| 1046 |
+
" all_confidence.append(np.ones(len(pts3d_flat)))\n",
|
| 1047 |
+
"\n",
|
| 1048 |
+
" except Exception as e:\n",
|
| 1049 |
+
" print(f\"⚠️ Error processing image {idx} ({img_name}): {e}\")\n",
|
| 1050 |
+
" cameras_dict[img_name] = {\n",
|
| 1051 |
+
" 'focal': 1000.0,\n",
|
| 1052 |
+
" 'pp': np.array([112.0, 112.0]),\n",
|
| 1053 |
+
" 'pose': np.eye(4),\n",
|
| 1054 |
+
" 'width': Config.IMAGE_SIZE * 4,\n",
|
| 1055 |
+
" 'height': Config.IMAGE_SIZE * 4\n",
|
| 1056 |
+
" }\n",
|
| 1057 |
+
" continue\n",
|
| 1058 |
+
"\n",
|
| 1059 |
+
" # 全3D点を結合\n",
|
| 1060 |
+
" if all_pts3d:\n",
|
| 1061 |
+
" pts3d = np.vstack(all_pts3d)\n",
|
| 1062 |
+
" confidence = np.concatenate(all_confidence)\n",
|
| 1063 |
+
" else:\n",
|
| 1064 |
+
" pts3d = np.zeros((0, 3))\n",
|
| 1065 |
+
" confidence = np.zeros(0)\n",
|
| 1066 |
+
"\n",
|
| 1067 |
+
" print(f\"✓ Extracted camera parameters for {len(cameras_dict)} images\")\n",
|
| 1068 |
+
" print(f\"✓ Total 3D points: {len(pts3d)}\")\n",
|
| 1069 |
+
"\n",
|
| 1070 |
+
" # Confidenceでフィルタリング\n",
|
| 1071 |
+
" if len(confidence) > 0:\n",
|
| 1072 |
+
" valid_mask = confidence > conf_threshold\n",
|
| 1073 |
+
" pts3d = pts3d[valid_mask]\n",
|
| 1074 |
+
" confidence = confidence[valid_mask]\n",
|
| 1075 |
+
" print(f\"✓ After confidence filtering (>{conf_threshold}): {len(pts3d)} points\")\n",
|
| 1076 |
+
"\n",
|
| 1077 |
+
" return cameras_dict, pts3d, confidence"
|
| 1078 |
+
],
|
| 1079 |
+
"metadata": {
|
| 1080 |
+
"trusted": true,
|
| 1081 |
+
"id": "bCXpdw83jLmG"
|
| 1082 |
+
},
|
| 1083 |
+
"outputs": [],
|
| 1084 |
+
"execution_count": 13
|
| 1085 |
+
},
|
| 1086 |
+
{
|
| 1087 |
+
"cell_type": "code",
|
| 1088 |
+
"source": [
|
| 1089 |
+
"# =====================================================================\n",
|
| 1090 |
+
"# CELL 16: COLMAP Export Functions\n",
|
| 1091 |
+
"# =====================================================================\n",
|
| 1092 |
+
"import struct\n",
|
| 1093 |
+
"from scipy.spatial.transform import Rotation as R\n",
|
| 1094 |
+
"\n",
|
| 1095 |
+
"def write_colmap_sparse(cameras_dict, pts3d, confidence, image_paths, output_dir):\n",
|
| 1096 |
+
" \"\"\"COLMAP sparse形式をバイナリファイルで出力\"\"\"\n",
|
| 1097 |
+
" os.makedirs(output_dir, exist_ok=True)\n",
|
| 1098 |
+
"\n",
|
| 1099 |
+
" if not cameras_dict:\n",
|
| 1100 |
+
" raise ValueError(\"cameras_dict is empty\")\n",
|
| 1101 |
+
"\n",
|
| 1102 |
+
" first_key = list(cameras_dict.keys())[0]\n",
|
| 1103 |
+
" first_cam = cameras_dict[first_key]\n",
|
| 1104 |
+
"\n",
|
| 1105 |
+
" w = int(first_cam.get('width', 1920))\n",
|
| 1106 |
+
" h = int(first_cam.get('height', 1080))\n",
|
| 1107 |
+
" focal = float(first_cam.get('focal', max(w, h) * 1.2))\n",
|
| 1108 |
+
" cx = w / 2.0\n",
|
| 1109 |
+
" cy = h / 2.0\n",
|
| 1110 |
+
"\n",
|
| 1111 |
+
" # cameras.bin\n",
|
| 1112 |
+
" cameras_file = os.path.join(output_dir, 'cameras.bin')\n",
|
| 1113 |
+
" with open(cameras_file, 'wb') as f:\n",
|
| 1114 |
+
" f.write(struct.pack('Q', 1))\n",
|
| 1115 |
+
" camera_id = 1\n",
|
| 1116 |
+
" model_id = 1 # PINHOLE\n",
|
| 1117 |
+
" f.write(struct.pack('i', camera_id))\n",
|
| 1118 |
+
" f.write(struct.pack('i', model_id))\n",
|
| 1119 |
+
" f.write(struct.pack('Q', w))\n",
|
| 1120 |
+
" f.write(struct.pack('Q', h))\n",
|
| 1121 |
+
" f.write(struct.pack('d', focal))\n",
|
| 1122 |
+
" f.write(struct.pack('d', focal))\n",
|
| 1123 |
+
" f.write(struct.pack('d', cx))\n",
|
| 1124 |
+
" f.write(struct.pack('d', cy))\n",
|
| 1125 |
+
"\n",
|
| 1126 |
+
" print(f\"✓ Written cameras.bin\")\n",
|
| 1127 |
+
"\n",
|
| 1128 |
+
" # images.bin\n",
|
| 1129 |
+
" images_file = os.path.join(output_dir, 'images.bin')\n",
|
| 1130 |
+
" with open(images_file, 'wb') as f:\n",
|
| 1131 |
+
" f.write(struct.pack('Q', len(image_paths)))\n",
|
| 1132 |
+
"\n",
|
| 1133 |
+
" for i, img_path in enumerate(image_paths):\n",
|
| 1134 |
+
" img_name = os.path.basename(img_path)\n",
|
| 1135 |
+
"\n",
|
| 1136 |
+
" cam_info = cameras_dict.get(img_name)\n",
|
| 1137 |
+
" if cam_info is None:\n",
|
| 1138 |
+
" pose = np.eye(4)\n",
|
| 1139 |
+
" else:\n",
|
| 1140 |
+
" pose = cam_info['pose']\n",
|
| 1141 |
+
"\n",
|
| 1142 |
+
" try:\n",
|
| 1143 |
+
" w2c = np.linalg.inv(pose)\n",
|
| 1144 |
+
" except np.linalg.LinAlgError:\n",
|
| 1145 |
+
" w2c = np.eye(4)\n",
|
| 1146 |
+
"\n",
|
| 1147 |
+
" rot_mat = w2c[:3, :3]\n",
|
| 1148 |
+
" tvec = w2c[:3, 3]\n",
|
| 1149 |
+
" quat = R.from_matrix(rot_mat).as_quat()\n",
|
| 1150 |
+
" qw, qx, qy, qz = quat[3], quat[0], quat[1], quat[2]\n",
|
| 1151 |
+
"\n",
|
| 1152 |
+
" image_id = i + 1\n",
|
| 1153 |
+
" f.write(struct.pack('i', image_id))\n",
|
| 1154 |
+
" f.write(struct.pack('d', qw))\n",
|
| 1155 |
+
" f.write(struct.pack('d', qx))\n",
|
| 1156 |
+
" f.write(struct.pack('d', qy))\n",
|
| 1157 |
+
" f.write(struct.pack('d', qz))\n",
|
| 1158 |
+
" f.write(struct.pack('d', tvec[0]))\n",
|
| 1159 |
+
" f.write(struct.pack('d', tvec[1]))\n",
|
| 1160 |
+
" f.write(struct.pack('d', tvec[2]))\n",
|
| 1161 |
+
" f.write(struct.pack('i', 1))\n",
|
| 1162 |
+
" img_name_bytes = img_name.encode('utf-8') + b'\\x00'\n",
|
| 1163 |
+
" f.write(img_name_bytes)\n",
|
| 1164 |
+
" f.write(struct.pack('Q', 0))\n",
|
| 1165 |
+
"\n",
|
| 1166 |
+
" print(f\"✓ Written images.bin ({len(image_paths)} images)\")\n",
|
| 1167 |
+
"\n",
|
| 1168 |
+
" # points3D.bin\n",
|
| 1169 |
+
" points_file = os.path.join(output_dir, 'points3D.bin')\n",
|
| 1170 |
+
" with open(points_file, 'wb') as f:\n",
|
| 1171 |
+
" f.write(struct.pack('Q', len(pts3d)))\n",
|
| 1172 |
+
"\n",
|
| 1173 |
+
" for point_id, point in enumerate(pts3d, start=1):\n",
|
| 1174 |
+
" f.write(struct.pack('Q', point_id))\n",
|
| 1175 |
+
" f.write(struct.pack('d', point[0]))\n",
|
| 1176 |
+
" f.write(struct.pack('d', point[1]))\n",
|
| 1177 |
+
" f.write(struct.pack('d', point[2]))\n",
|
| 1178 |
+
" f.write(struct.pack('B', 255))\n",
|
| 1179 |
+
" f.write(struct.pack('B', 255))\n",
|
| 1180 |
+
" f.write(struct.pack('B', 255))\n",
|
| 1181 |
+
" f.write(struct.pack('d', 0.0))\n",
|
| 1182 |
+
" f.write(struct.pack('Q', 0))\n",
|
| 1183 |
+
"\n",
|
| 1184 |
+
" print(f\"✓ Written points3D.bin ({len(pts3d)} points)\")\n",
|
| 1185 |
+
"\n",
|
| 1186 |
+
" # テキスト形式も出力\n",
|
| 1187 |
+
" write_text_versions(cameras_dict, pts3d, image_paths, output_dir, w, h, focal, cx, cy)\n",
|
| 1188 |
+
"\n",
|
| 1189 |
+
" print(f\"\\n✓ COLMAP sparse reconstruction saved\")\n",
|
| 1190 |
+
" return output_dir\n",
|
| 1191 |
+
"\n",
|
| 1192 |
+
"\n",
|
| 1193 |
+
"def write_text_versions(cameras_dict, pts3d, image_paths, output_dir, w, h, focal, cx, cy):\n",
|
| 1194 |
+
" \"\"\"テキスト形式を出力\"\"\"\n",
|
| 1195 |
+
"\n",
|
| 1196 |
+
" # cameras.txt\n",
|
| 1197 |
+
" with open(os.path.join(output_dir, 'cameras.txt'), 'w') as file:\n",
|
| 1198 |
+
" file.write(\"# Camera list with one line of data per camera:\\n\")\n",
|
| 1199 |
+
" file.write(\"# CAMERA_ID, MODEL, WIDTH, HEIGHT, PARAMS[]\\n\")\n",
|
| 1200 |
+
" file.write(f\"1 PINHOLE {w} {h} {focal} {focal} {cx} {cy}\\n\")\n",
|
| 1201 |
+
"\n",
|
| 1202 |
+
" # images.txt\n",
|
| 1203 |
+
" with open(os.path.join(output_dir, 'images.txt'), 'w') as file:\n",
|
| 1204 |
+
" file.write(\"# Image list with two lines of data per image:\\n\")\n",
|
| 1205 |
+
" file.write(\"# IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME\\n\")\n",
|
| 1206 |
+
" file.write(\"# POINTS2D[] as (X, Y, POINT3D_ID)\\n\")\n",
|
| 1207 |
+
"\n",
|
| 1208 |
+
" for i, img_path in enumerate(image_paths):\n",
|
| 1209 |
+
" img_name = os.path.basename(img_path)\n",
|
| 1210 |
+
" cam_info = cameras_dict.get(img_name)\n",
|
| 1211 |
+
"\n",
|
| 1212 |
+
" if cam_info is None:\n",
|
| 1213 |
+
" pose = np.eye(4)\n",
|
| 1214 |
+
" else:\n",
|
| 1215 |
+
" pose = cam_info['pose']\n",
|
| 1216 |
+
"\n",
|
| 1217 |
+
" try:\n",
|
| 1218 |
+
" w2c = np.linalg.inv(pose)\n",
|
| 1219 |
+
" except np.linalg.LinAlgError:\n",
|
| 1220 |
+
" w2c = np.eye(4)\n",
|
| 1221 |
+
"\n",
|
| 1222 |
+
" rot_mat = w2c[:3, :3]\n",
|
| 1223 |
+
" tvec = w2c[:3, 3]\n",
|
| 1224 |
+
" quat = R.from_matrix(rot_mat).as_quat()\n",
|
| 1225 |
+
" qw, qx, qy, qz = quat[3], quat[0], quat[1], quat[2]\n",
|
| 1226 |
+
"\n",
|
| 1227 |
+
" image_id = i + 1\n",
|
| 1228 |
+
" file.write(f\"{image_id} {qw} {qx} {qy} {qz} {tvec[0]} {tvec[1]} {tvec[2]} 1 {img_name}\\n\")\n",
|
| 1229 |
+
" file.write(\"\\n\")\n",
|
| 1230 |
+
"\n",
|
| 1231 |
+
" # points3D.txt\n",
|
| 1232 |
+
" with open(os.path.join(output_dir, 'points3D.txt'), 'w') as file:\n",
|
| 1233 |
+
" file.write(\"# 3D point list with one line of data per point:\\n\")\n",
|
| 1234 |
+
" file.write(\"# POINT3D_ID, X, Y, Z, R, G, B, ERROR, TRACK[]\\n\")\n",
|
| 1235 |
+
"\n",
|
| 1236 |
+
" for point_id, point in enumerate(pts3d, start=1):\n",
|
| 1237 |
+
" file.write(f\"{point_id} {point[0]} {point[1]} {point[2]} 255 255 255 0.0\\n\")\n",
|
| 1238 |
+
"\n",
|
| 1239 |
+
"# =====================================================================\n",
|
| 1240 |
+
"# CELL 17: Gaussian Splatting Runner\n",
|
| 1241 |
+
"# =====================================================================\n",
|
| 1242 |
+
"def run_gaussian_splatting(source_dir, output_dir, iterations=30000):\n",
|
| 1243 |
+
" \"\"\"Gaussian Splattingを実行\"\"\"\n",
|
| 1244 |
+
" print(\"\\n=== Running Gaussian Splatting ===\")\n",
|
| 1245 |
+
"\n",
|
| 1246 |
+
" os.makedirs(output_dir, exist_ok=True)\n",
|
| 1247 |
+
"\n",
|
| 1248 |
+
" cmd = [\n",
|
| 1249 |
+
" \"python\", \"/content/gaussian-splatting/train.py\",\n",
|
| 1250 |
+
" \"-s\", source_dir,\n",
|
| 1251 |
+
" \"-m\", output_dir,\n",
|
| 1252 |
+
" \"--iterations\", str(iterations),\n",
|
| 1253 |
+
" \"--eval\"\n",
|
| 1254 |
+
" ]\n",
|
| 1255 |
+
"\n",
|
| 1256 |
+
" print(f\"Command: {' '.join(cmd)}\")\n",
|
| 1257 |
+
" print(f\" Source: {source_dir}\")\n",
|
| 1258 |
+
" print(f\" Output: {output_dir}\")\n",
|
| 1259 |
+
"\n",
|
| 1260 |
+
" result = subprocess.run(cmd, capture_output=False, text=True)\n",
|
| 1261 |
+
"\n",
|
| 1262 |
+
" if result.returncode == 0:\n",
|
| 1263 |
+
" print(f\"\\n✓ Gaussian Splatting complete\")\n",
|
| 1264 |
+
"\n",
|
| 1265 |
+
" point_cloud_dir = os.path.join(output_dir, \"point_cloud\")\n",
|
| 1266 |
+
" if os.path.exists(point_cloud_dir):\n",
|
| 1267 |
+
" print(f\"\\n✓ Point cloud directory found: {point_cloud_dir}\")\n",
|
| 1268 |
+
"\n",
|
| 1269 |
+
" for item in sorted(os.listdir(point_cloud_dir)):\n",
|
| 1270 |
+
" item_path = os.path.join(point_cloud_dir, item)\n",
|
| 1271 |
+
" if os.path.isdir(item_path) and item.startswith(\"iteration_\"):\n",
|
| 1272 |
+
" ply_file = os.path.join(item_path, \"point_cloud.ply\")\n",
|
| 1273 |
+
" if os.path.exists(ply_file):\n",
|
| 1274 |
+
" file_size = os.path.getsize(ply_file) / (1024 * 1024)\n",
|
| 1275 |
+
" print(f\" ✓ {item}/point_cloud.ply ({file_size:.2f} MB)\")\n",
|
| 1276 |
+
" else:\n",
|
| 1277 |
+
" print(f\"\\n✗ Gaussian Splatting failed with return code {result.returncode}\")\n",
|
| 1278 |
+
"\n",
|
| 1279 |
+
" return output_dir"
|
| 1280 |
+
],
|
| 1281 |
+
"metadata": {
|
| 1282 |
+
"trusted": true,
|
| 1283 |
+
"id": "1yyRoxHKjLmH"
|
| 1284 |
+
},
|
| 1285 |
+
"outputs": [],
|
| 1286 |
+
"execution_count": 14
|
| 1287 |
+
},
|
| 1288 |
+
{
|
| 1289 |
+
"cell_type": "code",
|
| 1290 |
+
"source": [
|
| 1291 |
+
"# =====================================================================\n",
|
| 1292 |
+
"# CELL 18: Main Pipeline\n",
|
| 1293 |
+
"# =====================================================================\n",
|
| 1294 |
+
"def main_pipeline(image_dir, output_dir, square_size=1024, iterations=30000,\n",
|
| 1295 |
+
" max_images=200, max_pairs=100, max_points=500000,\n",
|
| 1296 |
+
" conf_threshold=1.5, preprocess_mode='none'):\n",
|
| 1297 |
+
" \"\"\"メインパイプライン(修正版)\"\"\"\n",
|
| 1298 |
+
"\n",
|
| 1299 |
+
" # STEP 0: Image Preprocessing\n",
|
| 1300 |
+
" if preprocess_mode == 'biplet':\n",
|
| 1301 |
+
" print(\"=\"*70)\n",
|
| 1302 |
+
" print(\"STEP 0: Image Preprocessing (Biplet Crops)\")\n",
|
| 1303 |
+
" print(\"=\"*70)\n",
|
| 1304 |
+
"\n",
|
| 1305 |
+
" temp_biplet_dir = os.path.join(output_dir, \"temp_biplet\")\n",
|
| 1306 |
+
" biplet_dir = normalize_image_sizes_biplet(image_dir, temp_biplet_dir, size=square_size)\n",
|
| 1307 |
+
"\n",
|
| 1308 |
+
" images_dir = os.path.join(output_dir, \"images\")\n",
|
| 1309 |
+
" os.makedirs(images_dir, exist_ok=True)\n",
|
| 1310 |
+
"\n",
|
| 1311 |
+
" biplet_suffixes = ['_left', '_right', '_top', '_bottom']\n",
|
| 1312 |
+
" copied_count = 0\n",
|
| 1313 |
+
"\n",
|
| 1314 |
+
" for img_file in os.listdir(temp_biplet_dir):\n",
|
| 1315 |
+
" if any(suffix in img_file for suffix in biplet_suffixes):\n",
|
| 1316 |
+
" src = os.path.join(temp_biplet_dir, img_file)\n",
|
| 1317 |
+
" dst = os.path.join(images_dir, img_file)\n",
|
| 1318 |
+
" shutil.copy2(src, dst)\n",
|
| 1319 |
+
" copied_count += 1\n",
|
| 1320 |
+
"\n",
|
| 1321 |
+
" print(f\"✓ Copied {copied_count} biplet images to {images_dir}\")\n",
|
| 1322 |
+
"\n",
|
| 1323 |
+
" original_images_dir = os.path.join(output_dir, \"original_images\")\n",
|
| 1324 |
+
" os.makedirs(original_images_dir, exist_ok=True)\n",
|
| 1325 |
+
"\n",
|
| 1326 |
+
" original_count = 0\n",
|
| 1327 |
+
" valid_extensions = ('.jpg', '.jpeg', '.png', '.bmp')\n",
|
| 1328 |
+
" for img_file in os.listdir(image_dir):\n",
|
| 1329 |
+
" if img_file.lower().endswith(valid_extensions):\n",
|
| 1330 |
+
" src = os.path.join(image_dir, img_file)\n",
|
| 1331 |
+
" dst = os.path.join(original_images_dir, img_file)\n",
|
| 1332 |
+
" shutil.copy2(src, dst)\n",
|
| 1333 |
+
" original_count += 1\n",
|
| 1334 |
+
"\n",
|
| 1335 |
+
" print(f\"✓ Saved {original_count} original images to {original_images_dir}\")\n",
|
| 1336 |
+
" shutil.rmtree(temp_biplet_dir)\n",
|
| 1337 |
+
" image_dir = images_dir\n",
|
| 1338 |
+
" clear_memory()\n",
|
| 1339 |
+
" else:\n",
|
| 1340 |
+
" images_dir = os.path.join(output_dir, \"images\")\n",
|
| 1341 |
+
" if not os.path.exists(images_dir):\n",
|
| 1342 |
+
" print(\"=\"*70)\n",
|
| 1343 |
+
" print(\"STEP 0: Copying images to output directory\")\n",
|
| 1344 |
+
" print(\"=\"*70)\n",
|
| 1345 |
+
" shutil.copytree(image_dir, images_dir)\n",
|
| 1346 |
+
" print(f\"✓ Copied images to {images_dir}\")\n",
|
| 1347 |
+
" image_dir = images_dir\n",
|
| 1348 |
+
"\n",
|
| 1349 |
+
" # STEP 1: Loading Images\n",
|
| 1350 |
+
" print(\"\\n\" + \"=\"*70)\n",
|
| 1351 |
+
" print(\"STEP 1: Loading and Preparing Images\")\n",
|
| 1352 |
+
" print(\"=\"*70)\n",
|
| 1353 |
+
"\n",
|
| 1354 |
+
" image_paths = load_images_from_directory(image_dir, max_images=max_images)\n",
|
| 1355 |
+
" print(f\"Loaded {len(image_paths)} images\")\n",
|
| 1356 |
+
" clear_memory()\n",
|
| 1357 |
+
"\n",
|
| 1358 |
+
" # STEP 2: Image Pair Selection\n",
|
| 1359 |
+
" print(\"\\n\" + \"=\"*70)\n",
|
| 1360 |
+
" print(\"STEP 2: Image Pair Selection\")\n",
|
| 1361 |
+
" print(\"=\"*70)\n",
|
| 1362 |
+
"\n",
|
| 1363 |
+
" max_pairs = min(max_pairs, 50)\n",
|
| 1364 |
+
" pairs = get_image_pairs_asmk(image_paths, max_pairs=max_pairs)\n",
|
| 1365 |
+
" print(f\"Selected {len(pairs)} image pairs\")\n",
|
| 1366 |
+
" clear_memory()\n",
|
| 1367 |
+
"\n",
|
| 1368 |
+
" # STEP 3: MASt3R 3D Reconstruction\n",
|
| 1369 |
+
" print(\"\\n\" + \"=\"*70)\n",
|
| 1370 |
+
" print(\"STEP 3: MASt3R 3D Reconstruction\")\n",
|
| 1371 |
+
" print(\"=\"*70)\n",
|
| 1372 |
+
"\n",
|
| 1373 |
+
" device = Config.DEVICE\n",
|
| 1374 |
+
" model = load_mast3r_model(device)\n",
|
| 1375 |
+
" scene, mast3r_images = run_mast3r_pairs(model, image_paths, pairs, device)\n",
|
| 1376 |
+
"\n",
|
| 1377 |
+
" del model\n",
|
| 1378 |
+
" clear_memory()\n",
|
| 1379 |
+
"\n",
|
| 1380 |
+
" # STEP 4: Converting to COLMAP\n",
|
| 1381 |
+
" print(\"\\n\" + \"=\"*70)\n",
|
| 1382 |
+
" print(\"STEP 4: Converting to COLMAP (PINHOLE)\")\n",
|
| 1383 |
+
" print(\"=\"*70)\n",
|
| 1384 |
+
"\n",
|
| 1385 |
+
" cameras_dict, pts3d, confidence = extract_camera_params_process2(\n",
|
| 1386 |
+
" scene, image_paths, conf_threshold=conf_threshold\n",
|
| 1387 |
+
" )\n",
|
| 1388 |
+
"\n",
|
| 1389 |
+
" del scene\n",
|
| 1390 |
+
" clear_memory()\n",
|
| 1391 |
+
"\n",
|
| 1392 |
+
" if len(pts3d) > max_points:\n",
|
| 1393 |
+
" print(f\"⚠️ Limiting points from {len(pts3d)} to {max_points}\")\n",
|
| 1394 |
+
" indices = np.random.choice(len(pts3d), max_points, replace=False)\n",
|
| 1395 |
+
" pts3d = pts3d[indices]\n",
|
| 1396 |
+
" confidence = confidence[indices]\n",
|
| 1397 |
+
"\n",
|
| 1398 |
+
" print(f\"Final point count: {len(pts3d)}\")\n",
|
| 1399 |
+
"\n",
|
| 1400 |
+
" colmap_dir = os.path.join(output_dir, \"sparse/0\")\n",
|
| 1401 |
+
" os.makedirs(colmap_dir, exist_ok=True)\n",
|
| 1402 |
+
"\n",
|
| 1403 |
+
" write_colmap_sparse(cameras_dict, pts3d, confidence, image_paths, colmap_dir)\n",
|
| 1404 |
+
" clear_memory()\n",
|
| 1405 |
+
"\n",
|
| 1406 |
+
" # STEP 5: Running Gaussian Splatting\n",
|
| 1407 |
+
" print(\"\\n\" + \"=\"*70)\n",
|
| 1408 |
+
" print(\"STEP 5: Running Gaussian Splatting\")\n",
|
| 1409 |
+
" print(\"=\"*70)\n",
|
| 1410 |
+
"\n",
|
| 1411 |
+
" source_dir = output_dir\n",
|
| 1412 |
+
" model_output_dir = os.path.join(output_dir, \"gaussian_splatting\")\n",
|
| 1413 |
+
"\n",
|
| 1414 |
+
" gs_output = run_gaussian_splatting(\n",
|
| 1415 |
+
" source_dir=source_dir,\n",
|
| 1416 |
+
" output_dir=model_output_dir,\n",
|
| 1417 |
+
" iterations=iterations\n",
|
| 1418 |
+
" )\n",
|
| 1419 |
+
"\n",
|
| 1420 |
+
" # STEP 6: Verify Output\n",
|
| 1421 |
+
" print(\"\\n\" + \"=\"*70)\n",
|
| 1422 |
+
" print(\"PIPELINE COMPLETE\")\n",
|
| 1423 |
+
" print(\"=\"*70)\n",
|
| 1424 |
+
"\n",
|
| 1425 |
+
" ply_path = os.path.join(\n",
|
| 1426 |
+
" model_output_dir,\n",
|
| 1427 |
+
" \"point_cloud\",\n",
|
| 1428 |
+
" f\"iteration_{iterations}\",\n",
|
| 1429 |
+
" \"point_cloud.ply\"\n",
|
| 1430 |
+
" )\n",
|
| 1431 |
+
"\n",
|
| 1432 |
+
" if os.path.exists(ply_path):\n",
|
| 1433 |
+
" file_size = os.path.getsize(ply_path) / (1024 * 1024)\n",
|
| 1434 |
+
" print(f\"✓ Point cloud generated: {ply_path}\")\n",
|
| 1435 |
+
" print(f\" Size: {file_size:.2f} MB\")\n",
|
| 1436 |
+
" else:\n",
|
| 1437 |
+
" print(f\"⚠️ Point cloud not found at: {ply_path}\")\n",
|
| 1438 |
+
"\n",
|
| 1439 |
+
" print(f\"\\nOutput directory structure:\")\n",
|
| 1440 |
+
" print(f\" {output_dir}/\")\n",
|
| 1441 |
+
" print(f\" ├── images/ (processed images)\")\n",
|
| 1442 |
+
" if preprocess_mode == 'biplet':\n",
|
| 1443 |
+
" print(f\" ├── original_images/ (original source images)\")\n",
|
| 1444 |
+
" print(f\" ├── sparse/0/ (COLMAP data)\")\n",
|
| 1445 |
+
" print(f\" └── gaussian_splatting/ (GS output)\")\n",
|
| 1446 |
+
"\n",
|
| 1447 |
+
" return gs_output\n",
|
| 1448 |
+
"\n",
|
| 1449 |
+
"# =====================================================================\n",
|
| 1450 |
+
"# CELL 19: Verify Setup\n",
|
| 1451 |
+
"# =====================================================================\n",
|
| 1452 |
+
"print(f\"✓ np: {np.__version__} - {np.__file__}\")\n",
|
| 1453 |
+
"!pip show numpy | grep Version\n",
|
| 1454 |
+
"\n",
|
| 1455 |
+
"try:\n",
|
| 1456 |
+
" import roma\n",
|
| 1457 |
+
" print(\"✓ roma is installed\")\n",
|
| 1458 |
+
"except ModuleNotFoundError:\n",
|
| 1459 |
+
" print(\"⚠️ roma not found, installing...\")\n",
|
| 1460 |
+
" !pip install roma\n",
|
| 1461 |
+
" import roma\n",
|
| 1462 |
+
" print(\"✓ roma installed\")"
|
| 1463 |
+
],
|
| 1464 |
+
"metadata": {
|
| 1465 |
+
"trusted": true,
|
| 1466 |
+
"id": "bHKT_3EZjLmH",
|
| 1467 |
+
"colab": {
|
| 1468 |
+
"base_uri": "https://localhost:8080/"
|
| 1469 |
+
},
|
| 1470 |
+
"outputId": "73f6ce48-e784-4136-9726-d542f4a03bed"
|
| 1471 |
+
},
|
| 1472 |
+
"outputs": [
|
| 1473 |
+
{
|
| 1474 |
+
"output_type": "stream",
|
| 1475 |
+
"name": "stdout",
|
| 1476 |
+
"text": [
|
| 1477 |
+
"✓ np: 1.26.4 - /usr/local/lib/python3.12/dist-packages/numpy/__init__.py\n",
|
| 1478 |
+
"Version: 1.26.4\n",
|
| 1479 |
+
"Version 3.1, 31 March 2009\n",
|
| 1480 |
+
" Version 3, 29 June 2007\n",
|
| 1481 |
+
" 5. Conveying Modified Source Versions.\n",
|
| 1482 |
+
" 14. Revised Versions of this License.\n",
|
| 1483 |
+
"✓ roma is installed\n"
|
| 1484 |
+
]
|
| 1485 |
+
}
|
| 1486 |
+
],
|
| 1487 |
+
"execution_count": 15
|
| 1488 |
+
},
|
| 1489 |
+
{
|
| 1490 |
+
"cell_type": "code",
|
| 1491 |
+
"source": [
|
| 1492 |
+
"# =====================================================================\n",
|
| 1493 |
+
"# CELL 20: Run Pipeline\n",
|
| 1494 |
+
"# =====================================================================\n",
|
| 1495 |
+
"if __name__ == \"__main__\":\n",
|
| 1496 |
+
" IMAGE_DIR = \"/content/drive/MyDrive/your_folder/fountain\"\n",
|
| 1497 |
+
" OUTPUT_DIR = \"/content/output\"\n",
|
| 1498 |
+
"\n",
|
| 1499 |
+
" gs_output = main_pipeline(\n",
|
| 1500 |
+
" image_dir=IMAGE_DIR,\n",
|
| 1501 |
+
" output_dir=OUTPUT_DIR,\n",
|
| 1502 |
+
" square_size=1024, # 512→384 に削減(メモリ削減)\n",
|
| 1503 |
+
" iterations=2000, # そのまま\n",
|
| 1504 |
+
" max_images=10, # そのまま\n",
|
| 1505 |
+
" max_pairs=10, # そのまま\n",
|
| 1506 |
+
" max_points=60000, # 1000→50000 に増加(品質向上)★重要\n",
|
| 1507 |
+
" conf_threshold=1.5, # そのまま\n",
|
| 1508 |
+
" preprocess_mode='biplet'\n",
|
| 1509 |
+
" )\n",
|
| 1510 |
+
"\n",
|
| 1511 |
+
" print(\"\\n\" + \"=\"*70)\n",
|
| 1512 |
+
" print(\"PIPELINE COMPLETE\")\n",
|
| 1513 |
+
" print(\"=\"*70)\n",
|
| 1514 |
+
" print(f\"Output directory: {gs_output}\")"
|
| 1515 |
+
],
|
| 1516 |
+
"metadata": {
|
| 1517 |
+
"trusted": true,
|
| 1518 |
+
"id": "n6ZHOb8TjLmI",
|
| 1519 |
+
"colab": {
|
| 1520 |
+
"base_uri": "https://localhost:8080/"
|
| 1521 |
+
},
|
| 1522 |
+
"outputId": "934ddca4-cefd-4083-9922-ee70d466ef15"
|
| 1523 |
+
},
|
| 1524 |
+
"outputs": [
|
| 1525 |
+
{
|
| 1526 |
+
"output_type": "stream",
|
| 1527 |
+
"name": "stdout",
|
| 1528 |
+
"text": [
|
| 1529 |
+
"======================================================================\n",
|
| 1530 |
+
"STEP 0: Image Preprocessing (Biplet Crops)\n",
|
| 1531 |
+
"======================================================================\n",
|
| 1532 |
+
"\n",
|
| 1533 |
+
"=== Generating Biplet Crops (1024x1024) ===\n"
|
| 1534 |
+
]
|
| 1535 |
+
},
|
| 1536 |
+
{
|
| 1537 |
+
"output_type": "stream",
|
| 1538 |
+
"name": "stderr",
|
| 1539 |
+
"text": [
|
| 1540 |
+
"Creating biplets: 100%|██████████| 30/30 [00:03<00:00, 8.01it/s]\n"
|
| 1541 |
+
]
|
| 1542 |
+
},
|
| 1543 |
+
{
|
| 1544 |
+
"output_type": "stream",
|
| 1545 |
+
"name": "stdout",
|
| 1546 |
+
"text": [
|
| 1547 |
+
"\n",
|
| 1548 |
+
"✓ Biplet generation complete:\n",
|
| 1549 |
+
" Source images: 30\n",
|
| 1550 |
+
" Biplet crops generated: 60\n",
|
| 1551 |
+
" Original size distribution: {'1440x1920': 30}\n",
|
| 1552 |
+
"✓ Copied 60 biplet images to /content/output/images\n",
|
| 1553 |
+
"✓ Saved 30 original images to /content/output/original_images\n",
|
| 1554 |
+
"\n",
|
| 1555 |
+
"======================================================================\n",
|
| 1556 |
+
"STEP 1: Loading and Preparing Images\n",
|
| 1557 |
+
"======================================================================\n",
|
| 1558 |
+
"\n",
|
| 1559 |
+
"Loading images from: /content/output/images\n",
|
| 1560 |
+
"⚠️ Limiting from 60 to 10 images\n",
|
| 1561 |
+
"✓ Found 10 images\n",
|
| 1562 |
+
"Loaded 10 images\n",
|
| 1563 |
+
"\n",
|
| 1564 |
+
"======================================================================\n",
|
| 1565 |
+
"STEP 2: Image Pair Selection\n",
|
| 1566 |
+
"======================================================================\n",
|
| 1567 |
+
"\n",
|
| 1568 |
+
"=== Getting Image Pairs with ASMK ===\n",
|
| 1569 |
+
"\n",
|
| 1570 |
+
"=== Loading MASt3R Model ===\n",
|
| 1571 |
+
"Attempting to load: naver/MASt3R_ViTLarge_BaseDecoder_512_catmlpdpt_metric\n",
|
| 1572 |
+
"⚠️ Failed to load MASt3R: tried to load naver/MASt3R_ViTLarge_BaseDecoder_512_catmlpdpt_metric from huggingface, but failed\n",
|
| 1573 |
+
"Trying DUSt3R instead: naver/DUSt3R_ViTLarge_BaseDecoder_512_dpt\n",
|
| 1574 |
+
"✓ Loaded DUSt3R model as fallback\n",
|
| 1575 |
+
"✓ Model loaded on cuda\n",
|
| 1576 |
+
"\n",
|
| 1577 |
+
"=== Extracting MASt3R Features ===\n"
|
| 1578 |
+
]
|
| 1579 |
+
},
|
| 1580 |
+
{
|
| 1581 |
+
"output_type": "stream",
|
| 1582 |
+
"name": "stderr",
|
| 1583 |
+
"text": [
|
| 1584 |
+
"\rFeatures: 0%| | 0/10 [00:00<?, ?it/s]"
|
| 1585 |
+
]
|
| 1586 |
+
},
|
| 1587 |
+
{
|
| 1588 |
+
"output_type": "stream",
|
| 1589 |
+
"name": "stdout",
|
| 1590 |
+
"text": [
|
| 1591 |
+
">> Loading a list of 2 images\n",
|
| 1592 |
+
" - adding /content/output/images/image_001_bottom.jpeg with resolution 1024x1024 --> 224x224\n",
|
| 1593 |
+
" - adding /content/output/images/image_001_bottom.jpeg with resolution 1024x1024 --> 224x224\n",
|
| 1594 |
+
" (Found 2 images)\n",
|
| 1595 |
+
">> Inference with model on 1 image pairs\n"
|
| 1596 |
+
]
|
| 1597 |
+
},
|
| 1598 |
+
{
|
| 1599 |
+
"output_type": "stream",
|
| 1600 |
+
"name": "stderr",
|
| 1601 |
+
"text": [
|
| 1602 |
+
"\n",
|
| 1603 |
+
" 0%| | 0/1 [00:00<?, ?it/s]\u001b[A\n",
|
| 1604 |
+
"100%|██████████| 1/1 [00:00<00:00, 3.55it/s]\n",
|
| 1605 |
+
"Features: 10%|█ | 1/10 [00:00<00:03, 2.89it/s]"
|
| 1606 |
+
]
|
| 1607 |
+
},
|
| 1608 |
+
{
|
| 1609 |
+
"output_type": "stream",
|
| 1610 |
+
"name": "stdout",
|
| 1611 |
+
"text": [
|
| 1612 |
+
">> Loading a list of 2 images\n",
|
| 1613 |
+
" - adding /content/output/images/image_001_top.jpeg with resolution 1024x1024 --> 224x224\n",
|
| 1614 |
+
" - adding /content/output/images/image_001_top.jpeg with resolution 1024x1024 --> 224x224\n",
|
| 1615 |
+
" (Found 2 images)\n",
|
| 1616 |
+
">> Inference with model on 1 image pairs\n"
|
| 1617 |
+
]
|
| 1618 |
+
},
|
| 1619 |
+
{
|
| 1620 |
+
"output_type": "stream",
|
| 1621 |
+
"name": "stderr",
|
| 1622 |
+
"text": [
|
| 1623 |
+
"\n",
|
| 1624 |
+
" 0%| | 0/1 [00:00<?, ?it/s]\u001b[A\n",
|
| 1625 |
+
"100%|██████████| 1/1 [00:00<00:00, 5.07it/s]\n",
|
| 1626 |
+
"Features: 20%|██ | 2/10 [00:00<00:02, 3.35it/s]"
|
| 1627 |
+
]
|
| 1628 |
+
},
|
| 1629 |
+
{
|
| 1630 |
+
"output_type": "stream",
|
| 1631 |
+
"name": "stdout",
|
| 1632 |
+
"text": [
|
| 1633 |
+
">> Loading a list of 2 images\n",
|
| 1634 |
+
" - adding /content/output/images/image_002_bottom.jpeg with resolution 1024x1024 --> 224x224\n",
|
| 1635 |
+
" - adding /content/output/images/image_002_bottom.jpeg with resolution 1024x1024 --> 224x224\n",
|
| 1636 |
+
" (Found 2 images)\n",
|
| 1637 |
+
">> Inference with model on 1 image pairs\n"
|
| 1638 |
+
]
|
| 1639 |
+
},
|
| 1640 |
+
{
|
| 1641 |
+
"output_type": "stream",
|
| 1642 |
+
"name": "stderr",
|
| 1643 |
+
"text": [
|
| 1644 |
+
"\n",
|
| 1645 |
+
" 0%| | 0/1 [00:00<?, ?it/s]\u001b[A\n",
|
| 1646 |
+
"100%|██████████| 1/1 [00:00<00:00, 5.14it/s]\n",
|
| 1647 |
+
"Features: 30%|███ | 3/10 [00:00<00:01, 3.61it/s]"
|
| 1648 |
+
]
|
| 1649 |
+
},
|
| 1650 |
+
{
|
| 1651 |
+
"output_type": "stream",
|
| 1652 |
+
"name": "stdout",
|
| 1653 |
+
"text": [
|
| 1654 |
+
">> Loading a list of 2 images\n",
|
| 1655 |
+
" - adding /content/output/images/image_002_top.jpeg with resolution 1024x1024 --> 224x224\n",
|
| 1656 |
+
" - adding /content/output/images/image_002_top.jpeg with resolution 1024x1024 --> 224x224\n",
|
| 1657 |
+
" (Found 2 images)\n",
|
| 1658 |
+
">> Inference with model on 1 image pairs\n"
|
| 1659 |
+
]
|
| 1660 |
+
},
|
| 1661 |
+
{
|
| 1662 |
+
"output_type": "stream",
|
| 1663 |
+
"name": "stderr",
|
| 1664 |
+
"text": [
|
| 1665 |
+
"\n",
|
| 1666 |
+
" 0%| | 0/1 [00:00<?, ?it/s]\u001b[A\n",
|
| 1667 |
+
"100%|██████████| 1/1 [00:00<00:00, 5.15it/s]\n",
|
| 1668 |
+
"Features: 40%|████ | 4/10 [00:01<00:01, 3.75it/s]"
|
| 1669 |
+
]
|
| 1670 |
+
},
|
| 1671 |
+
{
|
| 1672 |
+
"output_type": "stream",
|
| 1673 |
+
"name": "stdout",
|
| 1674 |
+
"text": [
|
| 1675 |
+
">> Loading a list of 2 images\n",
|
| 1676 |
+
" - adding /content/output/images/image_003_bottom.jpeg with resolution 1024x1024 --> 224x224\n",
|
| 1677 |
+
" - adding /content/output/images/image_003_bottom.jpeg with resolution 1024x1024 --> 224x224\n",
|
| 1678 |
+
" (Found 2 images)\n",
|
| 1679 |
+
">> Inference with model on 1 image pairs\n"
|
| 1680 |
+
]
|
| 1681 |
+
},
|
| 1682 |
+
{
|
| 1683 |
+
"output_type": "stream",
|
| 1684 |
+
"name": "stderr",
|
| 1685 |
+
"text": [
|
| 1686 |
+
"\n",
|
| 1687 |
+
" 0%| | 0/1 [00:00<?, ?it/s]\u001b[A\n",
|
| 1688 |
+
"100%|██████████| 1/1 [00:00<00:00, 5.22it/s]\n",
|
| 1689 |
+
"Features: 50%|█████ | 5/10 [00:01<00:01, 3.84it/s]"
|
| 1690 |
+
]
|
| 1691 |
+
},
|
| 1692 |
+
{
|
| 1693 |
+
"output_type": "stream",
|
| 1694 |
+
"name": "stdout",
|
| 1695 |
+
"text": [
|
| 1696 |
+
">> Loading a list of 2 images\n",
|
| 1697 |
+
" - adding /content/output/images/image_003_top.jpeg with resolution 1024x1024 --> 224x224\n",
|
| 1698 |
+
" - adding /content/output/images/image_003_top.jpeg with resolution 1024x1024 --> 224x224\n",
|
| 1699 |
+
" (Found 2 images)\n",
|
| 1700 |
+
">> Inference with model on 1 image pairs\n"
|
| 1701 |
+
]
|
| 1702 |
+
},
|
| 1703 |
+
{
|
| 1704 |
+
"output_type": "stream",
|
| 1705 |
+
"name": "stderr",
|
| 1706 |
+
"text": [
|
| 1707 |
+
"\n",
|
| 1708 |
+
" 0%| | 0/1 [00:00<?, ?it/s]\u001b[A\n",
|
| 1709 |
+
"100%|██████████| 1/1 [00:00<00:00, 5.22it/s]\n",
|
| 1710 |
+
"Features: 60%|██████ | 6/10 [00:01<00:01, 3.85it/s]"
|
| 1711 |
+
]
|
| 1712 |
+
},
|
| 1713 |
+
{
|
| 1714 |
+
"output_type": "stream",
|
| 1715 |
+
"name": "stdout",
|
| 1716 |
+
"text": [
|
| 1717 |
+
">> Loading a list of 2 images\n",
|
| 1718 |
+
" - adding /content/output/images/image_004_bottom.jpeg with resolution 1024x1024 --> 224x224\n",
|
| 1719 |
+
" - adding /content/output/images/image_004_bottom.jpeg with resolution 1024x1024 --> 224x224\n",
|
| 1720 |
+
" (Found 2 images)\n",
|
| 1721 |
+
">> Inference with model on 1 image pairs\n"
|
| 1722 |
+
]
|
| 1723 |
+
},
|
| 1724 |
+
{
|
| 1725 |
+
"output_type": "stream",
|
| 1726 |
+
"name": "stderr",
|
| 1727 |
+
"text": [
|
| 1728 |
+
"\n",
|
| 1729 |
+
" 0%| | 0/1 [00:00<?, ?it/s]\u001b[A\n",
|
| 1730 |
+
"100%|██████████| 1/1 [00:00<00:00, 5.19it/s]\n",
|
| 1731 |
+
"Features: 70%|███████ | 7/10 [00:01<00:00, 3.89it/s]"
|
| 1732 |
+
]
|
| 1733 |
+
},
|
| 1734 |
+
{
|
| 1735 |
+
"output_type": "stream",
|
| 1736 |
+
"name": "stdout",
|
| 1737 |
+
"text": [
|
| 1738 |
+
">> Loading a list of 2 images\n",
|
| 1739 |
+
" - adding /content/output/images/image_004_top.jpeg with resolution 1024x1024 --> 224x224\n",
|
| 1740 |
+
" - adding /content/output/images/image_004_top.jpeg with resolution 1024x1024 --> 224x224\n",
|
| 1741 |
+
" (Found 2 images)\n",
|
| 1742 |
+
">> Inference with model on 1 image pairs\n"
|
| 1743 |
+
]
|
| 1744 |
+
},
|
| 1745 |
+
{
|
| 1746 |
+
"output_type": "stream",
|
| 1747 |
+
"name": "stderr",
|
| 1748 |
+
"text": [
|
| 1749 |
+
"\n",
|
| 1750 |
+
" 0%| | 0/1 [00:00<?, ?it/s]\u001b[A\n",
|
| 1751 |
+
"100%|██████████| 1/1 [00:00<00:00, 5.24it/s]\n",
|
| 1752 |
+
"Features: 80%|████████ | 8/10 [00:02<00:00, 3.93it/s]"
|
| 1753 |
+
]
|
| 1754 |
+
},
|
| 1755 |
+
{
|
| 1756 |
+
"output_type": "stream",
|
| 1757 |
+
"name": "stdout",
|
| 1758 |
+
"text": [
|
| 1759 |
+
">> Loading a list of 2 images\n",
|
| 1760 |
+
" - adding /content/output/images/image_005_bottom.jpeg with resolution 1024x1024 --> 224x224\n",
|
| 1761 |
+
" - adding /content/output/images/image_005_bottom.jpeg with resolution 1024x1024 --> 224x224\n",
|
| 1762 |
+
" (Found 2 images)\n",
|
| 1763 |
+
">> Inference with model on 1 image pairs\n"
|
| 1764 |
+
]
|
| 1765 |
+
},
|
| 1766 |
+
{
|
| 1767 |
+
"output_type": "stream",
|
| 1768 |
+
"name": "stderr",
|
| 1769 |
+
"text": [
|
| 1770 |
+
"\n",
|
| 1771 |
+
" 0%| | 0/1 [00:00<?, ?it/s]\u001b[A\n",
|
| 1772 |
+
"100%|██████████| 1/1 [00:00<00:00, 5.18it/s]\n",
|
| 1773 |
+
"Features: 90%|█████████ | 9/10 [00:02<00:00, 3.94it/s]"
|
| 1774 |
+
]
|
| 1775 |
+
},
|
| 1776 |
+
{
|
| 1777 |
+
"output_type": "stream",
|
| 1778 |
+
"name": "stdout",
|
| 1779 |
+
"text": [
|
| 1780 |
+
">> Loading a list of 2 images\n",
|
| 1781 |
+
" - adding /content/output/images/image_005_top.jpeg with resolution 1024x1024 --> 224x224\n",
|
| 1782 |
+
" - adding /content/output/images/image_005_top.jpeg with resolution 1024x1024 --> 224x224\n",
|
| 1783 |
+
" (Found 2 images)\n",
|
| 1784 |
+
">> Inference with model on 1 image pairs\n"
|
| 1785 |
+
]
|
| 1786 |
+
},
|
| 1787 |
+
{
|
| 1788 |
+
"output_type": "stream",
|
| 1789 |
+
"name": "stderr",
|
| 1790 |
+
"text": [
|
| 1791 |
+
"\n",
|
| 1792 |
+
" 0%| | 0/1 [00:00<?, ?it/s]\u001b[A\n",
|
| 1793 |
+
"100%|██████████| 1/1 [00:00<00:00, 4.74it/s]\n",
|
| 1794 |
+
"Features: 100%|██████████| 10/10 [00:02<00:00, 3.78it/s]\n"
|
| 1795 |
+
]
|
| 1796 |
+
},
|
| 1797 |
+
{
|
| 1798 |
+
"output_type": "stream",
|
| 1799 |
+
"name": "stdout",
|
| 1800 |
+
"text": [
|
| 1801 |
+
"✓ Extracted features for 10 images\n",
|
| 1802 |
+
" Feature shape: torch.Size([1, 224, 224])\n",
|
| 1803 |
+
"\n",
|
| 1804 |
+
"=== Computing ASMK Similarity ===\n",
|
| 1805 |
+
"Global features shape: (10, 224)\n",
|
| 1806 |
+
"Similarity matrix shape: (10, 10)\n",
|
| 1807 |
+
"Similarity range: [-1.000, 0.998]\n",
|
| 1808 |
+
"✓ Built 45 unique pairs\n",
|
| 1809 |
+
"Limited to 10 pairs\n",
|
| 1810 |
+
"Selected 10 image pairs\n",
|
| 1811 |
+
"\n",
|
| 1812 |
+
"======================================================================\n",
|
| 1813 |
+
"STEP 3: MASt3R 3D Reconstruction\n",
|
| 1814 |
+
"======================================================================\n",
|
| 1815 |
+
"\n",
|
| 1816 |
+
"=== Loading MASt3R Model ===\n",
|
| 1817 |
+
"Attempting to load: naver/MASt3R_ViTLarge_BaseDecoder_512_catmlpdpt_metric\n",
|
| 1818 |
+
"⚠️ Failed to load MASt3R: tried to load naver/MASt3R_ViTLarge_BaseDecoder_512_catmlpdpt_metric from huggingface, but failed\n",
|
| 1819 |
+
"Trying DUSt3R instead: naver/DUSt3R_ViTLarge_BaseDecoder_512_dpt\n",
|
| 1820 |
+
"✓ Loaded DUSt3R model as fallback\n",
|
| 1821 |
+
"✓ Model loaded on cuda\n",
|
| 1822 |
+
"\n",
|
| 1823 |
+
"=== Running MASt3R Reconstruction ===\n",
|
| 1824 |
+
"Selected 10 unique images from 10 pairs\n",
|
| 1825 |
+
">> Loading a list of 10 images\n",
|
| 1826 |
+
" - adding /content/output/images/image_001_bottom.jpeg with resolution 1024x1024 --> 224x224\n",
|
| 1827 |
+
" - adding /content/output/images/image_001_top.jpeg with resolution 1024x1024 --> 224x224\n",
|
| 1828 |
+
" - adding /content/output/images/image_002_bottom.jpeg with resolution 1024x1024 --> 224x224\n",
|
| 1829 |
+
" - adding /content/output/images/image_002_top.jpeg with resolution 1024x1024 --> 224x224\n",
|
| 1830 |
+
" - adding /content/output/images/image_003_bottom.jpeg with resolution 1024x1024 --> 224x224\n",
|
| 1831 |
+
" - adding /content/output/images/image_003_top.jpeg with resolution 1024x1024 --> 224x224\n",
|
| 1832 |
+
" - adding /content/output/images/image_004_bottom.jpeg with resolution 1024x1024 --> 224x224\n",
|
| 1833 |
+
" - adding /content/output/images/image_004_top.jpeg with resolution 1024x1024 --> 224x224\n",
|
| 1834 |
+
" - adding /content/output/images/image_005_bottom.jpeg with resolution 1024x1024 --> 224x224\n",
|
| 1835 |
+
" - adding /content/output/images/image_005_top.jpeg with resolution 1024x1024 --> 224x224\n",
|
| 1836 |
+
" (Found 10 images)\n",
|
| 1837 |
+
"Created 10 image pairs\n",
|
| 1838 |
+
"Running inference on 10 pairs...\n",
|
| 1839 |
+
">> Inference with model on 10 image pairs\n"
|
| 1840 |
+
]
|
| 1841 |
+
},
|
| 1842 |
+
{
|
| 1843 |
+
"output_type": "stream",
|
| 1844 |
+
"name": "stderr",
|
| 1845 |
+
"text": [
|
| 1846 |
+
"100%|██████████| 10/10 [00:02<00:00, 4.87it/s]\n"
|
| 1847 |
+
]
|
| 1848 |
+
},
|
| 1849 |
+
{
|
| 1850 |
+
"output_type": "stream",
|
| 1851 |
+
"name": "stdout",
|
| 1852 |
+
"text": [
|
| 1853 |
+
"✓ Processed 5 predictions\n",
|
| 1854 |
+
"Running global alignment...\n",
|
| 1855 |
+
" init edge (0*,2*) score=25.850017547607422\n",
|
| 1856 |
+
" init edge (0,8*) score=21.762142181396484\n",
|
| 1857 |
+
" init edge (0,5*) score=11.348612785339355\n",
|
| 1858 |
+
" init edge (5,7*) score=11.391077995300293\n",
|
| 1859 |
+
" init edge (3*,7) score=21.57988929748535\n",
|
| 1860 |
+
" init edge (3,4*) score=18.01618194580078\n",
|
| 1861 |
+
" init edge (4,6*) score=16.61330795288086\n",
|
| 1862 |
+
" init edge (1*,6) score=9.23505687713623\n",
|
| 1863 |
+
" init edge (4,9*) score=5.915617942810059\n",
|
| 1864 |
+
" init loss = 0.01782490685582161\n",
|
| 1865 |
+
"Global alignement - optimizing for:\n",
|
| 1866 |
+
"['pw_poses', 'im_depthmaps', 'im_poses', 'im_focals']\n"
|
| 1867 |
+
]
|
| 1868 |
+
},
|
| 1869 |
+
{
|
| 1870 |
+
"output_type": "stream",
|
| 1871 |
+
"name": "stderr",
|
| 1872 |
+
"text": [
|
| 1873 |
+
"100%|██████████| 50/50 [00:00<00:00, 53.08it/s, lr=1.08654e-05 loss=0.0131432]\n"
|
| 1874 |
+
]
|
| 1875 |
+
},
|
| 1876 |
+
{
|
| 1877 |
+
"output_type": "stream",
|
| 1878 |
+
"name": "stdout",
|
| 1879 |
+
"text": [
|
| 1880 |
+
"✓ Alignment complete (loss: 0.013143)\n",
|
| 1881 |
+
"\n",
|
| 1882 |
+
"======================================================================\n",
|
| 1883 |
+
"STEP 4: Converting to COLMAP (PINHOLE)\n",
|
| 1884 |
+
"======================================================================\n",
|
| 1885 |
+
"\n",
|
| 1886 |
+
"=== Extracting Camera Parameters ===\n",
|
| 1887 |
+
"✓ Extracted camera parameters for 10 images\n",
|
| 1888 |
+
"✓ Total 3D points: 501760\n",
|
| 1889 |
+
"✓ After confidence filtering (>1.5): 468537 points\n",
|
| 1890 |
+
"⚠️ Limiting points from 468537 to 60000\n",
|
| 1891 |
+
"Final point count: 60000\n",
|
| 1892 |
+
"✓ Written cameras.bin\n",
|
| 1893 |
+
"✓ Written images.bin (10 images)\n",
|
| 1894 |
+
"✓ Written points3D.bin (60000 points)\n",
|
| 1895 |
+
"\n",
|
| 1896 |
+
"✓ COLMAP sparse reconstruction saved\n",
|
| 1897 |
+
"\n",
|
| 1898 |
+
"======================================================================\n",
|
| 1899 |
+
"STEP 5: Running Gaussian Splatting\n",
|
| 1900 |
+
"======================================================================\n",
|
| 1901 |
+
"\n",
|
| 1902 |
+
"=== Running Gaussian Splatting ===\n",
|
| 1903 |
+
"Command: python /content/gaussian-splatting/train.py -s /content/output -m /content/output/gaussian_splatting --iterations 2000 --eval\n",
|
| 1904 |
+
" Source: /content/output\n",
|
| 1905 |
+
" Output: /content/output/gaussian_splatting\n",
|
| 1906 |
+
"\n",
|
| 1907 |
+
"✓ Gaussian Splatting complete\n",
|
| 1908 |
+
"\n",
|
| 1909 |
+
"✓ Point cloud directory found: /content/output/gaussian_splatting/point_cloud\n",
|
| 1910 |
+
" ✓ iteration_1000/point_cloud.ply (0.40 MB)\n",
|
| 1911 |
+
" ✓ iteration_1200/point_cloud.ply (0.79 MB)\n",
|
| 1912 |
+
" ✓ iteration_1300/point_cloud.ply (0.85 MB)\n",
|
| 1913 |
+
" ✓ iteration_1400/point_cloud.ply (1.38 MB)\n",
|
| 1914 |
+
" ✓ iteration_1500/point_cloud.ply (1.37 MB)\n",
|
| 1915 |
+
" ✓ iteration_1600/point_cloud.ply (1.57 MB)\n",
|
| 1916 |
+
" ✓ iteration_1700/point_cloud.ply (1.90 MB)\n",
|
| 1917 |
+
" ✓ iteration_1800/point_cloud.ply (3.54 MB)\n",
|
| 1918 |
+
" ✓ iteration_1900/point_cloud.ply (3.31 MB)\n",
|
| 1919 |
+
" ✓ iteration_2000/point_cloud.ply (5.91 MB)\n",
|
| 1920 |
+
"\n",
|
| 1921 |
+
"======================================================================\n",
|
| 1922 |
+
"PIPELINE COMPLETE\n",
|
| 1923 |
+
"======================================================================\n",
|
| 1924 |
+
"✓ Point cloud generated: /content/output/gaussian_splatting/point_cloud/iteration_2000/point_cloud.ply\n",
|
| 1925 |
+
" Size: 5.91 MB\n",
|
| 1926 |
+
"\n",
|
| 1927 |
+
"Output directory structure:\n",
|
| 1928 |
+
" /content/output/\n",
|
| 1929 |
+
" ├── images/ (processed images)\n",
|
| 1930 |
+
" ├── original_images/ (original source images)\n",
|
| 1931 |
+
" ├── sparse/0/ (COLMAP data)\n",
|
| 1932 |
+
" └── gaussian_splatting/ (GS output)\n",
|
| 1933 |
+
"\n",
|
| 1934 |
+
"======================================================================\n",
|
| 1935 |
+
"PIPELINE COMPLETE\n",
|
| 1936 |
+
"======================================================================\n",
|
| 1937 |
+
"Output directory: /content/output/gaussian_splatting\n"
|
| 1938 |
+
]
|
| 1939 |
+
}
|
| 1940 |
+
],
|
| 1941 |
+
"execution_count": 32
|
| 1942 |
+
},
|
| 1943 |
+
{
|
| 1944 |
+
"cell_type": "markdown",
|
| 1945 |
+
"source": [],
|
| 1946 |
+
"metadata": {
|
| 1947 |
+
"id": "aD_jlGNwzfvf"
|
| 1948 |
+
}
|
| 1949 |
+
},
|
| 1950 |
+
{
|
| 1951 |
+
"cell_type": "code",
|
| 1952 |
+
"source": [],
|
| 1953 |
+
"metadata": {
|
| 1954 |
+
"trusted": true,
|
| 1955 |
+
"id": "Ontdbh48jLmI"
|
| 1956 |
+
},
|
| 1957 |
+
"outputs": [],
|
| 1958 |
+
"execution_count": 16
|
| 1959 |
+
},
|
| 1960 |
+
{
|
| 1961 |
+
"cell_type": "markdown",
|
| 1962 |
+
"source": [
|
| 1963 |
+
"\n",
|
| 1964 |
+
"\n",
|
| 1965 |
+
"## 🔧 主要な修正:\n",
|
| 1966 |
+
"\n",
|
| 1967 |
+
"### 1. **特徴量抽出の修正 (CELL 12)**\n",
|
| 1968 |
+
"- RGB画像 `[H, W, 3]` が返される問題を修正\n",
|
| 1969 |
+
"- 特徴量次元が小さい場合は自動的に64次元に拡張\n",
|
| 1970 |
+
"- より堅牢なエラーハンドリング\n",
|
| 1971 |
+
"\n",
|
| 1972 |
+
"### 2. **ASMK類似度計算の修正 (CELL 13)**\n",
|
| 1973 |
+
"- Codebookの使用を削除し、シンプルなコサイン類似度に変更\n",
|
| 1974 |
+
"- 次元ミスマッチエラーを完全に解消\n",
|
| 1975 |
+
"- 動的な特徴量次元に対応\n",
|
| 1976 |
+
"\n",
|
| 1977 |
+
"### 3. **カメラパラメータの修正 (CELL 15)**\n",
|
| 1978 |
+
"- 画像サイズ情報を明示的に保存 (`width`, `height`)\n",
|
| 1979 |
+
"- より堅牢なエラーハンドリング\n",
|
| 1980 |
+
"\n",
|
| 1981 |
+
"### 4. **コード構造の改善**\n",
|
| 1982 |
+
"- 各セルを独立して実行可能に\n",
|
| 1983 |
+
"- メモリ管理の最適化\n",
|
| 1984 |
+
"- エラーメッセージの改善\n",
|
| 1985 |
+
"\n",
|
| 1986 |
+
"## 📋 使用方法:\n",
|
| 1987 |
+
"\n",
|
| 1988 |
+
"1. **セル1**: 依存関係をインストール\n",
|
| 1989 |
+
"2. **セル2**: カーネルを再起動(コメント)\n",
|
| 1990 |
+
"3. **セル3-19**: 順番に実行\n",
|
| 1991 |
+
"4. **セル20**: パイプラインを実行\n",
|
| 1992 |
+
"\n",
|
| 1993 |
+
"## ✨ 改善点:\n",
|
| 1994 |
+
"\n",
|
| 1995 |
+
"- ✅ ASMK失敗エラーを完全に解決\n",
|
| 1996 |
+
"- ✅ 特徴量次元の動的対応\n",
|
| 1997 |
+
"- ✅ メモリ効率の改善\n",
|
| 1998 |
+
"- ✅ より詳細なログ出力\n",
|
| 1999 |
+
"- ✅ エラー時の自動リカバリー\n",
|
| 2000 |
+
"\n"
|
| 2001 |
+
],
|
| 2002 |
+
"metadata": {
|
| 2003 |
+
"id": "K-TGZRlcjLmI"
|
| 2004 |
+
}
|
| 2005 |
+
}
|
| 2006 |
+
]
|
| 2007 |
+
}
|