codeShare commited on
Commit
cd4365a
ยท
verified ยท
1 Parent(s): 979f98b

Upload Qwen destill.ipynb

Browse files
Files changed (1) hide show
  1. Qwen destill.ipynb +1 -1
Qwen destill.ipynb CHANGED
@@ -1 +1 @@
1
- {"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"provenance":[],"gpuType":"T4","mount_file_id":"1rORehICZ99xZsrwMfy2w8Jxg6M55d81L","authorship_tag":"ABX9TyPIIjhGAQMr0WHONJwg0qwI"},"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"name":"python"},"accelerator":"GPU","widgets":{"application/vnd.jupyter.widget-state+json":{"5a57ab881be44b01aeff7c89911e0b1f":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_3ec66069ecb7475398fc4d212abb29db","IPY_MODEL_505aa87c8eec494aa74ce070e733e097","IPY_MODEL_c51e03bd3586437c94ab4c1f4ca5a9f3"],"layout":"IPY_MODEL_b2721bd3cc2347caa42d4208cf4c14d2"}},"3ec66069ecb7475398fc4d212abb29db":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_46c156a306734b3591062a1d15ec1fad","placeholder":"โ€‹","style":"IPY_MODEL_126c96d1284a4217b93beb6e5c963067","value":"Downloadโ€‡complete:โ€‡100%"}},"505aa87c8eec494aa74ce070e733e097":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","descripti
on_tooltip":null,"layout":"IPY_MODEL_6fc583d920a54c9facca3db116c7351e","max":1,"min":0,"orientation":"horizontal","style":"IPY_MODEL_034c29e8a7ec4a289e8b14867994a573","value":1}},"c51e03bd3586437c94ab4c1f4ca5a9f3":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_e43e4f13416248509da55cfff64baa99","placeholder":"โ€‹","style":"IPY_MODEL_5e65cb52ae24415ca0a43cc875536cdb","value":"โ€‡7.75G/7.75Gโ€‡[00:55&lt;00:00,โ€‡156MB/s]"}},"b2721bd3cc2347caa42d4208cf4c14d2":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"46c156a306734b3591062a1d15ec1fad":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widget
s/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"126c96d1284a4217b93beb6e5c963067":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"6fc583d920a54c9facca3db116c7351e":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justif
y_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":"20px"}},"034c29e8a7ec4a289e8b14867994a573":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"e43e4f13416248509da55cfff64baa99":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"5e65cb52ae24415ca0a43cc875536cdb":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.
0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"36332ad9f4434acb90b08563af543768":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_ddcecded79ae4849a9e8e26685a3e054","IPY_MODEL_c873fd67f72a4d95b996cb8d367d28f1","IPY_MODEL_94abc45c6e064f8aa50ff2ba509d0222"],"layout":"IPY_MODEL_7c2572811faa4b73807235b6104ae4e8"}},"ddcecded79ae4849a9e8e26685a3e054":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_88cac50f8e3f48bf8fb31cf06b4e4b14","placeholder":"โ€‹","style":"IPY_MODEL_7f5b837700d84fe8bf8350752b317db2","value":"Fetchingโ€‡17โ€‡files:โ€‡100%"}},"c873fd67f72a4d95b996cb8d367d28f1":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_850183c35081499cb53b1dd3b03a6c5b","max":17,"min":0,"orientation":"horizontal","style":"IPY_MODEL_9925862a169b4e05a3158a3b3867ac7f","value
":17}},"94abc45c6e064f8aa50ff2ba509d0222":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_57bf1a9c117c4e87a38cf16a4b4cbd8f","placeholder":"โ€‹","style":"IPY_MODEL_5df978dfc8f345459346adb5f669ef0b","value":"โ€‡17/17โ€‡[00:55&lt;00:00,โ€‡โ€‡3.70s/it]"}},"7c2572811faa4b73807235b6104ae4e8":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"88cac50f8e3f48bf8fb31cf06b4e4b14":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"Lay
outView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"7f5b837700d84fe8bf8350752b317db2":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"850183c35081499cb53b1dd3b03a6c5b":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,
"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"9925862a169b4e05a3158a3b3867ac7f":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"57bf1a9c117c4e87a38cf16a4b4cbd8f":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"5df978dfc8f345459346adb5f669ef0b":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":"
"}},"a0c17740fb9943a989d3be4eeeeb14bc":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_d955682711ab421d8083e2e199421fc5","IPY_MODEL_1953edec87904767bbbbe6f5a36d2821","IPY_MODEL_bc65ab1b27cf4076b4c3231993367de3"],"layout":"IPY_MODEL_013e92ef37b9477da75a39295f270aa0"}},"d955682711ab421d8083e2e199421fc5":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_23093592e9a848d6a22235624f1cc98a","placeholder":"โ€‹","style":"IPY_MODEL_103e1256fb4b42798b04f9d1f12cf8cc","value":"Loadingโ€‡pipelineโ€‡components...:โ€‡100%"}},"1953edec87904767bbbbe6f5a36d2821":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_c8204357f44d42fa855e88e26b01fd29","max":5,"min":0,"orientation":"horizontal","style":"IPY_MODEL_4279c5c22e7643f38f3e5d5ed6fa4f77","value":5}},"bc65ab1b27cf4076b4c3231993367de3":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes"
:[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_108d722769554f58afd50e3193903276","placeholder":"โ€‹","style":"IPY_MODEL_eeb14f33be7443939616251ee838f795","value":"โ€‡5/5โ€‡[00:02&lt;00:00,โ€‡โ€‡1.80it/s]"}},"013e92ef37b9477da75a39295f270aa0":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"23093592e9a848d6a22235624f1cc98a":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_
columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"103e1256fb4b42798b04f9d1f12cf8cc":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"c8204357f44d42fa855e88e26b01fd29":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"4279c5c22e7643f38f3e5d5ed6fa4
f77":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"108d722769554f58afd50e3193903276":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"eeb14f33be7443939616251ee838f795":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"12a34df797d347a7bd12966f9c159659":{"model_module":"@jupyter-widgets/controls","model_name":"HBoxModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"
_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HBoxModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HBoxView","box_style":"","children":["IPY_MODEL_60a55a28f19c4aee8c83944c550c69ad","IPY_MODEL_4d614028501b46e08398189a22497ccf","IPY_MODEL_baa986ea24ab47afaaba225d57a42370"],"layout":"IPY_MODEL_595413bf606b453cb7f585f352b0786b"}},"60a55a28f19c4aee8c83944c550c69ad":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_95da5db16164470ca0df0a2e302fd0fb","placeholder":"โ€‹","style":"IPY_MODEL_18f9ac2d082f4cefbc65c7a6485e3bec","value":"Loadingโ€‡weights:โ€‡100%"}},"4d614028501b46e08398189a22497ccf":{"model_module":"@jupyter-widgets/controls","model_name":"FloatProgressModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"FloatProgressModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_module_version":"1.5.0","_view_name":"ProgressView","bar_style":"success","description":"","description_tooltip":null,"layout":"IPY_MODEL_afaa7a71346c41ff84e8e3c444c6201a","max":398,"min":0,"orientation":"horizontal","style":"IPY_MODEL_257da198c0a64e73af3e2df623aae357","value":398}},"baa986ea24ab47afaaba225d57a42370":{"model_module":"@jupyter-widgets/controls","model_name":"HTMLModel","model_module_version":"1.5.0","state":{"_dom_classes":[],"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"HTMLModel","_view_count":null,"_view_module":"@jupyter-widgets/controls","_view_modul
e_version":"1.5.0","_view_name":"HTMLView","description":"","description_tooltip":null,"layout":"IPY_MODEL_054b1053805d42e2838f98adc5cb0088","placeholder":"โ€‹","style":"IPY_MODEL_122cf330c868422d967b9624d3a34194","value":"โ€‡398/398โ€‡[00:00&lt;00:00,โ€‡466.45it/s]"}},"595413bf606b453cb7f585f352b0786b":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"95da5db16164470ca0df0a2e302fd0fb":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_templa
te_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"18f9ac2d082f4cefbc65c7a6485e3bec":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}},"afaa7a71346c41ff84e8e3c444c6201a":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"257da198c0a64e73af3e2df623aae357":{"model_module":"@jupyter-widgets/controls","model_name":"ProgressStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_mo
dule_version":"1.5.0","_model_name":"ProgressStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","bar_color":null,"description_width":""}},"054b1053805d42e2838f98adc5cb0088":{"model_module":"@jupyter-widgets/base","model_name":"LayoutModel","model_module_version":"1.2.0","state":{"_model_module":"@jupyter-widgets/base","_model_module_version":"1.2.0","_model_name":"LayoutModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"LayoutView","align_content":null,"align_items":null,"align_self":null,"border":null,"bottom":null,"display":null,"flex":null,"flex_flow":null,"grid_area":null,"grid_auto_columns":null,"grid_auto_flow":null,"grid_auto_rows":null,"grid_column":null,"grid_gap":null,"grid_row":null,"grid_template_areas":null,"grid_template_columns":null,"grid_template_rows":null,"height":null,"justify_content":null,"justify_items":null,"left":null,"margin":null,"max_height":null,"max_width":null,"min_height":null,"min_width":null,"object_fit":null,"object_position":null,"order":null,"overflow":null,"overflow_x":null,"overflow_y":null,"padding":null,"right":null,"top":null,"visibility":null,"width":null}},"122cf330c868422d967b9624d3a34194":{"model_module":"@jupyter-widgets/controls","model_name":"DescriptionStyleModel","model_module_version":"1.5.0","state":{"_model_module":"@jupyter-widgets/controls","_model_module_version":"1.5.0","_model_name":"DescriptionStyleModel","_view_count":null,"_view_module":"@jupyter-widgets/base","_view_module_version":"1.2.0","_view_name":"StyleView","description_width":""}}}}},"cells":[{"cell_type":"code","source":["# ==================== CELL 1: Setup & Model Loading ====================\n","\n","import os\n","from google.colab import userdata, drive\n","import torch\n","from diffusers import Flux2KleinPipeline, AutoencoderKL\n","from huggingface_hub import hf_hub_download\n","\n","# Mount Google Drive (if 
needed for saving outputs)\n","drive.mount('/content/drive', force_remount=True)\n","\n","print(\"๐Ÿ” Checking GPU...\")\n","!nvidia-smi\n","\n","# Use HF_TOKEN from Colab Secrets (no login() call needed)\n","os.environ[\"HF_TOKEN\"] = userdata.get(\"HF_TOKEN\")\n","\n","# Install/update latest diffusers + torch (for Tesla T4 compatibility)\n","!pip install -U diffusers transformers accelerate torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu121\n","\n","device = \"cuda\"\n","dtype = torch.bfloat16 # Best balance for Tesla T4 + Flux models\n","\n","print(\"๐Ÿ“ฆ Loading FLUX.2 [klein] 4B pipeline...\")\n","\n","# Option A: Simple full pipeline load (recommended first try)\n","pipe = Flux2KleinPipeline.from_pretrained(\n"," \"black-forest-labs/FLUX.2-klein-4B\",\n"," torch_dtype=dtype,\n"," # variant=\"fp8\" # uncomment if you want the smaller FP8 version (faster but slightly lower quality)\n",").to(device)\n","\n","# Option B: Load with custom/special VAE (uncomment if you want explicit control or if Option A fails)\n","\"\"\"\n","# Download the special flux2-vae.safetensors if needed\n","vae_path = hf_hub_download(\n"," repo_id=\"Comfy-Org/vae-text-encorder-for-flux-klein-4b\",\n"," filename=\"split_files/vae/flux2-vae.safetensors\",\n"," local_dir=\"/content/models/vae\",\n"," force_download=False\n",")\n","\n","vae = AutoencoderKL.from_single_file(\n"," vae_path,\n"," torch_dtype=dtype\n",")\n","\n","pipe = Flux2KleinPipeline.from_pretrained(\n"," \"black-forest-labs/FLUX.2-klein-4B\",\n"," vae=vae, # override with the special Flux.2 VAE\n"," torch_dtype=dtype,\n",").to(device)\n","\"\"\"\n","\n","# Memory optimizations for Tesla T4 (highly recommended)\n","pipe.vae.enable_slicing()\n","pipe.vae.enable_tiling()\n","\n","print(\"โœ… FLUX.2 [klein] 4B loaded successfully!\")\n","print(f\" Device: {device} | Dtype: {dtype}\")\n","print(\" VAE slicing + tiling enabled for lower VRAM 
usage.\")"],"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":1000,"referenced_widgets":["5a57ab881be44b01aeff7c89911e0b1f","3ec66069ecb7475398fc4d212abb29db","505aa87c8eec494aa74ce070e733e097","c51e03bd3586437c94ab4c1f4ca5a9f3","b2721bd3cc2347caa42d4208cf4c14d2","46c156a306734b3591062a1d15ec1fad","126c96d1284a4217b93beb6e5c963067","6fc583d920a54c9facca3db116c7351e","034c29e8a7ec4a289e8b14867994a573","e43e4f13416248509da55cfff64baa99","5e65cb52ae24415ca0a43cc875536cdb","36332ad9f4434acb90b08563af543768","ddcecded79ae4849a9e8e26685a3e054","c873fd67f72a4d95b996cb8d367d28f1","94abc45c6e064f8aa50ff2ba509d0222","7c2572811faa4b73807235b6104ae4e8","88cac50f8e3f48bf8fb31cf06b4e4b14","7f5b837700d84fe8bf8350752b317db2","850183c35081499cb53b1dd3b03a6c5b","9925862a169b4e05a3158a3b3867ac7f","57bf1a9c117c4e87a38cf16a4b4cbd8f","5df978dfc8f345459346adb5f669ef0b","a0c17740fb9943a989d3be4eeeeb14bc","d955682711ab421d8083e2e199421fc5","1953edec87904767bbbbe6f5a36d2821","bc65ab1b27cf4076b4c3231993367de3","013e92ef37b9477da75a39295f270aa0","23093592e9a848d6a22235624f1cc98a","103e1256fb4b42798b04f9d1f12cf8cc","c8204357f44d42fa855e88e26b01fd29","4279c5c22e7643f38f3e5d5ed6fa4f77","108d722769554f58afd50e3193903276","eeb14f33be7443939616251ee838f795","12a34df797d347a7bd12966f9c159659","60a55a28f19c4aee8c83944c550c69ad","4d614028501b46e08398189a22497ccf","baa986ea24ab47afaaba225d57a42370","595413bf606b453cb7f585f352b0786b","95da5db16164470ca0df0a2e302fd0fb","18f9ac2d082f4cefbc65c7a6485e3bec","afaa7a71346c41ff84e8e3c444c6201a","257da198c0a64e73af3e2df623aae357","054b1053805d42e2838f98adc5cb0088","122cf330c868422d967b9624d3a34194"]},"id":"BtsjbO4uY53B","outputId":"395397ba-f3e5-459d-fb39-db5994c9edc6"},"execution_count":null,"outputs":[{"output_type":"stream","name":"stderr","text":["Flax classes are deprecated and will be removed in Diffusers v1.0.0. 
We recommend migrating to PyTorch classes or pinning your version of Diffusers.\n","Flax classes are deprecated and will be removed in Diffusers v1.0.0. We recommend migrating to PyTorch classes or pinning your version of Diffusers.\n"]},{"output_type":"stream","name":"stdout","text":["Mounted at /content/drive\n","๐Ÿ” Checking GPU...\n","Tue Mar 31 19:45:07 2026 \n","+-----------------------------------------------------------------------------------------+\n","| NVIDIA-SMI 580.82.07 Driver Version: 580.82.07 CUDA Version: 13.0 |\n","+-----------------------------------------+------------------------+----------------------+\n","| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC |\n","| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. |\n","| | | MIG M. |\n","|=========================================+========================+======================|\n","| 0 Tesla T4 Off | 00000000:00:04.0 Off | 0 |\n","| N/A 38C P8 13W / 70W | 3MiB / 15360MiB | 0% Default |\n","| | | N/A |\n","+-----------------------------------------+------------------------+----------------------+\n","\n","+-----------------------------------------------------------------------------------------+\n","| Processes: |\n","| GPU GI CI PID Type Process name GPU Memory |\n","| ID ID Usage |\n","|=========================================================================================|\n","| No running processes found |\n","+-----------------------------------------------------------------------------------------+\n","Looking in indexes: https://pypi.org/simple, https://download.pytorch.org/whl/cu121\n","Requirement already satisfied: diffusers in /usr/local/lib/python3.12/dist-packages (0.37.1)\n","Requirement already satisfied: transformers in /usr/local/lib/python3.12/dist-packages (5.4.0)\n","Requirement already satisfied: accelerate in /usr/local/lib/python3.12/dist-packages (1.13.0)\n","Requirement already satisfied: torch in /usr/local/lib/python3.12/dist-packages 
(2.11.0)\n","Requirement already satisfied: torchvision in /usr/local/lib/python3.12/dist-packages (0.26.0)\n","Requirement already satisfied: torchaudio in /usr/local/lib/python3.12/dist-packages (2.11.0)\n","Requirement already satisfied: importlib-metadata in /usr/local/lib/python3.12/dist-packages (from diffusers) (8.7.1)\n","Requirement already satisfied: filelock in /usr/local/lib/python3.12/dist-packages (from diffusers) (3.25.2)\n","Requirement already satisfied: httpx<1.0.0 in /usr/local/lib/python3.12/dist-packages (from diffusers) (0.28.1)\n","Requirement already satisfied: huggingface-hub<2.0,>=0.34.0 in /usr/local/lib/python3.12/dist-packages (from diffusers) (1.7.1)\n","Requirement already satisfied: numpy in /usr/local/lib/python3.12/dist-packages (from diffusers) (2.0.2)\n","Requirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.12/dist-packages (from diffusers) (2025.11.3)\n","Requirement already satisfied: requests in /usr/local/lib/python3.12/dist-packages (from diffusers) (2.32.4)\n","Requirement already satisfied: safetensors>=0.3.1 in /usr/local/lib/python3.12/dist-packages (from diffusers) (0.7.0)\n","Requirement already satisfied: Pillow in /usr/local/lib/python3.12/dist-packages (from diffusers) (11.3.0)\n","Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.12/dist-packages (from transformers) (26.0)\n","Requirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.12/dist-packages (from transformers) (6.0.3)\n","Requirement already satisfied: tokenizers<=0.23.0,>=0.22.0 in /usr/local/lib/python3.12/dist-packages (from transformers) (0.22.2)\n","Requirement already satisfied: typer in /usr/local/lib/python3.12/dist-packages (from transformers) (0.24.1)\n","Requirement already satisfied: tqdm>=4.27 in /usr/local/lib/python3.12/dist-packages (from transformers) (4.67.3)\n","Requirement already satisfied: psutil in /usr/local/lib/python3.12/dist-packages (from accelerate) 
(5.9.5)\n","Requirement already satisfied: typing-extensions>=4.10.0 in /usr/local/lib/python3.12/dist-packages (from torch) (4.15.0)\n","Requirement already satisfied: setuptools<82 in /usr/local/lib/python3.12/dist-packages (from torch) (75.2.0)\n","Requirement already satisfied: sympy>=1.13.3 in /usr/local/lib/python3.12/dist-packages (from torch) (1.14.0)\n","Requirement already satisfied: networkx>=2.5.1 in /usr/local/lib/python3.12/dist-packages (from torch) (3.6.1)\n","Requirement already satisfied: jinja2 in /usr/local/lib/python3.12/dist-packages (from torch) (3.1.6)\n","Requirement already satisfied: fsspec>=0.8.5 in /usr/local/lib/python3.12/dist-packages (from torch) (2025.3.0)\n","Requirement already satisfied: cuda-toolkit==13.0.2 in /usr/local/lib/python3.12/dist-packages (from cuda-toolkit[cublas,cudart,cufft,cufile,cupti,curand,cusolver,cusparse,nvjitlink,nvrtc,nvtx]==13.0.2; platform_system == \"Linux\"->torch) (13.0.2)\n","Requirement already satisfied: cuda-bindings<14,>=13.0.3 in /usr/local/lib/python3.12/dist-packages (from torch) (13.2.0)\n","Requirement already satisfied: nvidia-cudnn-cu13==9.19.0.56 in /usr/local/lib/python3.12/dist-packages (from torch) (9.19.0.56)\n","Requirement already satisfied: nvidia-cusparselt-cu13==0.8.0 in /usr/local/lib/python3.12/dist-packages (from torch) (0.8.0)\n","Requirement already satisfied: nvidia-nccl-cu13==2.28.9 in /usr/local/lib/python3.12/dist-packages (from torch) (2.28.9)\n","Requirement already satisfied: nvidia-nvshmem-cu13==3.4.5 in /usr/local/lib/python3.12/dist-packages (from torch) (3.4.5)\n","Requirement already satisfied: triton==3.6.0 in /usr/local/lib/python3.12/dist-packages (from torch) (3.6.0)\n","Requirement already satisfied: nvidia-cublas==13.1.0.3.* in /usr/local/lib/python3.12/dist-packages (from cuda-toolkit[cublas,cudart,cufft,cufile,cupti,curand,cusolver,cusparse,nvjitlink,nvrtc,nvtx]==13.0.2; platform_system == \"Linux\"->torch) (13.1.0.3)\n","Requirement already satisfied: 
nvidia-cuda-runtime==13.0.96.* in /usr/local/lib/python3.12/dist-packages (from cuda-toolkit[cublas,cudart,cufft,cufile,cupti,curand,cusolver,cusparse,nvjitlink,nvrtc,nvtx]==13.0.2; platform_system == \"Linux\"->torch) (13.0.96)\n","Requirement already satisfied: nvidia-cufft==12.0.0.61.* in /usr/local/lib/python3.12/dist-packages (from cuda-toolkit[cublas,cudart,cufft,cufile,cupti,curand,cusolver,cusparse,nvjitlink,nvrtc,nvtx]==13.0.2; platform_system == \"Linux\"->torch) (12.0.0.61)\n","Requirement already satisfied: nvidia-cufile==1.15.1.6.* in /usr/local/lib/python3.12/dist-packages (from cuda-toolkit[cublas,cudart,cufft,cufile,cupti,curand,cusolver,cusparse,nvjitlink,nvrtc,nvtx]==13.0.2; platform_system == \"Linux\"->torch) (1.15.1.6)\n","Requirement already satisfied: nvidia-cuda-cupti==13.0.85.* in /usr/local/lib/python3.12/dist-packages (from cuda-toolkit[cublas,cudart,cufft,cufile,cupti,curand,cusolver,cusparse,nvjitlink,nvrtc,nvtx]==13.0.2; platform_system == \"Linux\"->torch) (13.0.85)\n","Requirement already satisfied: nvidia-curand==10.4.0.35.* in /usr/local/lib/python3.12/dist-packages (from cuda-toolkit[cublas,cudart,cufft,cufile,cupti,curand,cusolver,cusparse,nvjitlink,nvrtc,nvtx]==13.0.2; platform_system == \"Linux\"->torch) (10.4.0.35)\n","Requirement already satisfied: nvidia-cusolver==12.0.4.66.* in /usr/local/lib/python3.12/dist-packages (from cuda-toolkit[cublas,cudart,cufft,cufile,cupti,curand,cusolver,cusparse,nvjitlink,nvrtc,nvtx]==13.0.2; platform_system == \"Linux\"->torch) (12.0.4.66)\n","Requirement already satisfied: nvidia-cusparse==12.6.3.3.* in /usr/local/lib/python3.12/dist-packages (from cuda-toolkit[cublas,cudart,cufft,cufile,cupti,curand,cusolver,cusparse,nvjitlink,nvrtc,nvtx]==13.0.2; platform_system == \"Linux\"->torch) (12.6.3.3)\n","Requirement already satisfied: nvidia-nvjitlink==13.0.88.* in /usr/local/lib/python3.12/dist-packages (from 
cuda-toolkit[cublas,cudart,cufft,cufile,cupti,curand,cusolver,cusparse,nvjitlink,nvrtc,nvtx]==13.0.2; platform_system == \"Linux\"->torch) (13.0.88)\n","Requirement already satisfied: nvidia-cuda-nvrtc==13.0.88.* in /usr/local/lib/python3.12/dist-packages (from cuda-toolkit[cublas,cudart,cufft,cufile,cupti,curand,cusolver,cusparse,nvjitlink,nvrtc,nvtx]==13.0.2; platform_system == \"Linux\"->torch) (13.0.88)\n","Requirement already satisfied: nvidia-nvtx==13.0.85.* in /usr/local/lib/python3.12/dist-packages (from cuda-toolkit[cublas,cudart,cufft,cufile,cupti,curand,cusolver,cusparse,nvjitlink,nvrtc,nvtx]==13.0.2; platform_system == \"Linux\"->torch) (13.0.85)\n","Requirement already satisfied: cuda-pathfinder~=1.1 in /usr/local/lib/python3.12/dist-packages (from cuda-bindings<14,>=13.0.3->torch) (1.4.3)\n","Requirement already satisfied: anyio in /usr/local/lib/python3.12/dist-packages (from httpx<1.0.0->diffusers) (4.12.1)\n","Requirement already satisfied: certifi in /usr/local/lib/python3.12/dist-packages (from httpx<1.0.0->diffusers) (2026.2.25)\n","Requirement already satisfied: httpcore==1.* in /usr/local/lib/python3.12/dist-packages (from httpx<1.0.0->diffusers) (1.0.9)\n","Requirement already satisfied: idna in /usr/local/lib/python3.12/dist-packages (from httpx<1.0.0->diffusers) (3.11)\n","Requirement already satisfied: h11>=0.16 in /usr/local/lib/python3.12/dist-packages (from httpcore==1.*->httpx<1.0.0->diffusers) (0.16.0)\n","Requirement already satisfied: hf-xet<2.0.0,>=1.4.2 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub<2.0,>=0.34.0->diffusers) (1.4.2)\n","Requirement already satisfied: mpmath<1.4,>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from sympy>=1.13.3->torch) (1.3.0)\n","Requirement already satisfied: zipp>=3.20 in /usr/local/lib/python3.12/dist-packages (from importlib-metadata->diffusers) (3.23.0)\n","Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.12/dist-packages (from jinja2->torch) 
(3.0.3)\n","Requirement already satisfied: charset_normalizer<4,>=2 in /usr/local/lib/python3.12/dist-packages (from requests->diffusers) (3.4.6)\n","Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.12/dist-packages (from requests->diffusers) (2.5.0)\n","Requirement already satisfied: click>=8.2.1 in /usr/local/lib/python3.12/dist-packages (from typer->transformers) (8.3.1)\n","Requirement already satisfied: shellingham>=1.3.0 in /usr/local/lib/python3.12/dist-packages (from typer->transformers) (1.5.4)\n","Requirement already satisfied: rich>=12.3.0 in /usr/local/lib/python3.12/dist-packages (from typer->transformers) (13.9.4)\n","Requirement already satisfied: annotated-doc>=0.0.2 in /usr/local/lib/python3.12/dist-packages (from typer->transformers) (0.0.4)\n","Requirement already satisfied: markdown-it-py>=2.2.0 in /usr/local/lib/python3.12/dist-packages (from rich>=12.3.0->typer->transformers) (4.0.0)\n","Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /usr/local/lib/python3.12/dist-packages (from rich>=12.3.0->typer->transformers) (2.19.2)\n","Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.12/dist-packages (from markdown-it-py>=2.2.0->rich>=12.3.0->typer->transformers) (0.1.2)\n","๐Ÿ“ฆ Loading FLUX.2 [klein] 4B pipeline...\n"]},{"output_type":"display_data","data":{"text/plain":["Downloading (incomplete total...): 0.00B [00:00, ?B/s]"],"application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"5a57ab881be44b01aeff7c89911e0b1f"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":["Fetching 17 files: 0%| | 0/17 [00:00<?, ?it/s]"],"application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"36332ad9f4434acb90b08563af543768"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":["Loading pipeline components...: 0%| | 0/5 [00:00<?, 
?it/s]"],"application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"a0c17740fb9943a989d3be4eeeeb14bc"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":["Loading weights: 0%| | 0/398 [00:00<?, ?it/s]"],"application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"12a34df797d347a7bd12966f9c159659"}},"metadata":{}}]},{"cell_type":"code","source":["# ============================= COMBINED CELL 2: TRAIN + EVALUATE + SAVE TO DRIVE =============================\n","# @title Combined: Train Distilled Qwen-family Encoder + Full Evaluation + Save to Drive\n","\n","import os\n","import torch\n","import torch.nn as nn\n","import torch.nn.functional as F\n","import matplotlib.pyplot as plt\n","import seaborn as sns\n","import numpy as np\n","from sklearn.decomposition import PCA\n","from sklearn.metrics.pairwise import cosine_similarity\n","from tqdm import tqdm\n","from google.colab import drive\n","from torch.utils.data import Dataset\n","from transformers import AutoTokenizer, AutoModel, Trainer, TrainingArguments, set_seed\n","from peft import LoraConfig, get_peft_model, PeftModel\n","from datasets import Dataset as HFDataset\n","\n","set_seed(42)\n","drive.mount('/content/drive')\n","\n","print(\"๐Ÿ” Checking GPU...\")\n","!nvidia-smi\n","if not torch.cuda.is_available():\n"," raise RuntimeError(\"No GPU detected!\")\n","\n","# ====================== 1. Load Teacher Embeddings ======================\n","embed_path = \"/content/drive/MyDrive/qwen_embeddings.pt\"\n","data = torch.load(embed_path, weights_only=False)\n","teacher_embeddings = torch.stack(data[\"embeddings\"]) # [250, 1024]\n","texts = data.get(\"texts\", [f\"text_{i}\" for i in range(len(teacher_embeddings))])\n","\n","print(f\"โœ… Loaded {len(texts)} texts from Qwen teacher\")\n","\n","hf_dataset = HFDataset.from_dict({\"text\": texts})\n","\n","# ====================== 2. 
Student Model (Qwen2.5-0.5B + LoRA) ======================\n","student_model_name = \"Qwen/Qwen2.5-0.5B\"\n","student_tokenizer = AutoTokenizer.from_pretrained(student_model_name)\n","base_model = AutoModel.from_pretrained(student_model_name, torch_dtype=torch.float32, device_map=\"auto\", trust_remote_code=True)\n","\n","lora_config = LoraConfig(\n"," r=16,\n"," lora_alpha=32,\n"," target_modules=[\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\"],\n"," lora_dropout=0.05,\n"," bias=\"none\",\n"," task_type=\"FEATURE_EXTRACTION\"\n",")\n","student_model = get_peft_model(base_model, lora_config)\n","\n","hidden_size = student_model.config.hidden_size\n","projection = nn.Linear(hidden_size, 1024).to(student_model.device)\n","projection.train()\n","\n","print(f\"๐Ÿ‘จโ€๐ŸŽ“ Student: {student_model_name} + LoRA + projection ({hidden_size}โ†’1024)\")\n","\n","# ====================== 3. Dataset ======================\n","class DistillationDataset(Dataset):\n"," def __init__(self, hf_dataset, tokenizer, teacher_embs, max_length=512):\n"," self.dataset = hf_dataset\n"," self.tokenizer = tokenizer\n"," self.teacher_embs = teacher_embs\n"," self.max_length = max_length\n","\n"," def __len__(self): return len(self.dataset)\n","\n"," def __getitem__(self, idx):\n"," text = self.dataset[idx][\"text\"]\n"," inputs = self.tokenizer(text, padding=\"max_length\", truncation=True, max_length=self.max_length, return_tensors=\"pt\")\n"," return {\n"," \"input_ids\": inputs[\"input_ids\"].squeeze(0),\n"," \"attention_mask\": inputs[\"attention_mask\"].squeeze(0),\n"," \"labels\": self.teacher_embs[idx],\n"," \"idx\": idx\n"," }\n","\n","distill_dataset = DistillationDataset(hf_dataset, student_tokenizer, teacher_embeddings)\n","\n","def collate_fn(batch):\n"," return {\n"," \"input_ids\": torch.stack([item[\"input_ids\"] for item in batch]),\n"," \"attention_mask\": torch.stack([item[\"attention_mask\"] for item in batch]),\n"," \"labels\": torch.stack([item[\"labels\"] for item in 
batch]),\n","        \"idx\": torch.tensor([item[\"idx\"] for item in batch])\n","    }\n","\n","# ====================== 4. Trainer with Logging ======================\n","class DistillationTrainer(Trainer):\n","    def __init__(self, *args, **kwargs):\n","        super().__init__(*args, **kwargs)\n","        self.log_history = []\n","\n","    def compute_loss(self, model, inputs, return_outputs=False, num_items_in_batch=None):\n","        labels = inputs.pop(\"labels\")\n","        outputs = model(input_ids=inputs[\"input_ids\"], attention_mask=inputs[\"attention_mask\"])\n","        hidden = outputs.last_hidden_state.mean(dim=1)\n","        student_emb = projection(hidden)\n","\n","        student_norm = F.normalize(student_emb, p=2, dim=1)\n","        teacher_norm = F.normalize(labels.to(student_emb.device), p=2, dim=1)\n","\n","        mse_loss = F.mse_loss(student_norm, teacher_norm)\n","        cos_loss = (1 - F.cosine_similarity(student_norm, teacher_norm, dim=1)).mean()\n","\n","        total_loss = 0.25 * mse_loss + 0.75 * cos_loss  # Strong emphasis on direction\n","\n","        return (total_loss, outputs) if return_outputs else total_loss\n","\n","    def log(self, logs, *args, **kwargs):\n","        # Accept/forward extra args (e.g. start_time) for transformers >= 4.47 Trainer.log signature\n","        super().log(logs, *args, **kwargs)\n","        self.log_history.append(logs)\n","\n","training_args = TrainingArguments(\n","    output_dir=\"./qwen_family_distilled\",\n","    per_device_train_batch_size=8,\n","    num_train_epochs=40,\n","    learning_rate=2e-4,\n","    fp16=True,\n","    logging_steps=50,  # Print loss every 50 steps as requested\n","    save_strategy=\"no\",\n","    report_to=\"none\",\n","    remove_unused_columns=False,\n",")\n","\n","trainer = DistillationTrainer(\n","    model=student_model,\n","    args=training_args,\n","    train_dataset=distill_dataset,\n","    data_collator=collate_fn,\n",")\n","\n","print(\"๐Ÿš€ Starting training (loss will be printed every 50 steps)...\")\n","trainer.train()\n","\n","# ====================== 5. 
Save to Google Drive ======================\n","final_save_dir = \"/content/drive/MyDrive/distilled_qwen_encoder_for_flux\"\n","os.makedirs(final_save_dir, exist_ok=True)\n","\n","student_model.save_pretrained(final_save_dir)\n","student_tokenizer.save_pretrained(final_save_dir)\n","torch.save(projection.state_dict(), f\"{final_save_dir}/projection.pth\")\n","\n","print(f\"โœ… Model + projection saved to Google Drive: {final_save_dir}\")\n","\n","# ====================== 6. Full Evaluation ======================\n","print(\"\\n๐Ÿ”„ Running full evaluation...\")\n","\n","student_model.eval()\n","student_embeddings = []\n","with torch.no_grad():\n","    for text in tqdm(texts, desc=\"Final encoding\"):\n","        inputs = student_tokenizer(text, padding=True, truncation=True, max_length=512, return_tensors=\"pt\").to(student_model.device)\n","        outputs = student_model(**inputs)\n","        hidden = outputs.last_hidden_state.mean(dim=1)\n","        emb = projection(hidden)\n","        student_embeddings.append(emb.squeeze(0).cpu())\n","\n","# Keep on CPU: teacher_embeddings is on CPU, and mse_loss below requires both tensors on the same device\n","student_embeddings = torch.stack(student_embeddings)\n","\n","# Metrics\n","mse = torch.nn.functional.mse_loss(student_embeddings, teacher_embeddings).item()\n","cos_sims = [cosine_similarity(student_embeddings[i].unsqueeze(0).cpu().numpy(),\n","                               teacher_embeddings[i].unsqueeze(0).cpu().numpy())[0][0]\n","            for i in range(len(texts))]\n","\n","avg_cosine = np.mean(cos_sims)\n","std_cosine = np.std(cos_sims)\n","\n","teacher_norms = torch.norm(teacher_embeddings, dim=1).cpu().numpy()\n","student_norms = torch.norm(student_embeddings, dim=1).cpu().numpy()\n","\n","print(f\"\\n๐Ÿ“Š Final MSE: {mse:.4f}\")\n","print(f\"๐Ÿ“Š Average Cosine Similarity: {avg_cosine:.4f} (ยฑ {std_cosine:.4f})\")\n","print(f\"Teacher norm: {teacher_norms.mean():.1f} | Student norm: {student_norms.mean():.1f}\")\n","\n","# PCA Plot\n","all_embs = torch.cat([teacher_embeddings.cpu(), student_embeddings.cpu()], dim=0).numpy()\n","pca = PCA(n_components=2, 
random_state=42)\n","pca_result = pca.fit_transform(all_embs)\n","teacher_pca = pca_result[:len(texts)]\n","student_pca = pca_result[len(texts):]\n","\n","plt.figure(figsize=(14, 10))\n","sns.set_style(\"whitegrid\")\n","plt.scatter(teacher_pca[:, 0], teacher_pca[:, 1], c='blue', label='Qwen Teacher', s=65, marker='o')\n","plt.scatter(student_pca[:, 0], student_pca[:, 1], c='red', label='Distilled Student', s=65, marker='x')\n","for i in range(len(texts)):\n"," plt.plot([teacher_pca[i, 0], student_pca[i, 0]], [teacher_pca[i, 1], student_pca[i, 1]], 'k--', alpha=0.35)\n","plt.title('Shared PCA Space: Teacher vs Student (dotted = same text)')\n","plt.legend()\n","plt.grid(True, alpha=0.3)\n","plt.tight_layout()\n","plt.show()\n","\n","print(\"\\nโœ… Training + Evaluation completed. Model saved to Google Drive.\")"],"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":1000},"id":"JYKWzj0iXrhZ","executionInfo":{"status":"error","timestamp":1774986282865,"user_tz":-120,"elapsed":29561,"user":{"displayName":"fukU Google","userId":"02763165356193834046"}},"outputId":"36d015cc-5557-493a-fbaf-0dba2b650ffe"},"execution_count":1,"outputs":[{"output_type":"stream","name":"stdout","text":["Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n","๐Ÿ” Checking GPU...\n","Tue Mar 31 19:44:42 2026 \n","+-----------------------------------------------------------------------------------------+\n","| NVIDIA-SMI 580.82.07 Driver Version: 580.82.07 CUDA Version: 13.0 |\n","+-----------------------------------------+------------------------+----------------------+\n","| GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC |\n","| Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. |\n","| | | MIG M. 
|\n","|=========================================+========================+======================|\n","| 0 Tesla T4 Off | 00000000:00:04.0 Off | 0 |\n","| N/A 37C P8 9W / 70W | 0MiB / 15360MiB | 0% Default |\n","| | | N/A |\n","+-----------------------------------------+------------------------+----------------------+\n","\n","+-----------------------------------------------------------------------------------------+\n","| Processes: |\n","| GPU GI CI PID Type Process name GPU Memory |\n","| ID ID Usage |\n","|=========================================================================================|\n","| No running processes found |\n","+-----------------------------------------------------------------------------------------+\n"]},{"output_type":"error","ename":"FileNotFoundError","evalue":"[Errno 2] No such file or directory: '/content/drive/MyDrive/qwen_embeddings.pt'","traceback":["\u001b[0;31m---------------------------------------------------------------------------\u001b[0m","\u001b[0;31mFileNotFoundError\u001b[0m Traceback (most recent call last)","\u001b[0;32m/tmp/ipykernel_10809/1073359533.py\u001b[0m in \u001b[0;36m<cell line: 0>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 28\u001b[0m \u001b[0;31m# ====================== 1. 
Load Teacher Embeddings ======================\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 29\u001b[0m \u001b[0membed_path\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m\"/content/drive/MyDrive/qwen_embeddings.pt\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 30\u001b[0;31m \u001b[0mdata\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mload\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0membed_path\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mweights_only\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 31\u001b[0m \u001b[0mteacher_embeddings\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstack\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m\"embeddings\"\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# [250, 1024]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 32\u001b[0m \u001b[0mtexts\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"texts\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34mf\"text_{i}\"\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mteacher_embeddings\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/torch/serialization.py\u001b[0m in \u001b[0;36mload\u001b[0;34m(f, map_location, pickle_module, weights_only, mmap, **pickle_load_args)\u001b[0m\n\u001b[1;32m 1528\u001b[0m 
\u001b[0mpickle_load_args\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m\"encoding\"\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m\"utf-8\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1529\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1530\u001b[0;31m \u001b[0;32mwith\u001b[0m \u001b[0m_open_file_like\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mf\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"rb\"\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mopened_file\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1531\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0m_is_zipfile\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mopened_file\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1532\u001b[0m \u001b[0;31m# The zipfile reader is going to advance the current file position.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/torch/serialization.py\u001b[0m in \u001b[0;36m_open_file_like\u001b[0;34m(name_or_buffer, mode)\u001b[0m\n\u001b[1;32m 793\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_open_file_like\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mname_or_buffer\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mFileLike\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmode\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mstr\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m->\u001b[0m \u001b[0m_opener\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mIO\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mbytes\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 794\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0m_is_path\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mname_or_buffer\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 795\u001b[0;31m 
\u001b[0;32mreturn\u001b[0m \u001b[0m_open_file\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mname_or_buffer\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmode\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 796\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 797\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;34m\"w\"\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mmode\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/torch/serialization.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, name, mode)\u001b[0m\n\u001b[1;32m 774\u001b[0m \u001b[0;32mclass\u001b[0m \u001b[0m_open_file\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0m_opener\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mIO\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mbytes\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 775\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m__init__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mstr\u001b[0m \u001b[0;34m|\u001b[0m \u001b[0mos\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mPathLike\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mstr\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmode\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mstr\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m->\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 776\u001b[0;31m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__init__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mopen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmode\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m 
\u001b[0;31m# noqa: SIM115\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 777\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 778\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m__exit__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;31mFileNotFoundError\u001b[0m: [Errno 2] No such file or directory: '/content/drive/MyDrive/qwen_embeddings.pt'"]}]}]}
 
1
+ {"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"provenance":[{"file_id":"1rORehICZ99xZsrwMfy2w8Jxg6M55d81L","timestamp":1774986352541}],"gpuType":"T4","mount_file_id":"1rORehICZ99xZsrwMfy2w8Jxg6M55d81L","authorship_tag":"ABX9TyPIIjhGAQMr0WHONJwg0qwI"},"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"name":"python"},"accelerator":"GPU"},"cells":[{"cell_type":"code","source":["# ==================== CELL 1: Setup & Model Loading ====================\n","\n","import os\n","from google.colab import userdata, drive\n","import torch\n","from diffusers import Flux2KleinPipeline, AutoencoderKL\n","from huggingface_hub import hf_hub_download\n","\n","# Mount Google Drive (if needed for saving outputs)\n","drive.mount('/content/drive', force_remount=True)\n","\n","print(\"๐Ÿ” Checking GPU...\")\n","!nvidia-smi\n","\n","# Use HF_TOKEN from Colab Secrets (no login() call needed)\n","os.environ[\"HF_TOKEN\"] = userdata.get(\"HF_TOKEN\")\n","\n","# Install/update latest diffusers + torch (for Tesla T4 compatibility)\n","!pip install -U diffusers transformers accelerate torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu121\n","\n","device = \"cuda\"\n","dtype = torch.bfloat16 # Best balance for Tesla T4 + Flux models\n","\n","print(\"๐Ÿ“ฆ Loading FLUX.2 [klein] 4B pipeline...\")\n","\n","# Option A: Simple full pipeline load (recommended first try)\n","pipe = Flux2KleinPipeline.from_pretrained(\n"," \"black-forest-labs/FLUX.2-klein-4B\",\n"," torch_dtype=dtype,\n"," # variant=\"fp8\" # uncomment if you want the smaller FP8 version (faster but slightly lower quality)\n",").to(device)\n","\n","# Option B: Load with custom/special VAE (uncomment if you want explicit control or if Option A fails)\n","\"\"\"\n","# Download the special flux2-vae.safetensors if needed\n","vae_path = hf_hub_download(\n"," repo_id=\"Comfy-Org/vae-text-encorder-for-flux-klein-4b\",\n"," 
filename=\"split_files/vae/flux2-vae.safetensors\",\n"," local_dir=\"/content/models/vae\",\n"," force_download=False\n",")\n","\n","vae = AutoencoderKL.from_single_file(\n"," vae_path,\n"," torch_dtype=dtype\n",")\n","\n","pipe = Flux2KleinPipeline.from_pretrained(\n"," \"black-forest-labs/FLUX.2-klein-4B\",\n"," vae=vae, # override with the special Flux.2 VAE\n"," torch_dtype=dtype,\n",").to(device)\n","\"\"\"\n","\n","# Memory optimizations for Tesla T4 (highly recommended)\n","pipe.vae.enable_slicing()\n","pipe.vae.enable_tiling()\n","\n","print(\"โœ… FLUX.2 [klein] 4B loaded successfully!\")\n","print(f\" Device: {device} | Dtype: {dtype}\")\n","print(\" VAE slicing + tiling enabled for lower VRAM usage.\")"],"metadata":{"id":"BtsjbO4uY53B"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# ============================= COMBINED CELL 2: TRAIN + EVALUATE + SAVE TO DRIVE =============================\n","# @title Combined: Train Distilled Qwen-family Encoder + Full Evaluation + Save to Drive\n","\n","import os\n","import torch\n","import torch.nn as nn\n","import torch.nn.functional as F\n","import matplotlib.pyplot as plt\n","import seaborn as sns\n","import numpy as np\n","from sklearn.decomposition import PCA\n","from sklearn.metrics.pairwise import cosine_similarity\n","from tqdm import tqdm\n","from google.colab import drive\n","from torch.utils.data import Dataset\n","from transformers import AutoTokenizer, AutoModel, Trainer, TrainingArguments, set_seed\n","from peft import LoraConfig, get_peft_model, PeftModel\n","from datasets import Dataset as HFDataset\n","\n","set_seed(42)\n","drive.mount('/content/drive')\n","\n","print(\"๐Ÿ” Checking GPU...\")\n","!nvidia-smi\n","if not torch.cuda.is_available():\n"," raise RuntimeError(\"No GPU detected!\")\n","\n","# ====================== 1. 
# ====================== 1. Load Teacher Embeddings ======================
embed_path = "/content/drive/MyDrive/qwen_embeddings.pt"

# BUG FIX: fail early with an actionable message. Without this guard a missing
# file surfaces as an opaque FileNotFoundError deep inside torch.serialization
# (exactly the crash recorded in this notebook's earlier saved output).
if not os.path.exists(embed_path):
    raise FileNotFoundError(
        f"Teacher embeddings not found at {embed_path}. "
        "Run the teacher-encoding notebook first, or fix the Drive path."
    )

# NOTE(review): weights_only=False unpickles arbitrary objects and can execute
# code on load - only use it on files you created yourself.
data = torch.load(embed_path, weights_only=False)
teacher_embeddings = torch.stack(data["embeddings"])  # [N, 1024] - N texts from the teacher
texts = data.get("texts", [f"text_{i}" for i in range(len(teacher_embeddings))])

print(f"✅ Loaded {len(texts)} texts from Qwen teacher")

hf_dataset = HFDataset.from_dict({"text": texts})

# ====================== 2. Student Model (Qwen2.5-0.5B + LoRA) ======================
student_model_name = "Qwen/Qwen2.5-0.5B"
student_tokenizer = AutoTokenizer.from_pretrained(student_model_name)
base_model = AutoModel.from_pretrained(
    student_model_name,
    torch_dtype=torch.float32,
    device_map="auto",
    trust_remote_code=True,
)

# LoRA on the attention projections only - small, fast adapter training.
lora_config = LoraConfig(
    r=16,
    lora_alpha=32,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
    lora_dropout=0.05,
    bias="none",
    task_type="FEATURE_EXTRACTION",
)
student_model = get_peft_model(base_model, lora_config)

# Linear head mapping the student hidden size to the teacher's 1024-dim space.
hidden_size = student_model.config.hidden_size
projection = nn.Linear(hidden_size, 1024).to(student_model.device)
projection.train()

print(f"👨‍🎓 Student: {student_model_name} + LoRA + projection ({hidden_size}→1024)")
# ====================== 3. Dataset ======================
class DistillationDataset(Dataset):
    """Pairs each raw text with its precomputed teacher embedding.

    Every item is tokenized to a fixed `max_length` so batches can be stacked
    directly; the teacher embedding is exposed as `labels` for the Trainer.
    """

    def __init__(self, hf_dataset, tokenizer, teacher_embs, max_length=512):
        self.dataset = hf_dataset
        self.tokenizer = tokenizer
        self.teacher_embs = teacher_embs
        self.max_length = max_length

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, idx):
        encoded = self.tokenizer(
            self.dataset[idx]["text"],
            padding="max_length",
            truncation=True,
            max_length=self.max_length,
            return_tensors="pt",
        )
        return {
            "input_ids": encoded["input_ids"].squeeze(0),
            "attention_mask": encoded["attention_mask"].squeeze(0),
            "labels": self.teacher_embs[idx],
            "idx": idx,
        }

distill_dataset = DistillationDataset(hf_dataset, student_tokenizer, teacher_embeddings)

def collate_fn(batch):
    """Stack per-sample dicts into batched tensors for the Trainer."""
    stacked = {
        key: torch.stack([example[key] for example in batch])
        for key in ("input_ids", "attention_mask", "labels")
    }
    stacked["idx"] = torch.tensor([example["idx"] for example in batch])
    return stacked
# ====================== 4. Trainer with Logging ======================
class DistillationTrainer(Trainer):
    """Trainer that distills teacher embeddings with a mixed MSE + cosine loss."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.log_history = []  # keep a copy of every logged dict for later inspection

    def compute_loss(self, model, inputs, return_outputs=False, num_items_in_batch=None):
        labels = inputs.pop("labels")
        outputs = model(input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"])

        # BUG FIX: masked mean pooling. Every sequence is padded to max_length=512,
        # so a plain .mean(dim=1) averaged hundreds of padding positions into the
        # sentence embedding. Weight hidden states by the attention mask instead.
        mask = inputs["attention_mask"].unsqueeze(-1).to(outputs.last_hidden_state.dtype)
        hidden = (outputs.last_hidden_state * mask).sum(dim=1) / mask.sum(dim=1).clamp(min=1e-9)
        student_emb = projection(hidden)

        # Compare unit-normalized embeddings: the teacher's scale is irrelevant,
        # only the direction matters for downstream cosine retrieval.
        student_norm = F.normalize(student_emb, p=2, dim=1)
        teacher_norm = F.normalize(labels.to(student_emb.device), p=2, dim=1)

        mse_loss = F.mse_loss(student_norm, teacher_norm)
        cos_loss = (1 - F.cosine_similarity(student_norm, teacher_norm, dim=1)).mean()

        total_loss = 0.25 * mse_loss + 0.75 * cos_loss  # Strong emphasis on direction

        return (total_loss, outputs) if return_outputs else total_loss

    def log(self, logs, *args, **kwargs):
        # Accept and forward extra positional args (newer transformers versions
        # pass `start_time`), so this override works across library versions.
        super().log(logs, *args, **kwargs)
        self.log_history.append(logs)

training_args = TrainingArguments(
    output_dir="./qwen_family_distilled",
    per_device_train_batch_size=8,
    num_train_epochs=40,
    learning_rate=2e-4,
    fp16=True,
    logging_steps=50,  # Print loss every 50 steps as requested
    save_strategy="no",
    report_to="none",
    remove_unused_columns=False,
)

# BUG FIX: by default the Trainer builds its optimizer from `model` parameters
# only, so the external `projection` layer received gradients but was NEVER
# updated. Build an optimizer over LoRA + projection parameters explicitly.
trainable_params = [p for p in student_model.parameters() if p.requires_grad]
trainable_params += list(projection.parameters())
optimizer = torch.optim.AdamW(trainable_params, lr=training_args.learning_rate)

trainer = DistillationTrainer(
    model=student_model,
    args=training_args,
    train_dataset=distill_dataset,
    data_collator=collate_fn,
    optimizers=(optimizer, None),  # (optimizer, lr_scheduler) - Trainer creates the scheduler
)

print("🚀 Starting training (loss will be printed every 50 steps)...")
trainer.train()
# ====================== 5. Save to Google Drive ======================
final_save_dir = "/content/drive/MyDrive/distilled_qwen_encoder_for_flux"
os.makedirs(final_save_dir, exist_ok=True)

student_model.save_pretrained(final_save_dir)
student_tokenizer.save_pretrained(final_save_dir)
torch.save(projection.state_dict(), f"{final_save_dir}/projection.pth")

print(f"✅ Model + projection saved to Google Drive: {final_save_dir}")

# ====================== 6. Full Evaluation ======================
print("\n🔄 Running full evaluation...")

student_model.eval()
projection.eval()  # match inference mode for the head as well as the backbone
student_embeddings = []
with torch.no_grad():
    for text in tqdm(texts, desc="Final encoding"):
        inputs = student_tokenizer(
            text, padding=True, truncation=True, max_length=512, return_tensors="pt"
        ).to(student_model.device)
        outputs = student_model(**inputs)
        # Single unpadded sequence -> plain mean equals masked mean here.
        hidden = outputs.last_hidden_state.mean(dim=1)
        emb = projection(hidden)
        student_embeddings.append(emb.squeeze(0).cpu())

# BUG FIX: keep both embedding sets on CPU for the metrics. The original moved
# student embeddings to the GPU while `teacher_embeddings` stayed on CPU, so
# mse_loss raised a device-mismatch RuntimeError on CUDA runtimes.
student_embeddings = torch.stack(student_embeddings)
teacher_cpu = teacher_embeddings.cpu()

# Metrics
mse = F.mse_loss(student_embeddings, teacher_cpu).item()
cos_sims = [
    cosine_similarity(
        student_embeddings[i].unsqueeze(0).numpy(),
        teacher_cpu[i].unsqueeze(0).numpy(),
    )[0][0]
    for i in range(len(texts))
]

avg_cosine = np.mean(cos_sims)
std_cosine = np.std(cos_sims)

teacher_norms = torch.norm(teacher_cpu, dim=1).numpy()
student_norms = torch.norm(student_embeddings, dim=1).numpy()

print(f"\n📊 Final MSE: {mse:.4f}")
print(f"📊 Average Cosine Similarity: {avg_cosine:.4f} (± {std_cosine:.4f})")
print(f"Teacher norm: {teacher_norms.mean():.1f} | Student norm: {student_norms.mean():.1f}")

# PCA Plot: project teacher and student embeddings into one shared 2-D space.
all_embs = torch.cat([teacher_cpu, student_embeddings], dim=0).numpy()
pca = PCA(n_components=2, random_state=42)
pca_result = pca.fit_transform(all_embs)
teacher_pca = pca_result[:len(texts)]
student_pca = pca_result[len(texts):]

plt.figure(figsize=(14, 10))
sns.set_style("whitegrid")
plt.scatter(teacher_pca[:, 0], teacher_pca[:, 1], c='blue', label='Qwen Teacher', s=65, marker='o')
plt.scatter(student_pca[:, 0], student_pca[:, 1], c='red', label='Distilled Student', s=65, marker='x')
for i in range(len(texts)):
    # dotted line links the same text's teacher and student points
    plt.plot([teacher_pca[i, 0], student_pca[i, 0]], [teacher_pca[i, 1], student_pca[i, 1]], 'k--', alpha=0.35)
plt.title('Shared PCA Space: Teacher vs Student (dotted = same text)')
plt.legend()
plt.grid(True, alpha=0.3)
plt.tight_layout()
plt.show()

print("\n✅ Training + Evaluation completed. Model saved to Google Drive.")