codeShare committed on
Commit
49a3b8c
Β·
verified Β·
1 Parent(s): cf7c2a2

Upload klein-lora-training_kaggle.ipynb

Browse files
Files changed (1) hide show
  1. klein-lora-training_kaggle.ipynb +1 -0
klein-lora-training_kaggle.ipynb ADDED
@@ -0,0 +1 @@
 
 
1
+ {"metadata":{"kernelspec":{"language":"python","display_name":"Python 3","name":"python3"},"language_info":{"name":"python","version":"3.12.12","mimetype":"text/x-python","codemirror_mode":{"name":"ipython","version":3},"pygments_lexer":"ipython3","nbconvert_exporter":"python","file_extension":".py"},"kaggle":{"accelerator":"gpu","dataSources":[{"sourceId":14564379,"sourceType":"datasetVersion","datasetId":8022630}],"dockerImageVersionId":31260,"isInternetEnabled":true,"language":"python","sourceType":"notebook","isGpuEnabled":true}},"nbformat_minor":4,"nbformat":4,"cells":[{"cell_type":"code","source":"# CELL 1 β€” Install correct versions\n\n!pip uninstall -y torch torchvision torchaudio diffusers accelerate peft transformers\n\n!pip install --no-deps torch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1 --index-url https://download.pytorch.org/whl/cu121\n\n!pip install --upgrade --no-cache-dir diffusers transformers accelerate peft safetensors tqdm huggingface-hub\n\n!pip install git+https://github.com/huggingface/diffusers.git\n","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-01-21T00:15:04.532167Z","iopub.execute_input":"2026-01-21T00:15:04.532433Z","iopub.status.idle":"2026-01-21T00:18:08.542205Z","shell.execute_reply.started":"2026-01-21T00:15:04.532400Z","shell.execute_reply":"2026-01-21T00:18:08.541444Z"},"collapsed":true,"jupyter":{"outputs_hidden":true}},"outputs":[{"name":"stdout","text":"Found existing installation: torch 2.8.0+cu126\nUninstalling torch-2.8.0+cu126:\n Successfully uninstalled torch-2.8.0+cu126\nFound existing installation: torchvision 0.23.0+cu126\nUninstalling torchvision-0.23.0+cu126:\n Successfully uninstalled torchvision-0.23.0+cu126\nFound existing installation: torchaudio 2.8.0+cu126\nUninstalling torchaudio-2.8.0+cu126:\n Successfully uninstalled torchaudio-2.8.0+cu126\nFound existing installation: diffusers 0.35.2\nUninstalling diffusers-0.35.2:\n Successfully uninstalled diffusers-0.35.2\nFound existing 
installation: accelerate 1.11.0\nUninstalling accelerate-1.11.0:\n Successfully uninstalled accelerate-1.11.0\nFound existing installation: peft 0.17.1\nUninstalling peft-0.17.1:\n Successfully uninstalled peft-0.17.1\nFound existing installation: transformers 4.57.1\nUninstalling transformers-4.57.1:\n Successfully uninstalled transformers-4.57.1\nLooking in indexes: https://download.pytorch.org/whl/cu121\nCollecting torch==2.5.1\n Downloading https://download.pytorch.org/whl/cu121/torch-2.5.1%2Bcu121-cp312-cp312-linux_x86_64.whl (780.4 MB)\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m780.4/780.4 MB\u001b[0m \u001b[31m2.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:01\u001b[0m\n\u001b[?25hCollecting torchvision==0.20.1\n Downloading https://download.pytorch.org/whl/cu121/torchvision-0.20.1%2Bcu121-cp312-cp312-linux_x86_64.whl (7.3 MB)\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m7.3/7.3 MB\u001b[0m \u001b[31m114.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m\n\u001b[?25hCollecting torchaudio==2.5.1\n Downloading https://download.pytorch.org/whl/cu121/torchaudio-2.5.1%2Bcu121-cp312-cp312-linux_x86_64.whl (3.4 MB)\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m3.4/3.4 MB\u001b[0m \u001b[31m107.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n\u001b[?25hInstalling collected packages: torchaudio, torchvision, torch\nSuccessfully installed torch-2.5.1+cu121 torchaudio-2.5.1+cu121 torchvision-0.20.1+cu121\nCollecting diffusers\n Downloading diffusers-0.36.0-py3-none-any.whl.metadata (20 kB)\nCollecting transformers\n Downloading transformers-4.57.6-py3-none-any.whl.metadata (43 kB)\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m44.0/44.0 kB\u001b[0m \u001b[31m15.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n\u001b[?25hCollecting accelerate\n Downloading accelerate-1.12.0-py3-none-any.whl.metadata (19 
kB)\nCollecting peft\n Downloading peft-0.18.1-py3-none-any.whl.metadata (14 kB)\nRequirement already satisfied: safetensors in /usr/local/lib/python3.12/dist-packages (0.6.2)\nCollecting safetensors\n Downloading safetensors-0.7.0-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (4.1 kB)\nRequirement already satisfied: tqdm in /usr/local/lib/python3.12/dist-packages (4.67.1)\nRequirement already satisfied: huggingface-hub in /usr/local/lib/python3.12/dist-packages (0.36.0)\nCollecting huggingface-hub\n Downloading huggingface_hub-1.3.2-py3-none-any.whl.metadata (13 kB)\nRequirement already satisfied: importlib_metadata in /usr/local/lib/python3.12/dist-packages (from diffusers) (8.7.0)\nRequirement already satisfied: filelock in /usr/local/lib/python3.12/dist-packages (from diffusers) (3.20.3)\nRequirement already satisfied: httpx<1.0.0 in /usr/local/lib/python3.12/dist-packages (from diffusers) (0.28.1)\nRequirement already satisfied: numpy in /usr/local/lib/python3.12/dist-packages (from diffusers) (2.0.2)\nRequirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.12/dist-packages (from diffusers) (2025.11.3)\nRequirement already satisfied: requests in /usr/local/lib/python3.12/dist-packages (from diffusers) (2.32.5)\nRequirement already satisfied: Pillow in /usr/local/lib/python3.12/dist-packages (from diffusers) (11.3.0)\nRequirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.12/dist-packages (from transformers) (26.0rc2)\nRequirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.12/dist-packages (from transformers) (6.0.3)\nRequirement already satisfied: tokenizers<=0.23.0,>=0.22.0 in /usr/local/lib/python3.12/dist-packages (from transformers) (0.22.1)\nRequirement already satisfied: psutil in /usr/local/lib/python3.12/dist-packages (from accelerate) (5.9.5)\nRequirement already satisfied: torch>=2.0.0 in /usr/local/lib/python3.12/dist-packages (from accelerate) (2.5.1+cu121)\nRequirement already 
satisfied: fsspec>=2023.5.0 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub) (2025.10.0)\nRequirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub) (4.15.0)\nRequirement already satisfied: hf-xet<2.0.0,>=1.1.3 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub) (1.2.1rc0)\nRequirement already satisfied: anyio in /usr/local/lib/python3.12/dist-packages (from httpx<1.0.0->diffusers) (4.12.1)\nRequirement already satisfied: certifi in /usr/local/lib/python3.12/dist-packages (from httpx<1.0.0->diffusers) (2026.1.4)\nRequirement already satisfied: httpcore==1.* in /usr/local/lib/python3.12/dist-packages (from httpx<1.0.0->diffusers) (1.0.9)\nRequirement already satisfied: idna in /usr/local/lib/python3.12/dist-packages (from httpx<1.0.0->diffusers) (3.11)\nRequirement already satisfied: h11>=0.16 in /usr/local/lib/python3.12/dist-packages (from httpcore==1.*->httpx<1.0.0->diffusers) (0.16.0)\nRequirement already satisfied: networkx in /usr/local/lib/python3.12/dist-packages (from torch>=2.0.0->accelerate) (3.5)\nRequirement already satisfied: jinja2 in /usr/local/lib/python3.12/dist-packages (from torch>=2.0.0->accelerate) (3.1.6)\nCollecting nvidia-cuda-nvrtc-cu12==12.1.105 (from torch>=2.0.0->accelerate)\n Downloading nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-manylinux1_x86_64.whl.metadata (1.5 kB)\nCollecting nvidia-cuda-runtime-cu12==12.1.105 (from torch>=2.0.0->accelerate)\n Downloading nvidia_cuda_runtime_cu12-12.1.105-py3-none-manylinux1_x86_64.whl.metadata (1.5 kB)\nCollecting nvidia-cuda-cupti-cu12==12.1.105 (from torch>=2.0.0->accelerate)\n Downloading nvidia_cuda_cupti_cu12-12.1.105-py3-none-manylinux1_x86_64.whl.metadata (1.6 kB)\nCollecting nvidia-cudnn-cu12==9.1.0.70 (from torch>=2.0.0->accelerate)\n Downloading nvidia_cudnn_cu12-9.1.0.70-py3-none-manylinux2014_x86_64.whl.metadata (1.6 kB)\nCollecting nvidia-cublas-cu12==12.1.3.1 (from 
torch>=2.0.0->accelerate)\n Downloading nvidia_cublas_cu12-12.1.3.1-py3-none-manylinux1_x86_64.whl.metadata (1.5 kB)\nCollecting nvidia-cufft-cu12==11.0.2.54 (from torch>=2.0.0->accelerate)\n Downloading nvidia_cufft_cu12-11.0.2.54-py3-none-manylinux1_x86_64.whl.metadata (1.5 kB)\nCollecting nvidia-curand-cu12==10.3.2.106 (from torch>=2.0.0->accelerate)\n Downloading nvidia_curand_cu12-10.3.2.106-py3-none-manylinux1_x86_64.whl.metadata (1.5 kB)\nCollecting nvidia-cusolver-cu12==11.4.5.107 (from torch>=2.0.0->accelerate)\n Downloading nvidia_cusolver_cu12-11.4.5.107-py3-none-manylinux1_x86_64.whl.metadata (1.6 kB)\nCollecting nvidia-cusparse-cu12==12.1.0.106 (from torch>=2.0.0->accelerate)\n Downloading nvidia_cusparse_cu12-12.1.0.106-py3-none-manylinux1_x86_64.whl.metadata (1.6 kB)\nCollecting nvidia-nccl-cu12==2.21.5 (from torch>=2.0.0->accelerate)\n Downloading nvidia_nccl_cu12-2.21.5-py3-none-manylinux2014_x86_64.whl.metadata (1.8 kB)\nCollecting nvidia-nvtx-cu12==12.1.105 (from torch>=2.0.0->accelerate)\n Downloading nvidia_nvtx_cu12-12.1.105-py3-none-manylinux1_x86_64.whl.metadata (1.7 kB)\nCollecting triton==3.1.0 (from torch>=2.0.0->accelerate)\n Downloading triton-3.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (1.3 kB)\nRequirement already satisfied: setuptools in /usr/local/lib/python3.12/dist-packages (from torch>=2.0.0->accelerate) (75.2.0)\nCollecting sympy==1.13.1 (from torch>=2.0.0->accelerate)\n Downloading sympy-1.13.1-py3-none-any.whl.metadata (12 kB)\nRequirement already satisfied: nvidia-nvjitlink-cu12 in /usr/local/lib/python3.12/dist-packages (from nvidia-cusolver-cu12==11.4.5.107->torch>=2.0.0->accelerate) (12.6.85)\nRequirement already satisfied: mpmath<1.4,>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from sympy==1.13.1->torch>=2.0.0->accelerate) (1.3.0)\nRequirement already satisfied: zipp>=3.20 in /usr/local/lib/python3.12/dist-packages (from importlib_metadata->diffusers) (3.23.0)\nRequirement already 
satisfied: charset_normalizer<4,>=2 in /usr/local/lib/python3.12/dist-packages (from requests->diffusers) (3.4.4)\nRequirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.12/dist-packages (from requests->diffusers) (2.6.3)\nRequirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.12/dist-packages (from jinja2->torch>=2.0.0->accelerate) (3.0.3)\nDownloading diffusers-0.36.0-py3-none-any.whl (4.6 MB)\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m4.6/4.6 MB\u001b[0m \u001b[31m67.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0ma \u001b[36m0:00:01\u001b[0m\n\u001b[?25hDownloading transformers-4.57.6-py3-none-any.whl (12.0 MB)\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m12.0/12.0 MB\u001b[0m \u001b[31m242.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m \u001b[36m0:00:01\u001b[0m\n\u001b[?25hDownloading accelerate-1.12.0-py3-none-any.whl (380 kB)\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m380.9/380.9 kB\u001b[0m \u001b[31m387.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n\u001b[?25hDownloading peft-0.18.1-py3-none-any.whl (556 kB)\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m557.0/557.0 kB\u001b[0m \u001b[31m387.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n\u001b[?25hDownloading safetensors-0.7.0-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (507 kB)\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m507.2/507.2 kB\u001b[0m \u001b[31m399.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n\u001b[?25hDownloading nvidia_cublas_cu12-12.1.3.1-py3-none-manylinux1_x86_64.whl (410.6 MB)\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m410.6/410.6 MB\u001b[0m \u001b[31m280.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:01\u001b[0m\n\u001b[?25hDownloading nvidia_cuda_cupti_cu12-12.1.105-py3-none-manylinux1_x86_64.whl 
(14.1 MB)\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m14.1/14.1 MB\u001b[0m \u001b[31m242.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m \u001b[36m0:00:01\u001b[0m\n\u001b[?25hDownloading nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-manylinux1_x86_64.whl (23.7 MB)\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m23.7/23.7 MB\u001b[0m \u001b[31m287.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0ma \u001b[36m0:00:01\u001b[0m\n\u001b[?25hDownloading nvidia_cuda_runtime_cu12-12.1.105-py3-none-manylinux1_x86_64.whl (823 kB)\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m823.6/823.6 kB\u001b[0m \u001b[31m307.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n\u001b[?25hDownloading nvidia_cudnn_cu12-9.1.0.70-py3-none-manylinux2014_x86_64.whl (664.8 MB)\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m664.8/664.8 MB\u001b[0m \u001b[31m276.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:01\u001b[0m\n\u001b[?25hDownloading nvidia_cufft_cu12-11.0.2.54-py3-none-manylinux1_x86_64.whl (121.6 MB)\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m121.6/121.6 MB\u001b[0m \u001b[31m257.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:01\u001b[0m\n\u001b[?25hDownloading nvidia_curand_cu12-10.3.2.106-py3-none-manylinux1_x86_64.whl (56.5 MB)\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m56.5/56.5 MB\u001b[0m \u001b[31m273.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0ma \u001b[36m0:00:01\u001b[0m\n\u001b[?25hDownloading nvidia_cusolver_cu12-11.4.5.107-py3-none-manylinux1_x86_64.whl (124.2 MB)\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m124.2/124.2 MB\u001b[0m \u001b[31m258.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0ma \u001b[36m0:00:01\u001b[0m\n\u001b[?25hDownloading nvidia_cusparse_cu12-12.1.0.106-py3-none-manylinux1_x86_64.whl 
(196.0 MB)\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m196.0/196.0 MB\u001b[0m \u001b[31m278.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0ma \u001b[36m0:00:01\u001b[0m\n\u001b[?25hDownloading nvidia_nccl_cu12-2.21.5-py3-none-manylinux2014_x86_64.whl (188.7 MB)\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m188.7/188.7 MB\u001b[0m \u001b[31m247.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:01\u001b[0m\n\u001b[?25hDownloading nvidia_nvtx_cu12-12.1.105-py3-none-manylinux1_x86_64.whl (99 kB)\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m99.1/99.1 kB\u001b[0m \u001b[31m295.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n\u001b[?25hDownloading sympy-1.13.1-py3-none-any.whl (6.2 MB)\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m6.2/6.2 MB\u001b[0m \u001b[31m250.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n\u001b[?25hDownloading triton-3.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (209.6 MB)\n\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m209.6/209.6 MB\u001b[0m \u001b[31m227.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:01\u001b[0m\n\u001b[?25hInstalling collected packages: triton, sympy, safetensors, nvidia-nvtx-cu12, nvidia-nccl-cu12, nvidia-cusparse-cu12, nvidia-curand-cu12, nvidia-cufft-cu12, nvidia-cuda-runtime-cu12, nvidia-cuda-nvrtc-cu12, nvidia-cuda-cupti-cu12, nvidia-cublas-cu12, nvidia-cusolver-cu12, nvidia-cudnn-cu12, diffusers, transformers, accelerate, peft\n Attempting uninstall: triton\n Found existing installation: triton 3.4.0\n Uninstalling triton-3.4.0:\n Successfully uninstalled triton-3.4.0\n Attempting uninstall: sympy\n Found existing installation: sympy 1.13.3\n Uninstalling sympy-1.13.3:\n Successfully uninstalled sympy-1.13.3\n Attempting uninstall: safetensors\n Found existing installation: safetensors 0.6.2\n Uninstalling 
safetensors-0.6.2:\n Successfully uninstalled safetensors-0.6.2\n Attempting uninstall: nvidia-nvtx-cu12\n Found existing installation: nvidia-nvtx-cu12 12.6.77\n Uninstalling nvidia-nvtx-cu12-12.6.77:\n Successfully uninstalled nvidia-nvtx-cu12-12.6.77\n Attempting uninstall: nvidia-nccl-cu12\n Found existing installation: nvidia-nccl-cu12 2.27.3\n Uninstalling nvidia-nccl-cu12-2.27.3:\n Successfully uninstalled nvidia-nccl-cu12-2.27.3\n Attempting uninstall: nvidia-cusparse-cu12\n Found existing installation: nvidia-cusparse-cu12 12.5.4.2\n Uninstalling nvidia-cusparse-cu12-12.5.4.2:\n Successfully uninstalled nvidia-cusparse-cu12-12.5.4.2\n Attempting uninstall: nvidia-curand-cu12\n Found existing installation: nvidia-curand-cu12 10.3.7.77\n Uninstalling nvidia-curand-cu12-10.3.7.77:\n Successfully uninstalled nvidia-curand-cu12-10.3.7.77\n Attempting uninstall: nvidia-cufft-cu12\n Found existing installation: nvidia-cufft-cu12 11.3.0.4\n Uninstalling nvidia-cufft-cu12-11.3.0.4:\n Successfully uninstalled nvidia-cufft-cu12-11.3.0.4\n Attempting uninstall: nvidia-cuda-runtime-cu12\n Found existing installation: nvidia-cuda-runtime-cu12 12.6.77\n Uninstalling nvidia-cuda-runtime-cu12-12.6.77:\n Successfully uninstalled nvidia-cuda-runtime-cu12-12.6.77\n Attempting uninstall: nvidia-cuda-nvrtc-cu12\n Found existing installation: nvidia-cuda-nvrtc-cu12 12.6.77\n Uninstalling nvidia-cuda-nvrtc-cu12-12.6.77:\n Successfully uninstalled nvidia-cuda-nvrtc-cu12-12.6.77\n Attempting uninstall: nvidia-cuda-cupti-cu12\n Found existing installation: nvidia-cuda-cupti-cu12 12.6.80\n Uninstalling nvidia-cuda-cupti-cu12-12.6.80:\n Successfully uninstalled nvidia-cuda-cupti-cu12-12.6.80\n Attempting uninstall: nvidia-cublas-cu12\n Found existing installation: nvidia-cublas-cu12 12.6.4.1\n Uninstalling nvidia-cublas-cu12-12.6.4.1:\n Successfully uninstalled nvidia-cublas-cu12-12.6.4.1\n Attempting uninstall: nvidia-cusolver-cu12\n Found existing installation: nvidia-cusolver-cu12 
11.7.1.2\n Uninstalling nvidia-cusolver-cu12-11.7.1.2:\n Successfully uninstalled nvidia-cusolver-cu12-11.7.1.2\n Attempting uninstall: nvidia-cudnn-cu12\n Found existing installation: nvidia-cudnn-cu12 9.10.2.21\n Uninstalling nvidia-cudnn-cu12-9.10.2.21:\n Successfully uninstalled nvidia-cudnn-cu12-9.10.2.21\n\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\ncudf-cu12 25.6.0 requires pyarrow<20.0.0a0,>=14.0.0; platform_machine == \"x86_64\", but you have pyarrow 22.0.0 which is incompatible.\nfastai 2.8.4 requires fastcore<1.9,>=1.8.0, but you have fastcore 1.11.3 which is incompatible.\u001b[0m\u001b[31m\n\u001b[0mSuccessfully installed accelerate-1.12.0 diffusers-0.36.0 nvidia-cublas-cu12-12.1.3.1 nvidia-cuda-cupti-cu12-12.1.105 nvidia-cuda-nvrtc-cu12-12.1.105 nvidia-cuda-runtime-cu12-12.1.105 nvidia-cudnn-cu12-9.1.0.70 nvidia-cufft-cu12-11.0.2.54 nvidia-curand-cu12-10.3.2.106 nvidia-cusolver-cu12-11.4.5.107 nvidia-cusparse-cu12-12.1.0.106 nvidia-nccl-cu12-2.21.5 nvidia-nvtx-cu12-12.1.105 peft-0.18.1 safetensors-0.7.0 sympy-1.13.1 transformers-4.57.6 triton-3.1.0\nCollecting git+https://github.com/huggingface/diffusers.git\n Cloning https://github.com/huggingface/diffusers.git to /tmp/pip-req-build-2moc7t2u\n Running command git clone --filter=blob:none --quiet https://github.com/huggingface/diffusers.git /tmp/pip-req-build-2moc7t2u\n Resolved https://github.com/huggingface/diffusers.git to commit ec376293714f269947f6d9d8a572bd73040bc1a0\n Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n Preparing metadata (pyproject.toml) ... 
\u001b[?25l\u001b[?25hdone\nRequirement already satisfied: importlib_metadata in /usr/local/lib/python3.12/dist-packages (from diffusers==0.37.0.dev0) (8.7.0)\nRequirement already satisfied: filelock in /usr/local/lib/python3.12/dist-packages (from diffusers==0.37.0.dev0) (3.20.3)\nRequirement already satisfied: httpx<1.0.0 in /usr/local/lib/python3.12/dist-packages (from diffusers==0.37.0.dev0) (0.28.1)\nRequirement already satisfied: huggingface-hub<2.0,>=0.34.0 in /usr/local/lib/python3.12/dist-packages (from diffusers==0.37.0.dev0) (0.36.0)\nRequirement already satisfied: numpy in /usr/local/lib/python3.12/dist-packages (from diffusers==0.37.0.dev0) (2.0.2)\nRequirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.12/dist-packages (from diffusers==0.37.0.dev0) (2025.11.3)\nRequirement already satisfied: requests in /usr/local/lib/python3.12/dist-packages (from diffusers==0.37.0.dev0) (2.32.5)\nRequirement already satisfied: safetensors>=0.3.1 in /usr/local/lib/python3.12/dist-packages (from diffusers==0.37.0.dev0) (0.7.0)\nRequirement already satisfied: Pillow in /usr/local/lib/python3.12/dist-packages (from diffusers==0.37.0.dev0) (11.3.0)\nRequirement already satisfied: anyio in /usr/local/lib/python3.12/dist-packages (from httpx<1.0.0->diffusers==0.37.0.dev0) (4.12.1)\nRequirement already satisfied: certifi in /usr/local/lib/python3.12/dist-packages (from httpx<1.0.0->diffusers==0.37.0.dev0) (2026.1.4)\nRequirement already satisfied: httpcore==1.* in /usr/local/lib/python3.12/dist-packages (from httpx<1.0.0->diffusers==0.37.0.dev0) (1.0.9)\nRequirement already satisfied: idna in /usr/local/lib/python3.12/dist-packages (from httpx<1.0.0->diffusers==0.37.0.dev0) (3.11)\nRequirement already satisfied: h11>=0.16 in /usr/local/lib/python3.12/dist-packages (from httpcore==1.*->httpx<1.0.0->diffusers==0.37.0.dev0) (0.16.0)\nRequirement already satisfied: fsspec>=2023.5.0 in /usr/local/lib/python3.12/dist-packages (from 
huggingface-hub<2.0,>=0.34.0->diffusers==0.37.0.dev0) (2025.10.0)\nRequirement already satisfied: packaging>=20.9 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub<2.0,>=0.34.0->diffusers==0.37.0.dev0) (26.0rc2)\nRequirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub<2.0,>=0.34.0->diffusers==0.37.0.dev0) (6.0.3)\nRequirement already satisfied: tqdm>=4.42.1 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub<2.0,>=0.34.0->diffusers==0.37.0.dev0) (4.67.1)\nRequirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub<2.0,>=0.34.0->diffusers==0.37.0.dev0) (4.15.0)\nRequirement already satisfied: hf-xet<2.0.0,>=1.1.3 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub<2.0,>=0.34.0->diffusers==0.37.0.dev0) (1.2.1rc0)\nRequirement already satisfied: zipp>=3.20 in /usr/local/lib/python3.12/dist-packages (from importlib_metadata->diffusers==0.37.0.dev0) (3.23.0)\nRequirement already satisfied: charset_normalizer<4,>=2 in /usr/local/lib/python3.12/dist-packages (from requests->diffusers==0.37.0.dev0) (3.4.4)\nRequirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.12/dist-packages (from requests->diffusers==0.37.0.dev0) (2.6.3)\nBuilding wheels for collected packages: diffusers\n Building wheel for diffusers (pyproject.toml) ... 
\u001b[?25l\u001b[?25hdone\n Created wheel for diffusers: filename=diffusers-0.37.0.dev0-py3-none-any.whl size=4893406 sha256=3233a06ba71f7ba4b178fe8b614ba72c8fb825bc759e19556967cea1af96e399\n Stored in directory: /tmp/pip-ephem-wheel-cache-2aj85n60/wheels/23/0f/7d/f97813d265ed0e599a78d83afd4e1925740896ca79b46cccfd\nSuccessfully built diffusers\nInstalling collected packages: diffusers\n Attempting uninstall: diffusers\n Found existing installation: diffusers 0.36.0\n Uninstalling diffusers-0.36.0:\n Successfully uninstalled diffusers-0.36.0\nSuccessfully installed diffusers-0.37.0.dev0\n","output_type":"stream"}],"execution_count":1},{"cell_type":"code","source":"# CELL 2 β€” Verify\n\nimport torch, diffusers\n\nprint(\"Torch:\", torch.__version__)\nprint(\"Diffusers:\", diffusers.__version__)\nprint(\"CUDA:\", torch.cuda.is_available())\nprint(\"GPU:\", torch.cuda.get_device_name(0) if torch.cuda.is_available() else \"None\")\n\nfrom diffusers import Flux2KleinPipeline\nprint(\"Flux2KleinPipeline OK\")","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-01-21T00:18:08.544139Z","iopub.execute_input":"2026-01-21T00:18:08.544392Z","iopub.status.idle":"2026-01-21T00:18:32.472334Z","shell.execute_reply.started":"2026-01-21T00:18:08.544364Z","shell.execute_reply":"2026-01-21T00:18:32.471684Z"},"collapsed":true,"jupyter":{"outputs_hidden":true}},"outputs":[{"name":"stdout","text":"Torch: 2.5.1+cu121\nDiffusers: 0.37.0.dev0\nCUDA: True\nGPU: Tesla P100-PCIE-16GB\n","output_type":"stream"},{"name":"stderr","text":"2026-01-21 00:18:20.049931: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\nE0000 00:00:1768954700.242537 55 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has 
already been registered\nE0000 00:00:1768954700.299301 55 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\nW0000 00:00:1768954700.780027 55 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\nW0000 00:00:1768954700.780067 55 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\nW0000 00:00:1768954700.780070 55 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\nW0000 00:00:1768954700.780072 55 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\nFlax classes are deprecated and will be removed in Diffusers v1.0.0. We recommend migrating to PyTorch classes or pinning your version of Diffusers.\nFlax classes are deprecated and will be removed in Diffusers v1.0.0. 
We recommend migrating to PyTorch classes or pinning your version of Diffusers.\n","output_type":"stream"},{"name":"stdout","text":"Flux2KleinPipeline OK\n","output_type":"stream"}],"execution_count":2},{"cell_type":"code","source":"# CELL 3 β€” Config\n\nimport os\n\ndevice = \"cuda\"\ndtype = torch.float16\n\nDATASET_NAME = \"image-caption-dataset\" # change if needed\n\nCAPTIONS_PATH = f\"/kaggle/input/{DATASET_NAME}/flux_captions.json\"\nLATENTS_PATH = f\"/kaggle/input/{DATASET_NAME}/flux_latents.safetensors\"\n\nCACHE_DIR = \"/kaggle/working/cache\"\nSAVE_DIR = \"/kaggle/working/flux_klein_lora\"\n\nos.makedirs(CACHE_DIR, exist_ok=True)\nos.makedirs(SAVE_DIR, exist_ok=True)\n\n# training\nACCUM_STEPS = 2\nALPHA = 16\n#--#\nLR = 2e-5\nSTEPS = 1000 # or more\nRANK = 16 # better for FLUX\n","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-01-21T00:59:16.135867Z","iopub.execute_input":"2026-01-21T00:59:16.136654Z","iopub.status.idle":"2026-01-21T00:59:16.141971Z","shell.execute_reply.started":"2026-01-21T00:59:16.136622Z","shell.execute_reply":"2026-01-21T00:59:16.141274Z"}},"outputs":[],"execution_count":29},{"cell_type":"code","source":"# CELL 4 β€” Load captions + latents\n\nimport json\nfrom safetensors.torch import load_file\n\nwith open(CAPTIONS_PATH) as f:\n captions = json.load(f)\n\nlatents = load_file(LATENTS_PATH)\n\nkeys = list(captions.keys())\nprint(\"Samples:\", len(keys))\n","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-01-21T00:18:32.479783Z","iopub.execute_input":"2026-01-21T00:18:32.480186Z","iopub.status.idle":"2026-01-21T00:18:32.970753Z","shell.execute_reply.started":"2026-01-21T00:18:32.480154Z","shell.execute_reply":"2026-01-21T00:18:32.970140Z"}},"outputs":[{"name":"stdout","text":"Samples: 125\n","output_type":"stream"}],"execution_count":4},{"cell_type":"code","source":"# CELL 5 β€” Dataset (returns latent + key)\n\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\n\nclass 
FluxLatentDataset(Dataset):\n def __len__(self):\n return len(keys)\n\n def __getitem__(self, idx):\n k = keys[idx]\n return latents[f\"{k}\"], k # <-- return KEY, not caption\n\ndataset = FluxLatentDataset()\nloader = DataLoader(dataset, batch_size=1, shuffle=True)\n","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-01-21T00:21:23.274091Z","iopub.execute_input":"2026-01-21T00:21:23.274581Z","iopub.status.idle":"2026-01-21T00:21:23.279443Z","shell.execute_reply.started":"2026-01-21T00:21:23.274553Z","shell.execute_reply":"2026-01-21T00:21:23.278707Z"}},"outputs":[],"execution_count":11},{"cell_type":"code","source":"# CELL 6 β€” Load ONLY tokenizer + text encoder (GPU, FP16)\n\nimport torch, gc\nfrom transformers import AutoTokenizer, AutoModel\n\nMODEL_ID = \"black-forest-labs/FLUX.2-klein-4B\"\n\ntokenizer = AutoTokenizer.from_pretrained(\n MODEL_ID,\n subfolder=\"tokenizer\",\n trust_remote_code=True,\n cache_dir=CACHE_DIR\n)\n\ntext_encoder = AutoModel.from_pretrained(\n MODEL_ID,\n subfolder=\"text_encoder\",\n trust_remote_code=True,\n dtype=torch.float16,\n device_map=\"cuda\", # <-- put on GPU\n cache_dir=CACHE_DIR\n).eval()\n\ntorch.cuda.empty_cache()\n\nprint(\"Loaded Qwen text encoder on GPU (FP16).\")\n","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-01-21T00:21:32.776301Z","iopub.execute_input":"2026-01-21T00:21:32.776596Z","iopub.status.idle":"2026-01-21T00:22:15.485565Z","shell.execute_reply.started":"2026-01-21T00:21:32.776570Z","shell.execute_reply":"2026-01-21T00:22:15.484863Z"}},"outputs":[{"output_type":"display_data","data":{"text/plain":"tokenizer_config.json: 0.00B [00:00, ?B/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"78cd415463994109b5bd6a4649b5d489"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"vocab.json: 0.00B [00:00, 
?B/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"967b81e3a1df4473bd898af2be9c4366"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"merges.txt: 0.00B [00:00, ?B/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"f584e4da10a74a97aa6d9450f6d072a6"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"tokenizer/tokenizer.json: 0%| | 0.00/11.4M [00:00<?, ?B/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"57be81109ed24de0b9c638d825be6427"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"added_tokens.json: 0%| | 0.00/707 [00:00<?, ?B/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"602957bb2acb4fed96a028500fbea137"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"special_tokens_map.json: 0%| | 0.00/613 [00:00<?, ?B/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"3e335d2adbb546f0b1b50d894b8ee791"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"chat_template.jinja: 0.00B [00:00, ?B/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"e34111218a4941e2ba1481bc7c627a54"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"config.json: 0.00B [00:00, ?B/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"d9989cf7ca584e9d9c02d6f1382381f2"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"model.safetensors.index.json: 0.00B [00:00, ?B/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"e4687fc9b6a642039a60e5dfc6d0e57c"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"Fetching 2 files: 0%| | 0/2 [00:00<?, 
# CELL 7 β€” Cache text embeddings (encode on GPU, store on CPU)

from tqdm import tqdm

text_cache = {}

with torch.no_grad():
    for k in tqdm(keys, desc="Encoding captions"):
        caption = captions[k]

        batch = tokenizer(
            caption,
            padding="max_length",
            truncation=True,
            max_length=256,
            return_tensors="pt",
        ).to("cuda")

        # Keep only the final hidden states; move to CPU as fp16 so the
        # whole cache fits comfortably in host RAM.  Shape per caption:
        # [256, hidden] β€” presumably hidden == 2560, verify against encoder.
        hidden = text_encoder(**batch).last_hidden_state
        text_cache[k] = hidden.squeeze(0).to("cpu", torch.float16)

torch.cuda.empty_cache()

print("Cached text embeddings:", len(text_cache))


# CELL 8 β€” Free text encoder + tokenizer and verify VRAM

import gc, torch

def print_vram(tag):
    """Print allocated / reserved / total CUDA memory for a labelled checkpoint."""
    if torch.cuda.is_available():
        gib = 1024**3
        alloc = torch.cuda.memory_allocated() / gib
        reserved = torch.cuda.memory_reserved() / gib
        total = torch.cuda.get_device_properties(0).total_memory / gib
        print(f"[{tag}] VRAM allocated: {alloc:.2f} GB | reserved: {reserved:.2f} GB | total: {total:.2f} GB")

print_vram("before delete")

# Drop the only references to the encoder stack so the GPU memory can be
# reclaimed before the transformer is loaded.
del text_encoder
del tokenizer

gc.collect()
torch.cuda.empty_cache()

print_vram("after delete")

print("Text encoder and tokenizer removed.")
# CELL 9 β€” Load ONLY transformer weights to GPU, then delete pipeline

import torch, gc
from diffusers import Flux2KleinPipeline

torch.cuda.empty_cache()
gc.collect()
print_vram("before load")  # reuse the helper from CELL 8 (was duplicated here)

pipe = Flux2KleinPipeline.from_pretrained(
    "black-forest-labs/FLUX.2-klein-4B",
    torch_dtype=dtype,
    cache_dir=CACHE_DIR,
)

# Move ONLY the transformer to GPU; every other component stays on CPU and
# is released below.
transformer = pipe.transformer.to(device)

# Drop references to the components we do not need so GC can reclaim them.
pipe.text_encoder = None
pipe.vae = None
pipe.tokenizer = None
pipe.scheduler = None
del pipe  # `del` instead of rebinding to None β€” removes the name entirely

gc.collect()
torch.cuda.empty_cache()

print_vram("after load")
print("Transformer loaded only.")
print("in_channels:", transformer.config.in_channels)
# CELL 10 β€” Attach LoRA to FLUX transformer (correct targets)

import torch
from peft import LoraConfig, get_peft_model, PeftModel

# ---- safety: unload previous adapters if they exist ----
if isinstance(transformer, PeftModel):
    transformer = transformer.unload()
    torch.cuda.empty_cache()

# BUG FIX: the previous hardcoded list ("q_proj", "k_proj", "fc1", ...) uses
# transformers-style module names that do not exist inside the diffusers FLUX
# transformer, so almost nothing was wrapped β€” the cell itself reported only
# 0.03M trainable params out of 3875M.  Instead, discover the Linear leaf
# names that actually live in the attention / feed-forward / projection
# sub-modules of THIS model.
target_modules = sorted({
    name.split(".")[-1]
    for name, module in transformer.named_modules()
    if isinstance(module, torch.nn.Linear)
    and any(part in name for part in ("attn", "ff", "proj"))
})
print("LoRA target modules:", target_modules)

lora_config = LoraConfig(
    r=8,
    lora_alpha=16,
    target_modules=target_modules,
    lora_dropout=0.05,
    bias="none",
)

transformer = get_peft_model(transformer, lora_config)

transformer.enable_gradient_checkpointing()

# ---- stats + sanity check ----
trainable = sum(p.numel() for p in transformer.parameters() if p.requires_grad)
total = sum(p.numel() for p in transformer.parameters())

print(f"Trainable params: {trainable/1e6:.2f}M / {total/1e6:.2f}M")
# Fail fast if the target list still matched (almost) nothing.
assert trainable > 1_000_000, (
    "LoRA matched almost no modules β€” inspect "
    "[n for n, _ in transformer.named_modules()] and adjust target_modules"
)
print("LoRA attached correctly. Gradient checkpointing enabled.")
# CELL 11 β€” Flow Matching Training (NaN SAFE, FLUX)

import os
import torch
import torch.nn.functional as F
from tqdm import trange

STEPS = 130

torch.cuda.empty_cache()


# ----------------------------
# Patchify latents β†’ tokens
# ----------------------------
def patchify_latents(latents):
    """Fold 2x2 spatial patches into channels: [B, C, H, W] β†’ [B, (H//2)*(W//2), C*4]."""
    B, C, H, W = latents.shape
    latents = latents.reshape(B, C, H // 2, 2, W // 2, 2)
    latents = latents.permute(0, 2, 4, 1, 3, 5).contiguous()
    return latents.view(B, (H // 2) * (W // 2), C * 4)


# ----------------------------
# FLUX rotary position ids
# ----------------------------
def generate_flux_pos_ids(batch, ph, pw, txt_len, device, dtype):
    """Build normalized [y, x, 0, 0] image ids and [t, 0, 0, 0] text ids.

    Returns (img_ids [batch, ph*pw, 4], txt_ids [batch, txt_len, 4]).
    """
    y = torch.linspace(0, 1, ph, device=device, dtype=dtype)
    x = torch.linspace(0, 1, pw, device=device, dtype=dtype)
    gy, gx = torch.meshgrid(y, x, indexing="ij")

    pos_y, pos_x = gy.flatten(), gx.flatten()
    zeros_img = torch.zeros_like(pos_y)  # scale / aspect slots stay zero

    img_ids = torch.stack([pos_y, pos_x, zeros_img, zeros_img], dim=-1)
    img_ids = img_ids.unsqueeze(0).repeat(batch, 1, 1)

    t = torch.arange(txt_len, device=device, dtype=dtype) / txt_len
    zeros_txt = torch.zeros_like(t)
    txt_ids = torch.stack([t, zeros_txt, zeros_txt, zeros_txt], dim=-1)
    txt_ids = txt_ids.unsqueeze(0).repeat(batch, 1, 1)

    return img_ids, txt_ids


# ----------------------------
# Optimizer (LoRA only)
# ----------------------------
trainable_params = [p for p in transformer.parameters() if p.requires_grad]

optimizer = torch.optim.AdamW(
    trainable_params,
    lr=LR,
    betas=(0.9, 0.999),
    weight_decay=1e-4,
)

# NaN FIX: fp16 autocast overflowed inside the 4B transformer and the run
# produced a non-finite loss on every single step.  bfloat16 has fp32's
# exponent range and avoids that overflow β€” prefer it when the GPU supports
# it, otherwise fall back to the original dtype.
amp_dtype = torch.bfloat16 if torch.cuda.is_bf16_supported() else dtype


# ----------------------------
# Training loop
# ----------------------------
transformer.train()

steps_done = 0
accum_loss = 0.0
data_iter = iter(loader)

for step in trange(STEPS, desc="Training (Flow Matching)"):

    try:
        latent_b, key = next(data_iter)
    except StopIteration:
        # epoch boundary: restart the shuffled loader
        data_iter = iter(loader)
        latent_b, key = next(data_iter)

    # stored latents already carry their own batch dim; drop the loader's
    latent_b = latent_b.squeeze(0).to(device, dtype=dtype)

    # cached Qwen embeddings, tripled along the feature dim to match the
    # transformer's expected text width
    enc_raw = text_cache[key[0]].unsqueeze(0).to(device, dtype=dtype)
    enc_b = enc_raw.repeat(1, 1, 3)

    # --------------------
    # Patchify
    # --------------------
    tokens = patchify_latents(latent_b)
    tokens = torch.clamp(tokens, -5, 5)

    if step == 0:
        print("latent:", latent_b.shape)
        print("text expanded:", enc_b.shape)
        print("tokens:", tokens.shape)

    # --------------------
    # Flow matching: z_t = (1-t)*eps + t*x, velocity target = x - eps
    # --------------------
    eps = torch.clamp(torch.randn_like(tokens), -5, 5)

    t = torch.rand(tokens.size(0), device=device, dtype=dtype)
    z_t = (1 - t[:, None, None]) * eps + t[:, None, None] * tokens
    z_t = torch.nan_to_num(z_t, nan=0.0, posinf=1.0, neginf=-1.0)

    target = tokens - eps

    # NOTE(review): diffusers FLUX pipelines feed the transformer a timestep
    # in [0, 1]; the *100 scale here is unusual β€” confirm against the
    # Flux2 transformer's forward() before trusting long runs.
    t_embed = t * 100.0

    # --------------------
    # Positional IDs β€” FIX: derive the patch grid from the actual latent
    # shape instead of a hardcoded 64x64, so other resolutions also work.
    # --------------------
    B, _, H, W = latent_b.shape
    ph, pw = H // 2, W // 2

    img_ids, txt_ids = generate_flux_pos_ids(
        B, ph, pw, enc_b.size(1), device, dtype
    )

    # --------------------
    # Forward
    # --------------------
    with torch.autocast("cuda", dtype=amp_dtype):
        pred = transformer(
            hidden_states=z_t,
            timestep=t_embed,
            encoder_hidden_states=enc_b,
            img_ids=img_ids,
            txt_ids=txt_ids,
            return_dict=False,
        )[0]

        loss = F.mse_loss(pred.float(), target.float())

    if not torch.isfinite(loss):
        print("⚠ NaN detected β€” skipping step")
        optimizer.zero_grad(set_to_none=True)
        continue

    # --------------------
    # Backprop with gradient accumulation
    # --------------------
    loss = loss / ACCUM_STEPS
    loss.backward()
    accum_loss += loss.item()

    if (step + 1) % ACCUM_STEPS == 0:
        torch.nn.utils.clip_grad_norm_(trainable_params, 0.5)
        optimizer.step()
        optimizer.zero_grad(set_to_none=True)
        steps_done += 1

        if steps_done % 25 == 0:
            print(f"Step {steps_done:04d} | Loss: {accum_loss/25:.6f}")
            accum_loss = 0.0

    torch.cuda.empty_cache()


# ----------------------------
# Save LoRA
# ----------------------------
os.makedirs(SAVE_DIR, exist_ok=True)
transformer.save_pretrained(SAVE_DIR)
print("LoRA saved to:", SAVE_DIR)
4.98s/it]","output_type":"stream"},{"name":"stdout","text":"⚠ NaN detected β€” skipping step\n","output_type":"stream"},{"name":"stderr","text":"Training (Flow Matching): 5%|β–Œ | 7/130 [00:34<10:11, 4.97s/it]","output_type":"stream"},{"name":"stdout","text":"⚠ NaN detected β€” skipping step\n","output_type":"stream"},{"name":"stderr","text":"Training (Flow Matching): 6%|β–Œ | 8/130 [00:39<10:08, 4.99s/it]","output_type":"stream"},{"name":"stdout","text":"⚠ NaN detected β€” skipping step\n","output_type":"stream"},{"name":"stderr","text":"Training (Flow Matching): 7%|β–‹ | 9/130 [00:44<10:01, 4.97s/it]","output_type":"stream"},{"name":"stdout","text":"⚠ NaN detected β€” skipping step\n","output_type":"stream"},{"name":"stderr","text":"Training (Flow Matching): 8%|β–Š | 10/130 [00:49<09:59, 5.00s/it]","output_type":"stream"},{"name":"stdout","text":"⚠ NaN detected β€” skipping step\n","output_type":"stream"},{"name":"stderr","text":"Training (Flow Matching): 8%|β–Š | 11/130 [00:54<09:56, 5.01s/it]","output_type":"stream"},{"name":"stdout","text":"⚠ NaN detected β€” skipping step\n","output_type":"stream"},{"name":"stderr","text":"Training (Flow Matching): 9%|β–‰ | 12/130 [00:59<09:52, 5.02s/it]","output_type":"stream"},{"name":"stdout","text":"⚠ NaN detected β€” skipping step\n","output_type":"stream"},{"name":"stderr","text":"Training (Flow Matching): 10%|β–ˆ | 13/130 [01:05<09:47, 5.02s/it]","output_type":"stream"},{"name":"stdout","text":"⚠ NaN detected β€” skipping step\n","output_type":"stream"},{"name":"stderr","text":"Training (Flow Matching): 10%|β–ˆ | 13/130 [01:09<10:29, 5.38s/it]\n","output_type":"stream"},{"traceback":["\u001b[0;31m---------------------------------------------------------------------------\u001b[0m","\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)","\u001b[0;32m/tmp/ipykernel_55/1350163967.py\u001b[0m in \u001b[0;36m<cell line: 0>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 137\u001b[0m 
\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 138\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0misfinite\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mloss\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 139\u001b[0;31m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"⚠ NaN detected β€” skipping step\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 140\u001b[0m \u001b[0moptimizer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mzero_grad\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mset_to_none\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 141\u001b[0m \u001b[0;32mcontinue\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;31mKeyboardInterrupt\u001b[0m: "],"ename":"KeyboardInterrupt","evalue":"","output_type":"error"}],"execution_count":32},{"cell_type":"code","source":"dataset","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-01-21T00:28:15.132368Z","iopub.execute_input":"2026-01-21T00:28:15.132663Z","iopub.status.idle":"2026-01-21T00:28:15.137829Z","shell.execute_reply.started":"2026-01-21T00:28:15.132638Z","shell.execute_reply":"2026-01-21T00:28:15.137176Z"}},"outputs":[{"execution_count":20,"output_type":"execute_result","data":{"text/plain":"<__main__.FluxLatentDataset at 0x7972b8000cb0>"},"metadata":{}}],"execution_count":20}]}