Tags: Not-For-All-Audiences
Upload adcom-flux-klein-4b-lora-training.ipynb
adcom-flux-klein-4b-lora-training.ipynb
{"metadata":{"kernelspec":{"language":"python","display_name":"Python 3","name":"python3"},"language_info":{"name":"python","version":"3.12.12","mimetype":"text/x-python","codemirror_mode":{"name":"ipython","version":3},"pygments_lexer":"ipython3","nbconvert_exporter":"python","file_extension":".py"},"kaggle":{"accelerator":"gpu","dataSources":[{"sourceId":14564379,"sourceType":"datasetVersion","datasetId":8022630}],"dockerImageVersionId":31260,"isInternetEnabled":true,"language":"python","sourceType":"notebook","isGpuEnabled":true}},"nbformat_minor":4,"nbformat":4,"cells":[{"cell_type":"code","source":"HOW TO USE:\n1) Upload a civitai dataset .zip file to your google drive named kaggleset.zip \n2) Use the https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/civit_dataset_to_latent.ipynb notebook \nto convert this dataset to flux_captions.json and flux_latents.safetensors (saved to your drive upon running the script)\n3) Create a private dataset called image-caption-dataset\n4) Add the flux_captions.json and flux_latents.safetensor to this dataset\n5) In this notebook , press the '+ Add input' button and select your private dataset\n6) Run this notebook\n//----//\nIf you have ideas on improvements / developments on FLUX Klein 4B LoRa \ntraining let me know in the comment section of this repo","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"# CELL 1 β Install correct versions\n\n!pip uninstall -y torch torchvision torchaudio diffusers accelerate peft transformers\n\n!pip install --no-deps torch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1 --index-url https://download.pytorch.org/whl/cu121\n\n!pip install --upgrade --no-cache-dir diffusers transformers accelerate peft safetensors tqdm huggingface-hub\n\n!pip install git+https://github.com/huggingface/diffusers.git\n","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-01-21T02:14:03.993767Z","iopub.execute_input":"2026-01-21T02:14:03.994085Z","iopub.status.idle":"2026-01-21T02:17:15.820999Z","shell.execute_reply.started":"2026-01-21T02:14:03.994054Z","shell.execute_reply":"2026-01-21T02:17:15.820052Z"},"collapsed":true,"jupyter":{"outputs_hidden":true}},"outputs":[{"name":"stdout","text":"Found existing installation: torch 2.8.0+cu126\nUninstalling torch-2.8.0+cu126:\n Successfully uninstalled torch-2.8.0+cu126\nFound existing installation: torchvision 0.23.0+cu126\nUninstalling torchvision-0.23.0+cu126:\n Successfully uninstalled torchvision-0.23.0+cu126\nFound existing installation: torchaudio 2.8.0+cu126\nUninstalling torchaudio-2.8.0+cu126:\n Successfully uninstalled torchaudio-2.8.0+cu126\nFound existing installation: diffusers 0.35.2\nUninstalling diffusers-0.35.2:\n Successfully uninstalled diffusers-0.35.2\nFound existing installation: accelerate 1.11.0\nUninstalling accelerate-1.11.0:\n Successfully uninstalled accelerate-1.11.0\nFound existing installation: peft 0.17.1\nUninstalling peft-0.17.1:\n Successfully uninstalled peft-0.17.1\nFound existing installation: transformers 4.57.1\nUninstalling transformers-4.57.1:\n Successfully uninstalled transformers-4.57.1\nLooking in indexes: https://download.pytorch.org/whl/cu121\nCollecting torch==2.5.1\n Downloading https://download.pytorch.org/whl/cu121/torch-2.5.1%2Bcu121-cp312-cp312-linux_x86_64.whl (780.4 MB)\n\u001b[2K \u001b[90mβββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m780.4/780.4 MB\u001b[0m \u001b[31m2.5 MB/s\u001b[0m eta 
```python
# CELL 1 - Install correct versions
!pip uninstall -y torch torchvision torchaudio diffusers accelerate peft transformers

!pip install --no-deps torch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1 --index-url https://download.pytorch.org/whl/cu121

!pip install --upgrade --no-cache-dir diffusers transformers accelerate peft safetensors tqdm huggingface-hub

!pip install git+https://github.com/huggingface/diffusers.git
```

Output (condensed): uninstalls the preinstalled CUDA 12.6 stack (torch 2.8.0+cu126, torchvision 0.23.0, torchaudio 2.8.0, diffusers 0.35.2, accelerate 1.11.0, peft 0.17.1, transformers 4.57.1); installs torch 2.5.1+cu121, torchvision 0.20.1+cu121 and torchaudio 2.5.1+cu121, then diffusers 0.36.0, transformers 4.57.6, accelerate 1.12.0, peft 0.18.1, safetensors 0.7.0, huggingface_hub 1.3.2 and the matching cu121 NVIDIA wheels (cudnn 9.1.0.70, nccl 2.21.5, triton 3.1.0, sympy 1.13.1, etc.); finally builds diffusers 0.37.0.dev0 from source at commit ec376293714f269947f6d9d8a572bd73040bc1a0, replacing the 0.36.0 release. pip reports two pre-existing dependency conflicts (cudf-cu12 wants pyarrow<20.0.0a0, fastai wants fastcore<1.9); neither package is used by this notebook.
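Because the last install line tracks the diffusers main branch, the resolved commit changes between runs. Pinning the commit recorded in the log above is one way to keep reruns reproducible (an editorial suggestion, not part of the original notebook):

```python
# Optional: pin the exact diffusers commit this run resolved to, instead of tracking main.
!pip install git+https://github.com/huggingface/diffusers.git@ec376293714f269947f6d9d8a572bd73040bc1a0
```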
```python
# CELL 2 - Verify
import torch, diffusers

print("Torch:", torch.__version__)
print("Diffusers:", diffusers.__version__)
print("CUDA:", torch.cuda.is_available())
print("GPU:", torch.cuda.get_device_name(0) if torch.cuda.is_available() else "None")

from diffusers import Flux2KleinPipeline
print("Flux2KleinPipeline OK")
```

Output:

```
Torch: 2.5.1+cu121
Diffusers: 0.37.0.dev0
CUDA: True
GPU: Tesla P100-PCIE-16GB
Flux2KleinPipeline OK
```

```python
# CELL 3 - Config
import os
import torch

device = "cuda"
dtype = torch.float16

DATASET_NAME = "image-caption-dataset"  # change if needed

CAPTIONS_PATH = f"/kaggle/input/{DATASET_NAME}/flux_captions.json"
LATENTS_PATH = f"/kaggle/input/{DATASET_NAME}/flux_latents.safetensors"

CACHE_DIR = "/kaggle/working/cache"
SAVE_DIR = "/kaggle/working/flux_klein_lora"

os.makedirs(CACHE_DIR, exist_ok=True)
os.makedirs(SAVE_DIR, exist_ok=True)

# training defaults
# NOTE: CELL 10 redefines STEPS, ACCUM_STEPS and LR; the values below are
# only in effect until then. ALPHA and RANK are consumed by CELL 9.
ACCUM_STEPS = 2
ALPHA = 16
LR = 2e-5
STEPS = 1000  # or more
RANK = 16     # better for FLUX
```

```python
# CELL 4 - Load captions + latents
import json
from safetensors.torch import load_file

with open(CAPTIONS_PATH) as f:
    captions = json.load(f)

latents = load_file(LATENTS_PATH)

keys = list(captions.keys())
print("Samples:", len(keys))
```

Output:

```
Samples: 125
```

```python
# CELL 5 - Dataset (returns latent + key)
import torch
from torch.utils.data import Dataset, DataLoader

class FluxLatentDataset(Dataset):
    def __len__(self):
        return len(keys)

    def __getitem__(self, idx):
        k = keys[idx]
        return latents[k], k  # return the KEY, not the caption

dataset = FluxLatentDataset()
loader = DataLoader(dataset, batch_size=1, shuffle=True)
```
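A quick way to smoke-test the loader before committing to training; this snippet is an illustration, not part of the original notebook (the shape comment assumes latents stored as [1, 32, 128, 128]):

```python
# Illustrative smoke test: pull one batch and confirm shapes and key lookup.
latent_b, key = next(iter(loader))
print(latent_b.shape)   # torch.Size([1, 1, 32, 128, 128]): DataLoader adds a batch dim
print(key[0], "->", captions[key[0]][:60])  # string keys come back wrapped in a list
```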
```python
# CELL 6 - Load tokenizer + text encoder (CPU only)
import torch
from transformers import AutoTokenizer, AutoModel

MODEL_ID = "black-forest-labs/FLUX.2-klein-4B"

tokenizer = AutoTokenizer.from_pretrained(
    MODEL_ID,
    subfolder="tokenizer",
    trust_remote_code=True,
    cache_dir=CACHE_DIR,
)

text_encoder = AutoModel.from_pretrained(
    MODEL_ID,
    subfolder="text_encoder",
    trust_remote_code=True,
    dtype=torch.float16,
    device_map="cpu",
    cache_dir=CACHE_DIR,
).eval()

print("Text encoder loaded on CPU.")
```

Output (condensed): downloads the tokenizer files and the two text-encoder shards (4.97 GB + 3.08 GB), then prints "Text encoder loaded on CPU." transformers emits a "`torch_dtype` is deprecated! Use `dtype` instead!" warning even though the cell already passes `dtype`; it appears to come from the model's own loading code and is harmless.
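To preview what CELL 7 will cache per caption, an illustrative single-caption encode; run it before CELL 7, which moves the encoder to the GPU and then deletes it (the [1, 128, 2560] shape is taken from CELL 7's comment, so treat it as an assumption about this checkpoint):

```python
# Illustrative check of the conditioning tensors CELL 7 will cache (CPU is fine here).
with torch.no_grad():
    inputs = tokenizer(
        "a test caption",
        padding="max_length",
        truncation=True,
        max_length=128,
        return_tensors="pt",
    )
    hidden = text_encoder(**inputs).last_hidden_state
print(inputs["input_ids"].shape)  # torch.Size([1, 128])
print(hidden.shape)               # expected torch.Size([1, 128, 2560]) per CELL 7's comment
```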
```python
# CELL 7 - Cache text embeddings using the GPU, then fully unload the text encoder
import torch, gc
from tqdm import tqdm

text_encoder = text_encoder.to("cuda")

text_cache = {}

with torch.no_grad():
    for k, caption in tqdm(captions.items(), desc="Caching text embeddings"):

        inputs = tokenizer(
            caption,
            padding="max_length",
            truncation=True,
            max_length=128,  # reduce VRAM & attention cost; must match MAX_TXT_TOKENS in CELL 10
            return_tensors="pt",
        ).to("cuda")

        hidden = text_encoder(**inputs).last_hidden_state  # [1, 128, 2560]
        text_cache[k] = hidden.cpu()

# ---- FULL CLEANUP ----
del text_encoder
del tokenizer
gc.collect()
torch.cuda.empty_cache()

print("Text embeddings cached. Text encoder REMOVED from VRAM.")
```

Output:

```
Caching text embeddings: 100%|██████████| 125/125 [00:20<00:00, 5.99it/s]
Text embeddings cached. Text encoder REMOVED from VRAM.
```
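The whole cache lives in CPU RAM, and a back-of-envelope check with the numbers above shows it is small:

```python
# Rough footprint of text_cache: 125 captions x 128 tokens x 2560 dims in fp16.
n, seq, dim, bytes_per_val = 125, 128, 2560, 2
print(f"{n * seq * dim * bytes_per_val / 2**20:.1f} MiB")  # 78.1 MiB
```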
```python
# CELL 8 - Load pipeline, move ONLY the transformer to GPU
import gc, torch
from diffusers import Flux2KleinPipeline

torch.cuda.empty_cache()
gc.collect()

pipe = Flux2KleinPipeline.from_pretrained(
    MODEL_ID,
    torch_dtype=dtype,
    cache_dir=CACHE_DIR,
)

pipe.transformer = pipe.transformer.to("cuda")

# remove unused modules
pipe.text_encoder = None
pipe.vae = None
pipe.scheduler = None
pipe.tokenizer = None

gc.collect()
torch.cuda.empty_cache()

print("Transformer on GPU. No text encoder in memory.")
```

Output (condensed): fetches the 17 pipeline files (including the 7.75 GB transformer and the 168 MB VAE), loads the five pipeline components, and prints "Transformer on GPU. No text encoder in memory."
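On the 16 GB P100 the fp16 transformer weights (7.75 GB on disk) leave limited headroom for activations, so it is worth confirming what is actually resident; an illustrative check, not in the original notebook:

```python
# Illustrative VRAM check after CELL 8: only the transformer should be on the GPU.
import torch
print(f"allocated: {torch.cuda.memory_allocated() / 2**30:.2f} GiB")
print(f"reserved:  {torch.cuda.memory_reserved() / 2**30:.2f} GiB")
```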
```python
# CELL 9 - Attach LoRA to the transformer only
import torch
from peft import LoraConfig, get_peft_model, PeftModel

transformer = pipe.transformer

# If the cell is re-run, strip any previously attached adapter first.
if isinstance(transformer, PeftModel):
    transformer = transformer.unload()
    torch.cuda.empty_cache()

lora_config = LoraConfig(
    r=RANK,
    lora_alpha=ALPHA,
    target_modules=[
        "q_proj", "k_proj", "v_proj", "out_proj",
        "fc1", "fc2",
        "proj_in", "proj_out",
    ],
    lora_dropout=0.05,
    bias="none",
)

transformer = get_peft_model(transformer, lora_config)
transformer.enable_gradient_checkpointing()
pipe.transformer = transformer

trainable = sum(p.numel() for p in transformer.parameters() if p.requires_grad)
total = sum(p.numel() for p in transformer.parameters())
print(f"Trainable params: {trainable/1e6:.2f}M / {total/1e6:.2f}M")
```

Output:

```
Trainable params: 0.05M / 3875.60M
```

(0.05M trainable parameters is very low for rank 16; it may mean only a few of the listed target_modules names exist in this transformer, so the match list is worth double-checking.)

```python
# =========================================================
# CELL 10 - Flow Matching Training (FLUX-2-Klein, LoRA only)
# =========================================================

import torch
import torch.nn.functional as F
from tqdm import trange
import os, gc

# ----------------------------
# CONSTANTS (SAFE DEFAULTS) - these override the CELL 3 values
# ----------------------------
STEPS = 1000
ACCUM_STEPS = 1
LR = 1e-4
MAX_TXT_TOKENS = 128  # must match the cache built in CELL 7
FLOW_T_SCALE = 50.0
CLAMP_VAL = 3.0
GRAD_CLIP = 0.5

device = "cuda"
dtype = torch.float16

# ----------------------------
# Helpers
# ----------------------------
def patchify_latents(latents):
    # latents: [B,32,128,128] -> tokens: [B,4096,128] (2x2 space-to-depth)
    B, C, H, W = latents.shape
    latents = latents.reshape(B, C, H//2, 2, W//2, 2)
    latents = latents.permute(0, 2, 4, 1, 3, 5).contiguous()
    tokens = latents.view(B, (H//2)*(W//2), C*2*2)
    return tokens


def generate_flux_pos_ids(batch, ph, pw, txt_len, device, dtype):
    # image ids: (y, x, scale, aspect) per patch token
    y = torch.linspace(0, 1, ph, device=device, dtype=dtype)
    x = torch.linspace(0, 1, pw, device=device, dtype=dtype)
    gy, gx = torch.meshgrid(y, x, indexing="ij")

    pos_y = gy.flatten()
    pos_x = gx.flatten()
    scale = torch.zeros_like(pos_y)
    aspect = torch.zeros_like(pos_y)

    img_ids = torch.stack([pos_y, pos_x, scale, aspect], dim=-1)
    img_ids = img_ids.unsqueeze(0).repeat(batch, 1, 1)

    # text ids: monotone position in the first slot, zeros elsewhere
    t = torch.arange(txt_len, device=device, dtype=dtype) / txt_len
    zeros = torch.zeros_like(t)
    txt_ids = torch.stack([t, zeros, zeros, zeros], dim=-1)
    txt_ids = txt_ids.unsqueeze(0).repeat(batch, 1, 1)

    return img_ids, txt_ids


# ----------------------------
# Model setup
# ----------------------------
torch.cuda.empty_cache()
gc.collect()

pipe.transformer.train()
pipe.transformer.enable_gradient_checkpointing()

# ----------------------------
# Optimizer (LoRA only)
# ----------------------------
trainable_params = [p for p in pipe.transformer.parameters() if p.requires_grad]

optimizer = torch.optim.AdamW(
    trainable_params,
    lr=LR,
    betas=(0.9, 0.999),
    weight_decay=1e-4,
)
```
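CELL 10 continues below with the training loop. As an aside, patchify_latents is a pure 2x2 space-to-depth rearrangement, so it is lossless; a quick round-trip check (the inverse function is my reconstruction, not notebook code) makes that concrete:

```python
# Illustrative inverse of patchify_latents, verifying the rearrangement is lossless.
import torch

def unpatchify_tokens(tokens, C=32, H=128, W=128):
    B = tokens.shape[0]
    x = tokens.view(B, H // 2, W // 2, C, 2, 2)
    x = x.permute(0, 3, 1, 4, 2, 5).contiguous()
    return x.view(B, C, H, W)

z = torch.randn(1, 32, 128, 128)
assert torch.equal(unpatchify_tokens(patchify_latents(z)), z)  # exact round trip
```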
```python
# ----------------------------
# Training loop (CELL 10, continued)
# ----------------------------
steps_done = 0
accum_loss = 0.0
data_iter = iter(loader)

for step in trange(STEPS, desc="Training (Flow Matching)"):

    # Cycle the dataloader indefinitely.
    try:
        latent_b, key = next(data_iter)
    except StopIteration:
        data_iter = iter(loader)
        latent_b, key = next(data_iter)

    latent_b = latent_b.squeeze(0).to(device, dtype=dtype)  # [1,32,128,128]
    caption = captions[key[0]]  # unused below; conditioning comes from text_cache

    # -------------------------------------------------
    # Text conditioning (CACHED, RAW, NOT PROJECTED)
    # -------------------------------------------------
    with torch.no_grad():
        txt_hidden = text_cache[key[0]].to(device, dtype=dtype)  # [1,T,2560]
        txt_hidden = txt_hidden.repeat(1, 1, 3)                  # -> [1,T,7680]
        enc_b = txt_hidden  # DO NOT project

    # --------------------
    # Patchify
    # --------------------
    tokens = patchify_latents(latent_b)  # [1,4096,128]
    tokens = torch.clamp(tokens, -CLAMP_VAL, CLAMP_VAL)

    # --------------------
    # Flow matching: z_t = (1 - t) * eps + t * x, target v = x - eps
    # --------------------
    eps = torch.randn_like(tokens)
    eps = torch.clamp(eps, -CLAMP_VAL, CLAMP_VAL)

    t = torch.rand(tokens.size(0), device=device, dtype=dtype)

    z_t = (1 - t[:, None, None]) * eps + t[:, None, None] * tokens
    z_t = torch.nan_to_num(z_t, nan=0.0, posinf=1.0, neginf=-1.0)

    target = tokens - eps
    t_embed = t * FLOW_T_SCALE

    # --------------------
    # Positional IDs
    # --------------------
    B = tokens.size(0)
    ph = pw = 64

    img_ids, txt_ids = generate_flux_pos_ids(
        B, ph, pw, enc_b.size(1), device, dtype
    )

    # --------------------
    # SANITY PRINT (once)
    # --------------------
    if step == 0:
        print("latent:", latent_b.shape)
        print("tokens:", tokens.shape)
        print("text enc:", enc_b.shape)
        print("img_ids:", img_ids.shape)
        print("txt_ids:", txt_ids.shape)

    # --------------------
    # Forward
    # --------------------
    with torch.autocast("cuda", dtype=torch.float16):
        pred = pipe.transformer(
            hidden_states=z_t,
            timestep=t_embed,
            encoder_hidden_states=enc_b,
            img_ids=img_ids,
            txt_ids=txt_ids,
            return_dict=False,
        )[0]

        loss = F.mse_loss(pred.float(), target.float())

    if not torch.isfinite(loss):
        print("NaN detected - skipping step")
        optimizer.zero_grad(set_to_none=True)
        continue

    # --------------------
    # Backprop
    # --------------------
    loss = loss / ACCUM_STEPS
    loss.backward()
    accum_loss += loss.item()

    if (step + 1) % ACCUM_STEPS == 0:
        torch.nn.utils.clip_grad_norm_(trainable_params, GRAD_CLIP)
        optimizer.step()
        optimizer.zero_grad(set_to_none=True)
        steps_done += 1

        if steps_done % 25 == 0:
            print(f"Step {steps_done:04d} | Loss: {accum_loss/25:.6f}")
            accum_loss = 0.0

    torch.cuda.empty_cache()


# ----------------------------
# Save LoRA
# ----------------------------
os.makedirs(SAVE_DIR, exist_ok=True)
pipe.transformer.save_pretrained(SAVE_DIR)
print("LoRA saved to:", SAVE_DIR)
```
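Why `target = tokens - eps`: the loop samples z_t on the straight line between noise and data, z_t = (1 - t) * eps + t * tokens, and the flow-matching regression target is the velocity of that path, dz_t/dt = tokens - eps. A tiny numeric check (illustrative only):

```python
# Numeric check: the flow-matching target is the time-derivative of z_t.
import torch
x, eps = torch.randn(8), torch.randn(8)
z = lambda t: (1 - t) * eps + t * x
h = 1e-3
finite_diff = (z(0.37 + h) - z(0.37 - h)) / (2 * h)  # central difference at t = 0.37
assert torch.allclose(finite_diff, x - eps, atol=1e-3)
```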
Output (condensed): the step-0 sanity print, then the running loss every 25 optimizer steps at roughly 4.9 s/step on the P100; the captured run was still in progress at step 382/1000 (ETA about 50 minutes):

```
latent: torch.Size([1, 32, 128, 128])
tokens: torch.Size([1, 4096, 128])
text enc: torch.Size([1, 128, 7680])
img_ids: torch.Size([1, 4096, 4])
txt_ids: torch.Size([1, 128, 4])
Step 0025 | Loss: 4.123635
Step 0050 | Loss: 4.104446
Step 0075 | Loss: 3.954001
Step 0100 | Loss: 3.929707
Step 0125 | Loss: 4.166590
Step 0150 | Loss: 3.922482
Step 0175 | Loss: 3.895434
Step 0200 | Loss: 3.813114
Step 0225 | Loss: 3.854708
Step 0250 | Loss: 4.064212
Step 0275 | Loss: 3.930090
Step 0300 | Loss: 3.822321
Step 0325 | Loss: 3.833376
Step 0350 | Loss: 3.725178
Step 0375 | Loss: 3.863648
```
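The notebook ends before showing how to use the result. A minimal inference sketch, assuming the PEFT adapter layout written by CELL 10's save_pretrained call and the usual diffusers pipeline calling convention (none of this is in the original notebook):

```python
# Illustrative inference: reload the pipeline and attach the trained LoRA adapter.
import torch
from diffusers import Flux2KleinPipeline
from peft import PeftModel

pipe = Flux2KleinPipeline.from_pretrained(
    "black-forest-labs/FLUX.2-klein-4B", torch_dtype=torch.float16
).to("cuda")

# SAVE_DIR from CELL 3; the directory CELL 10 saved the adapter into.
pipe.transformer = PeftModel.from_pretrained(
    pipe.transformer, "/kaggle/working/flux_klein_lora"
)

image = pipe("a watercolor painting of a lighthouse at dusk").images[0]
image.save("sample.png")
```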
{"metadata":{"kernelspec":{"language":"python","display_name":"Python 3","name":"python3"},"language_info":{"name":"python","version":"3.12.12","mimetype":"text/x-python","codemirror_mode":{"name":"ipython","version":3},"pygments_lexer":"ipython3","nbconvert_exporter":"python","file_extension":".py"},"kaggle":{"accelerator":"gpu","dataSources":[{"sourceId":14564379,"sourceType":"datasetVersion","datasetId":8022630}],"dockerImageVersionId":31260,"isInternetEnabled":true,"language":"python","sourceType":"notebook","isGpuEnabled":true}},"nbformat_minor":4,"nbformat":4,"cells":[{"cell_type":"code","source":"HOW TO USE:\n1) Upload a civitai dataset .zip file to your google drive named kaggleset.zip \n2) Use the https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/civit_dataset_to_latent.ipynb notebook \nto convert this dataset to flux_captions.json and flux_latents.safetensors (saved to your drive upon running the script)\n3) Create a private dataset called image-caption-dataset\n4) Add the flux_captions.json and flux_latents.safetensor to this dataset\n5) In this notebook , press the '+ Add input' button and select your private dataset\n6) Run this notebook\n//----//\nIf you have ideas on improvements / developments on FLUX Klein 4B LoRa \ntraining let me know in the comment section of this repo","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"# CELL 1 β Install correct versions\n\n!pip uninstall -y torch torchvision torchaudio diffusers accelerate peft transformers\n\n!pip install --no-deps torch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1 --index-url https://download.pytorch.org/whl/cu121\n\n!pip install --upgrade --no-cache-dir diffusers transformers accelerate peft safetensors tqdm huggingface-hub\n\n!pip install git+https://github.com/huggingface/diffusers.git\n","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-01-21T05:07:52.931255Z","iopub.execute_input":"2026-01-21T05:07:52.931584Z","iopub.status.idle":"2026-01-21T05:11:07.828035Z","shell.execute_reply.started":"2026-01-21T05:07:52.931548Z","shell.execute_reply":"2026-01-21T05:11:07.827200Z"}},"outputs":[{"name":"stdout","text":"Found existing installation: torch 2.8.0+cu126\nUninstalling torch-2.8.0+cu126:\n Successfully uninstalled torch-2.8.0+cu126\nFound existing installation: torchvision 0.23.0+cu126\nUninstalling torchvision-0.23.0+cu126:\n Successfully uninstalled torchvision-0.23.0+cu126\nFound existing installation: torchaudio 2.8.0+cu126\nUninstalling torchaudio-2.8.0+cu126:\n Successfully uninstalled torchaudio-2.8.0+cu126\nFound existing installation: diffusers 0.35.2\nUninstalling diffusers-0.35.2:\n Successfully uninstalled diffusers-0.35.2\nFound existing installation: accelerate 1.11.0\nUninstalling accelerate-1.11.0:\n Successfully uninstalled accelerate-1.11.0\nFound existing installation: peft 0.17.1\nUninstalling peft-0.17.1:\n Successfully uninstalled peft-0.17.1\nFound existing installation: transformers 4.57.1\nUninstalling transformers-4.57.1:\n Successfully uninstalled transformers-4.57.1\nLooking in indexes: https://download.pytorch.org/whl/cu121\nCollecting torch==2.5.1\n Downloading https://download.pytorch.org/whl/cu121/torch-2.5.1%2Bcu121-cp312-cp312-linux_x86_64.whl (780.4 MB)\n\u001b[2K \u001b[90mβββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m780.4/780.4 MB\u001b[0m \u001b[31m2.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:01\u001b[0m\n\u001b[?25hCollecting torchvision==0.20.1\n Downloading 
https://download.pytorch.org/whl/cu121/torchvision-0.20.1%2Bcu121-cp312-cp312-linux_x86_64.whl (7.3 MB)\n\u001b[2K \u001b[90mββββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m7.3/7.3 MB\u001b[0m \u001b[31m5.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m0:00:01\u001b[0m\n\u001b[?25hCollecting torchaudio==2.5.1\n Downloading https://download.pytorch.org/whl/cu121/torchaudio-2.5.1%2Bcu121-cp312-cp312-linux_x86_64.whl (3.4 MB)\n\u001b[2K \u001b[90mββββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m3.4/3.4 MB\u001b[0m \u001b[31m78.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m:00:01\u001b[0m\n\u001b[?25hInstalling collected packages: torchaudio, torchvision, torch\nSuccessfully installed torch-2.5.1+cu121 torchaudio-2.5.1+cu121 torchvision-0.20.1+cu121\nCollecting diffusers\n Downloading diffusers-0.36.0-py3-none-any.whl.metadata (20 kB)\nCollecting transformers\n Downloading transformers-4.57.6-py3-none-any.whl.metadata (43 kB)\n\u001b[2K \u001b[90mββββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m44.0/44.0 kB\u001b[0m \u001b[31m30.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n\u001b[?25hCollecting accelerate\n Downloading accelerate-1.12.0-py3-none-any.whl.metadata (19 kB)\nCollecting peft\n Downloading peft-0.18.1-py3-none-any.whl.metadata (14 kB)\nRequirement already satisfied: safetensors in /usr/local/lib/python3.12/dist-packages (0.6.2)\nCollecting safetensors\n Downloading safetensors-0.7.0-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (4.1 kB)\nRequirement already satisfied: tqdm in /usr/local/lib/python3.12/dist-packages (4.67.1)\nRequirement already satisfied: huggingface-hub in /usr/local/lib/python3.12/dist-packages (0.36.0)\nCollecting huggingface-hub\n Downloading huggingface_hub-1.3.2-py3-none-any.whl.metadata (13 kB)\nRequirement already satisfied: importlib_metadata in /usr/local/lib/python3.12/dist-packages (from diffusers) (8.7.0)\nRequirement already satisfied: filelock in /usr/local/lib/python3.12/dist-packages (from diffusers) (3.20.3)\nRequirement already satisfied: httpx<1.0.0 in /usr/local/lib/python3.12/dist-packages (from diffusers) (0.28.1)\nRequirement already satisfied: numpy in /usr/local/lib/python3.12/dist-packages (from diffusers) (2.0.2)\nRequirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.12/dist-packages (from diffusers) (2025.11.3)\nRequirement already satisfied: requests in /usr/local/lib/python3.12/dist-packages (from diffusers) (2.32.5)\nRequirement already satisfied: Pillow in /usr/local/lib/python3.12/dist-packages (from diffusers) (11.3.0)\nRequirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.12/dist-packages (from transformers) (26.0rc2)\nRequirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.12/dist-packages (from transformers) (6.0.3)\nRequirement already satisfied: tokenizers<=0.23.0,>=0.22.0 in /usr/local/lib/python3.12/dist-packages (from transformers) (0.22.1)\nRequirement already satisfied: psutil in /usr/local/lib/python3.12/dist-packages (from accelerate) (5.9.5)\nRequirement already satisfied: torch>=2.0.0 in /usr/local/lib/python3.12/dist-packages (from accelerate) (2.5.1+cu121)\nRequirement already satisfied: fsspec>=2023.5.0 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub) (2025.10.0)\nRequirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub) (4.15.0)\nRequirement already satisfied: hf-xet<2.0.0,>=1.1.3 in /usr/local/lib/python3.12/dist-packages 
(from huggingface-hub) (1.2.1rc0)\nRequirement already satisfied: anyio in /usr/local/lib/python3.12/dist-packages (from httpx<1.0.0->diffusers) (4.12.1)\nRequirement already satisfied: certifi in /usr/local/lib/python3.12/dist-packages (from httpx<1.0.0->diffusers) (2026.1.4)\nRequirement already satisfied: httpcore==1.* in /usr/local/lib/python3.12/dist-packages (from httpx<1.0.0->diffusers) (1.0.9)\nRequirement already satisfied: idna in /usr/local/lib/python3.12/dist-packages (from httpx<1.0.0->diffusers) (3.11)\nRequirement already satisfied: h11>=0.16 in /usr/local/lib/python3.12/dist-packages (from httpcore==1.*->httpx<1.0.0->diffusers) (0.16.0)\nRequirement already satisfied: networkx in /usr/local/lib/python3.12/dist-packages (from torch>=2.0.0->accelerate) (3.5)\nRequirement already satisfied: jinja2 in /usr/local/lib/python3.12/dist-packages (from torch>=2.0.0->accelerate) (3.1.6)\nCollecting nvidia-cuda-nvrtc-cu12==12.1.105 (from torch>=2.0.0->accelerate)\n Downloading nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-manylinux1_x86_64.whl.metadata (1.5 kB)\nCollecting nvidia-cuda-runtime-cu12==12.1.105 (from torch>=2.0.0->accelerate)\n Downloading nvidia_cuda_runtime_cu12-12.1.105-py3-none-manylinux1_x86_64.whl.metadata (1.5 kB)\nCollecting nvidia-cuda-cupti-cu12==12.1.105 (from torch>=2.0.0->accelerate)\n Downloading nvidia_cuda_cupti_cu12-12.1.105-py3-none-manylinux1_x86_64.whl.metadata (1.6 kB)\nCollecting nvidia-cudnn-cu12==9.1.0.70 (from torch>=2.0.0->accelerate)\n Downloading nvidia_cudnn_cu12-9.1.0.70-py3-none-manylinux2014_x86_64.whl.metadata (1.6 kB)\nCollecting nvidia-cublas-cu12==12.1.3.1 (from torch>=2.0.0->accelerate)\n Downloading nvidia_cublas_cu12-12.1.3.1-py3-none-manylinux1_x86_64.whl.metadata (1.5 kB)\nCollecting nvidia-cufft-cu12==11.0.2.54 (from torch>=2.0.0->accelerate)\n Downloading nvidia_cufft_cu12-11.0.2.54-py3-none-manylinux1_x86_64.whl.metadata (1.5 kB)\nCollecting nvidia-curand-cu12==10.3.2.106 (from torch>=2.0.0->accelerate)\n Downloading nvidia_curand_cu12-10.3.2.106-py3-none-manylinux1_x86_64.whl.metadata (1.5 kB)\nCollecting nvidia-cusolver-cu12==11.4.5.107 (from torch>=2.0.0->accelerate)\n Downloading nvidia_cusolver_cu12-11.4.5.107-py3-none-manylinux1_x86_64.whl.metadata (1.6 kB)\nCollecting nvidia-cusparse-cu12==12.1.0.106 (from torch>=2.0.0->accelerate)\n Downloading nvidia_cusparse_cu12-12.1.0.106-py3-none-manylinux1_x86_64.whl.metadata (1.6 kB)\nCollecting nvidia-nccl-cu12==2.21.5 (from torch>=2.0.0->accelerate)\n Downloading nvidia_nccl_cu12-2.21.5-py3-none-manylinux2014_x86_64.whl.metadata (1.8 kB)\nCollecting nvidia-nvtx-cu12==12.1.105 (from torch>=2.0.0->accelerate)\n Downloading nvidia_nvtx_cu12-12.1.105-py3-none-manylinux1_x86_64.whl.metadata (1.7 kB)\nCollecting triton==3.1.0 (from torch>=2.0.0->accelerate)\n Downloading triton-3.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (1.3 kB)\nRequirement already satisfied: setuptools in /usr/local/lib/python3.12/dist-packages (from torch>=2.0.0->accelerate) (75.2.0)\nCollecting sympy==1.13.1 (from torch>=2.0.0->accelerate)\n Downloading sympy-1.13.1-py3-none-any.whl.metadata (12 kB)\nRequirement already satisfied: nvidia-nvjitlink-cu12 in /usr/local/lib/python3.12/dist-packages (from nvidia-cusolver-cu12==11.4.5.107->torch>=2.0.0->accelerate) (12.6.85)\nRequirement already satisfied: mpmath<1.4,>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from sympy==1.13.1->torch>=2.0.0->accelerate) (1.3.0)\nRequirement already satisfied: zipp>=3.20 in 
/usr/local/lib/python3.12/dist-packages (from importlib_metadata->diffusers) (3.23.0)\nRequirement already satisfied: charset_normalizer<4,>=2 in /usr/local/lib/python3.12/dist-packages (from requests->diffusers) (3.4.4)\nRequirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.12/dist-packages (from requests->diffusers) (2.6.3)\nRequirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.12/dist-packages (from jinja2->torch>=2.0.0->accelerate) (3.0.3)\nDownloading diffusers-0.36.0-py3-none-any.whl (4.6 MB)\n\u001b[2K \u001b[90mββββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m4.6/4.6 MB\u001b[0m \u001b[31m69.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0ma \u001b[36m0:00:01\u001b[0m\n\u001b[?25hDownloading transformers-4.57.6-py3-none-any.whl (12.0 MB)\n\u001b[2K \u001b[90mββββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m12.0/12.0 MB\u001b[0m \u001b[31m221.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m \u001b[36m0:00:01\u001b[0m\n\u001b[?25hDownloading accelerate-1.12.0-py3-none-any.whl (380 kB)\n\u001b[2K \u001b[90mβββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m380.9/380.9 kB\u001b[0m \u001b[31m348.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n\u001b[?25hDownloading peft-0.18.1-py3-none-any.whl (556 kB)\n\u001b[2K \u001b[90mβββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m557.0/557.0 kB\u001b[0m \u001b[31m333.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n\u001b[?25hDownloading safetensors-0.7.0-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (507 kB)\n\u001b[2K \u001b[90mβββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m507.2/507.2 kB\u001b[0m \u001b[31m307.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n\u001b[?25hDownloading nvidia_cublas_cu12-12.1.3.1-py3-none-manylinux1_x86_64.whl (410.6 MB)\n\u001b[2K \u001b[90mβββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m410.6/410.6 MB\u001b[0m \u001b[31m273.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:01\u001b[0m\n\u001b[?25hDownloading nvidia_cuda_cupti_cu12-12.1.105-py3-none-manylinux1_x86_64.whl (14.1 MB)\n\u001b[2K \u001b[90mββββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m14.1/14.1 MB\u001b[0m \u001b[31m204.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m \u001b[36m0:00:01\u001b[0m\n\u001b[?25hDownloading nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-manylinux1_x86_64.whl (23.7 MB)\n\u001b[2K \u001b[90mββββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m23.7/23.7 MB\u001b[0m \u001b[31m203.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0ma \u001b[36m0:00:01\u001b[0m\n\u001b[?25hDownloading nvidia_cuda_runtime_cu12-12.1.105-py3-none-manylinux1_x86_64.whl (823 kB)\n\u001b[2K \u001b[90mβββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m823.6/823.6 kB\u001b[0m \u001b[31m346.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n\u001b[?25hDownloading nvidia_cudnn_cu12-9.1.0.70-py3-none-manylinux2014_x86_64.whl (664.8 MB)\n\u001b[2K \u001b[90mβββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m664.8/664.8 MB\u001b[0m \u001b[31m286.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:01\u001b[0m\n\u001b[?25hDownloading nvidia_cufft_cu12-11.0.2.54-py3-none-manylinux1_x86_64.whl (121.6 MB)\n\u001b[2K \u001b[90mβββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m121.6/121.6 MB\u001b[0m \u001b[31m297.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0ma \u001b[36m0:00:01\u001b[0m\n\u001b[?25hDownloading nvidia_curand_cu12-10.3.2.106-py3-none-manylinux1_x86_64.whl (56.5 MB)\n\u001b[2K 
\u001b[90mββββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m56.5/56.5 MB\u001b[0m \u001b[31m194.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0ma \u001b[36m0:00:01\u001b[0m\n\u001b[?25hDownloading nvidia_cusolver_cu12-11.4.5.107-py3-none-manylinux1_x86_64.whl (124.2 MB)\n\u001b[2K \u001b[90mβββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m124.2/124.2 MB\u001b[0m \u001b[31m226.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:01\u001b[0m\n\u001b[?25hDownloading nvidia_cusparse_cu12-12.1.0.106-py3-none-manylinux1_x86_64.whl (196.0 MB)\n\u001b[2K \u001b[90mβββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m196.0/196.0 MB\u001b[0m \u001b[31m195.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:01\u001b[0m\n\u001b[?25hDownloading nvidia_nccl_cu12-2.21.5-py3-none-manylinux2014_x86_64.whl (188.7 MB)\n\u001b[2K \u001b[90mβββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m188.7/188.7 MB\u001b[0m \u001b[31m181.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:01\u001b[0m\n\u001b[?25hDownloading nvidia_nvtx_cu12-12.1.105-py3-none-manylinux1_x86_64.whl (99 kB)\n\u001b[2K \u001b[90mββββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m99.1/99.1 kB\u001b[0m \u001b[31m282.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n\u001b[?25hDownloading sympy-1.13.1-py3-none-any.whl (6.2 MB)\n\u001b[2K \u001b[90mββββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m6.2/6.2 MB\u001b[0m \u001b[31m302.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n\u001b[?25hDownloading triton-3.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (209.6 MB)\n\u001b[2K \u001b[90mβββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m209.6/209.6 MB\u001b[0m \u001b[31m217.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:01\u001b[0m\n\u001b[?25hInstalling collected packages: triton, sympy, safetensors, nvidia-nvtx-cu12, nvidia-nccl-cu12, nvidia-cusparse-cu12, nvidia-curand-cu12, nvidia-cufft-cu12, nvidia-cuda-runtime-cu12, nvidia-cuda-nvrtc-cu12, nvidia-cuda-cupti-cu12, nvidia-cublas-cu12, nvidia-cusolver-cu12, nvidia-cudnn-cu12, diffusers, transformers, accelerate, peft\n Attempting uninstall: triton\n Found existing installation: triton 3.4.0\n Uninstalling triton-3.4.0:\n Successfully uninstalled triton-3.4.0\n Attempting uninstall: sympy\n Found existing installation: sympy 1.13.3\n Uninstalling sympy-1.13.3:\n Successfully uninstalled sympy-1.13.3\n Attempting uninstall: safetensors\n Found existing installation: safetensors 0.6.2\n Uninstalling safetensors-0.6.2:\n Successfully uninstalled safetensors-0.6.2\n Attempting uninstall: nvidia-nvtx-cu12\n Found existing installation: nvidia-nvtx-cu12 12.6.77\n Uninstalling nvidia-nvtx-cu12-12.6.77:\n Successfully uninstalled nvidia-nvtx-cu12-12.6.77\n Attempting uninstall: nvidia-nccl-cu12\n Found existing installation: nvidia-nccl-cu12 2.27.3\n Uninstalling nvidia-nccl-cu12-2.27.3:\n Successfully uninstalled nvidia-nccl-cu12-2.27.3\n Attempting uninstall: nvidia-cusparse-cu12\n Found existing installation: nvidia-cusparse-cu12 12.5.4.2\n Uninstalling nvidia-cusparse-cu12-12.5.4.2:\n Successfully uninstalled nvidia-cusparse-cu12-12.5.4.2\n Attempting uninstall: nvidia-curand-cu12\n Found existing installation: nvidia-curand-cu12 10.3.7.77\n Uninstalling nvidia-curand-cu12-10.3.7.77:\n Successfully uninstalled nvidia-curand-cu12-10.3.7.77\n Attempting uninstall: nvidia-cufft-cu12\n Found existing installation: nvidia-cufft-cu12 11.3.0.4\n Uninstalling 
nvidia-cufft-cu12-11.3.0.4:\n Successfully uninstalled nvidia-cufft-cu12-11.3.0.4\n Attempting uninstall: nvidia-cuda-runtime-cu12\n Found existing installation: nvidia-cuda-runtime-cu12 12.6.77\n Uninstalling nvidia-cuda-runtime-cu12-12.6.77:\n Successfully uninstalled nvidia-cuda-runtime-cu12-12.6.77\n Attempting uninstall: nvidia-cuda-nvrtc-cu12\n Found existing installation: nvidia-cuda-nvrtc-cu12 12.6.77\n Uninstalling nvidia-cuda-nvrtc-cu12-12.6.77:\n Successfully uninstalled nvidia-cuda-nvrtc-cu12-12.6.77\n Attempting uninstall: nvidia-cuda-cupti-cu12\n Found existing installation: nvidia-cuda-cupti-cu12 12.6.80\n Uninstalling nvidia-cuda-cupti-cu12-12.6.80:\n Successfully uninstalled nvidia-cuda-cupti-cu12-12.6.80\n Attempting uninstall: nvidia-cublas-cu12\n Found existing installation: nvidia-cublas-cu12 12.6.4.1\n Uninstalling nvidia-cublas-cu12-12.6.4.1:\n Successfully uninstalled nvidia-cublas-cu12-12.6.4.1\n Attempting uninstall: nvidia-cusolver-cu12\n Found existing installation: nvidia-cusolver-cu12 11.7.1.2\n Uninstalling nvidia-cusolver-cu12-11.7.1.2:\n Successfully uninstalled nvidia-cusolver-cu12-11.7.1.2\n Attempting uninstall: nvidia-cudnn-cu12\n Found existing installation: nvidia-cudnn-cu12 9.10.2.21\n Uninstalling nvidia-cudnn-cu12-9.10.2.21:\n Successfully uninstalled nvidia-cudnn-cu12-9.10.2.21\n\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\ncudf-cu12 25.6.0 requires pyarrow<20.0.0a0,>=14.0.0; platform_machine == \"x86_64\", but you have pyarrow 22.0.0 which is incompatible.\nfastai 2.8.4 requires fastcore<1.9,>=1.8.0, but you have fastcore 1.11.3 which is incompatible.\u001b[0m\u001b[31m\n\u001b[0mSuccessfully installed accelerate-1.12.0 diffusers-0.36.0 nvidia-cublas-cu12-12.1.3.1 nvidia-cuda-cupti-cu12-12.1.105 nvidia-cuda-nvrtc-cu12-12.1.105 nvidia-cuda-runtime-cu12-12.1.105 nvidia-cudnn-cu12-9.1.0.70 nvidia-cufft-cu12-11.0.2.54 nvidia-curand-cu12-10.3.2.106 nvidia-cusolver-cu12-11.4.5.107 nvidia-cusparse-cu12-12.1.0.106 nvidia-nccl-cu12-2.21.5 nvidia-nvtx-cu12-12.1.105 peft-0.18.1 safetensors-0.7.0 sympy-1.13.1 transformers-4.57.6 triton-3.1.0\nCollecting git+https://github.com/huggingface/diffusers.git\n Cloning https://github.com/huggingface/diffusers.git to /tmp/pip-req-build-farey5tk\n Running command git clone --filter=blob:none --quiet https://github.com/huggingface/diffusers.git /tmp/pip-req-build-farey5tk\n Resolved https://github.com/huggingface/diffusers.git to commit ec376293714f269947f6d9d8a572bd73040bc1a0\n Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n Preparing metadata (pyproject.toml) ... 
\u001b[?25l\u001b[?25hdone\nRequirement already satisfied: importlib_metadata in /usr/local/lib/python3.12/dist-packages (from diffusers==0.37.0.dev0) (8.7.0)\nRequirement already satisfied: filelock in /usr/local/lib/python3.12/dist-packages (from diffusers==0.37.0.dev0) (3.20.3)\nRequirement already satisfied: httpx<1.0.0 in /usr/local/lib/python3.12/dist-packages (from diffusers==0.37.0.dev0) (0.28.1)\nRequirement already satisfied: huggingface-hub<2.0,>=0.34.0 in /usr/local/lib/python3.12/dist-packages (from diffusers==0.37.0.dev0) (0.36.0)\nRequirement already satisfied: numpy in /usr/local/lib/python3.12/dist-packages (from diffusers==0.37.0.dev0) (2.0.2)\nRequirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.12/dist-packages (from diffusers==0.37.0.dev0) (2025.11.3)\nRequirement already satisfied: requests in /usr/local/lib/python3.12/dist-packages (from diffusers==0.37.0.dev0) (2.32.5)\nRequirement already satisfied: safetensors>=0.3.1 in /usr/local/lib/python3.12/dist-packages (from diffusers==0.37.0.dev0) (0.7.0)\nRequirement already satisfied: Pillow in /usr/local/lib/python3.12/dist-packages (from diffusers==0.37.0.dev0) (11.3.0)\nRequirement already satisfied: anyio in /usr/local/lib/python3.12/dist-packages (from httpx<1.0.0->diffusers==0.37.0.dev0) (4.12.1)\nRequirement already satisfied: certifi in /usr/local/lib/python3.12/dist-packages (from httpx<1.0.0->diffusers==0.37.0.dev0) (2026.1.4)\nRequirement already satisfied: httpcore==1.* in /usr/local/lib/python3.12/dist-packages (from httpx<1.0.0->diffusers==0.37.0.dev0) (1.0.9)\nRequirement already satisfied: idna in /usr/local/lib/python3.12/dist-packages (from httpx<1.0.0->diffusers==0.37.0.dev0) (3.11)\nRequirement already satisfied: h11>=0.16 in /usr/local/lib/python3.12/dist-packages (from httpcore==1.*->httpx<1.0.0->diffusers==0.37.0.dev0) (0.16.0)\nRequirement already satisfied: fsspec>=2023.5.0 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub<2.0,>=0.34.0->diffusers==0.37.0.dev0) (2025.10.0)\nRequirement already satisfied: packaging>=20.9 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub<2.0,>=0.34.0->diffusers==0.37.0.dev0) (26.0rc2)\nRequirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub<2.0,>=0.34.0->diffusers==0.37.0.dev0) (6.0.3)\nRequirement already satisfied: tqdm>=4.42.1 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub<2.0,>=0.34.0->diffusers==0.37.0.dev0) (4.67.1)\nRequirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub<2.0,>=0.34.0->diffusers==0.37.0.dev0) (4.15.0)\nRequirement already satisfied: hf-xet<2.0.0,>=1.1.3 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub<2.0,>=0.34.0->diffusers==0.37.0.dev0) (1.2.1rc0)\nRequirement already satisfied: zipp>=3.20 in /usr/local/lib/python3.12/dist-packages (from importlib_metadata->diffusers==0.37.0.dev0) (3.23.0)\nRequirement already satisfied: charset_normalizer<4,>=2 in /usr/local/lib/python3.12/dist-packages (from requests->diffusers==0.37.0.dev0) (3.4.4)\nRequirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.12/dist-packages (from requests->diffusers==0.37.0.dev0) (2.6.3)\nBuilding wheels for collected packages: diffusers\n Building wheel for diffusers (pyproject.toml) ... 
 Created wheel for diffusers: filename=diffusers-0.37.0.dev0-py3-none-any.whl size=4893406 sha256=230dbccc8416b199faf6415754cf657741349710e790282da0eae7145d122845\n  Stored in directory: /tmp/pip-ephem-wheel-cache-w2savifu/wheels/23/0f/7d/f97813d265ed0e599a78d83afd4e1925740896ca79b46cccfd\nSuccessfully built diffusers\nInstalling collected packages: diffusers\n  Attempting uninstall: diffusers\n    Found existing installation: diffusers 0.36.0\n    Uninstalling diffusers-0.36.0:\n      Successfully uninstalled diffusers-0.36.0\nSuccessfully installed diffusers-0.37.0.dev0\n","output_type":"stream"}],"execution_count":1},{"cell_type":"code","source":"# CELL 2 - Verify\n\nimport torch, diffusers\n\nprint(\"Torch:\", torch.__version__)\nprint(\"Diffusers:\", diffusers.__version__)\nprint(\"CUDA:\", torch.cuda.is_available())\nprint(\"GPU:\", torch.cuda.get_device_name(0) if torch.cuda.is_available() else \"None\")\n\n# Flux2KleinPipeline requires the diffusers build installed from git above\nfrom diffusers import Flux2KleinPipeline\nprint(\"Flux2KleinPipeline OK\")","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-01-21T05:11:07.830231Z","iopub.execute_input":"2026-01-21T05:11:07.830498Z","iopub.status.idle":"2026-01-21T05:11:34.530651Z","shell.execute_reply.started":"2026-01-21T05:11:07.830467Z","shell.execute_reply":"2026-01-21T05:11:34.529728Z"}},"outputs":[{"name":"stdout","text":"Torch: 2.5.1+cu121\nDiffusers: 0.37.0.dev0\nCUDA: True\nGPU: Tesla P100-PCIE-16GB\n","output_type":"stream"},{"name":"stderr","text":"2026-01-21 05:11:20.477776: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\nE0000 00:00:1768972280.686105 55 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\nE0000 00:00:1768972280.747505 55 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\nW0000 00:00:1768972281.239000 55 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\nW0000 00:00:1768972281.239052 55 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\nW0000 00:00:1768972281.239055 55 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\nW0000 00:00:1768972281.239058 55 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\nFlax classes are deprecated and will be removed in Diffusers v1.0.0. We recommend migrating to PyTorch classes or pinning your version of Diffusers.\nFlax classes are deprecated and will be removed in Diffusers v1.0.0. 
We recommend migrating to PyTorch classes or pinning your version of Diffusers.\n","output_type":"stream"},{"name":"stdout","text":"Flux2KleinPipeline OK\n","output_type":"stream"}],"execution_count":2},{"cell_type":"code","source":"# CELL 3 - Config\n\nimport os\n\ndevice = \"cuda\"\ndtype = torch.float16\n\nDATASET_NAME = \"image-caption-dataset\"  # change if your private dataset has a different name\n\nCAPTIONS_PATH = f\"/kaggle/input/{DATASET_NAME}/flux_captions.json\"\nLATENTS_PATH = f\"/kaggle/input/{DATASET_NAME}/flux_latents.safetensors\"\n\nCACHE_DIR = \"/kaggle/working/cache\"\nSAVE_DIR = \"/kaggle/working/flux_klein_lora\"\n\nos.makedirs(CACHE_DIR, exist_ok=True)\nos.makedirs(SAVE_DIR, exist_ok=True)\n\n# training hyperparameters\n# NOTE: CELL 9 overrides LR, STEPS and SAVE_DIR right before training\nACCUM_STEPS = 2\nALPHA = 16\n#--#\nLR = 2e-5\nSTEPS = 1000  # or more\nRANK = 16  # LoRA rank; 16 is a reasonable default for FLUX\n","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-01-21T05:11:34.531906Z","iopub.execute_input":"2026-01-21T05:11:34.532781Z","iopub.status.idle":"2026-01-21T05:11:34.538417Z","shell.execute_reply.started":"2026-01-21T05:11:34.532750Z","shell.execute_reply":"2026-01-21T05:11:34.537579Z"}},"outputs":[],"execution_count":3},{"cell_type":"code","source":"# CELL 4 - Load captions + latents\n\nimport json\nfrom safetensors.torch import load_file\n\nwith open(CAPTIONS_PATH) as f:\n    captions = json.load(f)\n\nlatents = load_file(LATENTS_PATH)\n\nkeys = list(captions.keys())\nprint(\"Samples:\", len(keys))\n","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-01-21T05:11:34.539875Z","iopub.execute_input":"2026-01-21T05:11:34.540191Z","iopub.status.idle":"2026-01-21T05:11:35.046899Z","shell.execute_reply.started":"2026-01-21T05:11:34.540155Z","shell.execute_reply":"2026-01-21T05:11:35.045958Z"}},"outputs":[{"name":"stdout","text":"Samples: 125\n","output_type":"stream"}],"execution_count":4},{"cell_type":"code","source":"# CELL 5 - Dataset (returns latent + key)\n\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\n\nclass FluxLatentDataset(Dataset):\n    def __len__(self):\n        return len(keys)\n\n    def __getitem__(self, idx):\n        k = keys[idx]\n        return latents[k], k  # return the KEY, not the caption: CELL 6 caches embeddings per key\n\ndataset = FluxLatentDataset()\nloader = DataLoader(dataset, batch_size=1, shuffle=True)\n","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-01-21T05:11:35.048565Z","iopub.execute_input":"2026-01-21T05:11:35.048853Z","iopub.status.idle":"2026-01-21T05:11:35.055112Z","shell.execute_reply.started":"2026-01-21T05:11:35.048824Z","shell.execute_reply":"2026-01-21T05:11:35.054378Z"}},"outputs":[],"execution_count":5},{"cell_type":"code","source":"# =========================================================\n# CELL 6 - Encode text on GPU and CACHE FLUX-READY embeddings\n# =========================================================\n\nimport torch, gc\nfrom transformers import AutoTokenizer, AutoModel\n\nMODEL_ID_TEXT = \"black-forest-labs/FLUX.2-klein-base-4B\"\n\n# load tokenizer + text encoder ONLY\ntokenizer = AutoTokenizer.from_pretrained(\n    MODEL_ID_TEXT,\n    subfolder=\"tokenizer\",\n    trust_remote_code=True,\n    cache_dir=CACHE_DIR\n)\n\ntext_encoder = AutoModel.from_pretrained(\n    MODEL_ID_TEXT,\n    subfolder=\"text_encoder\",\n    trust_remote_code=True,\n    dtype=torch.float16,\n    cache_dir=CACHE_DIR\n).to(\"cuda\")\n\ntext_encoder.eval()\n\ntext_cache = {}\n\nwith torch.no_grad():\n    for k, caption in captions.items():\n\n        inputs = tokenizer(\n            caption,\n            padding=\"max_length\",\n            truncation=True,\n            max_length=128,\n            return_tensors=\"pt\"\n        ).to(\"cuda\")\n\n
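        # Cache FLUX-ready conditioning on CPU so the 4B text encoder can\n        # be freed from VRAM before the transformer is loaded in CELL 7.\n        # output_hidden_states=True exposes all layers; the last hidden\n        # layer is taken as the sequence embedding below.\n        outputs = 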
text_encoder(\n            **inputs,\n            output_hidden_states=True,\n            return_dict=True,\n        )\n\n        # last hidden layer\n        txt_hidden = outputs.hidden_states[-1]  # [1, T, 2560]\n\n        # expand to the FLUX context width (3x) by tiling the feature dim\n        txt_hidden = txt_hidden.repeat(1, 1, 3)  # -> [1, T, 7680]\n\n        text_cache[k] = txt_hidden.cpu()\n\nprint(\"Cached FLUX-ready text embeddings.\")\n\n# free GPU\ndel text_encoder\ntorch.cuda.empty_cache()\ngc.collect()\n","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-01-21T05:11:35.056313Z","iopub.execute_input":"2026-01-21T05:11:35.056682Z","iopub.status.idle":"2026-01-21T05:12:32.093604Z","shell.execute_reply.started":"2026-01-21T05:11:35.056647Z","shell.execute_reply":"2026-01-21T05:12:32.092931Z"}},"outputs":[
?B/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"2c6ce4b9e2db43fc926f16344773039c"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"Loading checkpoint shards: 0%| | 0/2 [00:00<?, ?it/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"8ccb471865e144d5a60a01125c93e3bb"}},"metadata":{}},{"name":"stdout","text":"Cached FLUX-ready text embeddings.\n","output_type":"stream"},{"execution_count":6,"output_type":"execute_result","data":{"text/plain":"8683"},"metadata":{}}],"execution_count":6},{"cell_type":"code","source":"# =========================================================\n# CELL 7 β Load FLUX Transformer and Attach LoRA (ALL LINEAR)\n# =========================================================\n\nimport torch, gc\nfrom diffusers import Flux2KleinPipeline\nfrom peft import LoraConfig, get_peft_model\n\nMODEL_ID = MODEL_ID_TEXT\n\npipe = Flux2KleinPipeline.from_pretrained(\n MODEL_ID,\n torch_dtype=torch.float16,\n cache_dir=CACHE_DIR,\n)\n\n# keep ONLY transformer\npipe.text_encoder = None\npipe.vae = None\npipe.scheduler = None\n\ntransformer = pipe.transformer\n\n# ---------------- LoRA CONFIG ----------------\nlora_config = LoraConfig(\n r=16,\n lora_alpha=16,\n target_modules=\"all-linear\", # β
    lora_dropout=0.05,\n    bias=\"none\",\n)\n\ntransformer = get_peft_model(transformer, lora_config)\ntransformer.enable_gradient_checkpointing()  # trade compute for VRAM on the 16 GB P100\n\npipe.transformer = transformer\n\ntrainable = sum(p.numel() for p in transformer.parameters() if p.requires_grad)\ntotal = sum(p.numel() for p in transformer.parameters())\n\nprint(f\"Trainable params: {trainable/1e6:.2f}M / {total/1e6:.2f}M\")\n","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-01-21T05:12:32.095340Z","iopub.execute_input":"2026-01-21T05:12:32.095839Z","iopub.status.idle":"2026-01-21T05:13:39.579302Z","shell.execute_reply.started":"2026-01-21T05:12:32.095811Z","shell.execute_reply":"2026-01-21T05:13:39.578289Z"}},"outputs":[{"name":"stdout","text":"Trainable params: 25.54M / 3901.09M\n","output_type":"stream"}],"execution_count":7},{"cell_type":"code","source":"# CELL 8 - Move Transformer to GPU Only\nimport gc, torch\n\npipe.transformer = pipe.transformer.to(\"cuda\")\n\ngc.collect()\ntorch.cuda.empty_cache()\n\n
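# At this point only the fp16, LoRA-wrapped transformer needs VRAM:\n# image latents (CELL 4) and text embeddings (CELL 6) are precomputed,\n# so neither the VAE nor the text encoder is resident during training.\nprint(\"Transformer loaded on 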
GPU.\")\n\n\n","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-01-21T05:13:46.899270Z","iopub.execute_input":"2026-01-21T05:13:46.900127Z","iopub.status.idle":"2026-01-21T05:13:47.298989Z","shell.execute_reply.started":"2026-01-21T05:13:46.900056Z","shell.execute_reply":"2026-01-21T05:13:47.298134Z"}},"outputs":[{"name":"stdout","text":"Transformer loaded on GPU.\n","output_type":"stream"}],"execution_count":9},{"cell_type":"code","source":"# CELL 9 β Flow Matching Training (FLUX Klein LoRA)\nimport torch\nimport torch.nn.functional as F\nfrom tqdm import trange\nimport os\n\n# ---------------- Config ----------------\n\nSTEPS = 100\nLR = 1e-4\nFLOW_T_SCALE = 50.0\nCLAMP_VAL = 3.0\nGRAD_CLIP = 0.5\n\nSAVE_DIR = \"./flux_klein_lora\"\n\ndevice = \"cuda\"\ndtype = torch.float16\n\npipe.transformer.train()\n\n# ---------------- Helpers ----------------\n\ndef patchify_latents(latents):\n # latents: [B, 32, 128, 128]\n B, C, H, W = latents.shape\n latents = latents.reshape(B, C, H//2, 2, W//2, 2)\n latents = latents.permute(0, 2, 4, 1, 3, 5).contiguous()\n return latents.view(B, (H//2)*(W//2), C*2*2) # [B, 4096, 128]\n\n\ndef generate_flux_pos_ids(batch, ph, pw, txt_len, device, dtype):\n # image positions\n y = torch.linspace(0, 1, ph, device=device, dtype=dtype)\n x = torch.linspace(0, 1, pw, device=device, dtype=dtype)\n gy, gx = torch.meshgrid(y, x, indexing=\"ij\")\n\n pos_y = gy.flatten()\n pos_x = gx.flatten()\n zeros = torch.zeros_like(pos_y)\n\n img_ids = torch.stack([pos_y, pos_x, zeros, zeros], dim=-1)\n img_ids = img_ids.unsqueeze(0).repeat(batch, 1, 1)\n\n # text positions\n t = torch.arange(txt_len, device=device, dtype=dtype) / txt_len\n zeros_t = torch.zeros_like(t)\n\n txt_ids = torch.stack([t, zeros_t, zeros_t, zeros_t], dim=-1)\n txt_ids = txt_ids.unsqueeze(0).repeat(batch, 1, 1)\n\n return img_ids, txt_ids\n\n\n# ---------------- Optimizer ----------------\n\ntrainable_params = [p for p in pipe.transformer.parameters() if p.requires_grad]\n\noptimizer = torch.optim.AdamW(\n trainable_params,\n lr=LR,\n betas=(0.9, 0.999),\n weight_decay=1e-4,\n)\n\ndata_iter = iter(loader)\n\n# ---------------- Training Loop ----------------\n\nfor step in trange(STEPS, desc=\"Training (Flow Matching)\"):\n\n try:\n latent_b, key = next(data_iter)\n except StopIteration:\n data_iter = iter(loader)\n latent_b, key = next(data_iter)\n\n latent_b = latent_b.squeeze(0).to(device, dtype=dtype)\n enc_b = text_cache[key[0]].to(device, dtype=dtype) # [1, T, 7680]\n\n if step == 0:\n print(\"latent:\", latent_b.shape)\n print(\"text:\", enc_b.shape)\n\n # ---- patchify latents ----\n tokens = patchify_latents(latent_b)\n tokens = torch.clamp(tokens, -CLAMP_VAL, CLAMP_VAL)\n\n # ---- flow matching noise ----\n eps = torch.randn_like(tokens)\n eps = torch.clamp(eps, -CLAMP_VAL, CLAMP_VAL)\n\n t = torch.rand(tokens.size(0), device=device, dtype=dtype)\n\n z_t = (1 - t[:, None, None]) * eps + t[:, None, None] * tokens\n target = tokens - eps\n t_embed = t * FLOW_T_SCALE\n\n # ---- positional ids ----\n img_ids, txt_ids = generate_flux_pos_ids(\n tokens.size(0), 64, 64, enc_b.size(1), device, dtype\n )\n\n # ---- forward ----\n with torch.autocast(\"cuda\", dtype=torch.float16):\n pred = pipe.transformer(\n hidden_states=z_t,\n timestep=t_embed,\n encoder_hidden_states=enc_b,\n img_ids=img_ids,\n txt_ids=txt_ids,\n return_dict=False,\n )[0]\n\n loss = F.mse_loss(pred.float(), target.float())\n\n # ---- backward ----\n loss.backward()\n torch.nn.utils.clip_grad_norm_(trainable_params, 
GRAD_CLIP)\n    optimizer.step()\n    optimizer.zero_grad(set_to_none=True)\n\n    if step % 25 == 0:\n        print(f\"Step {step} | Loss {loss.item():.6f}\")\n\n# ---------------- Save LoRA ----------------\n\nos.makedirs(SAVE_DIR, exist_ok=True)\npipe.transformer.save_pretrained(SAVE_DIR)\n\nprint(\"LoRA saved to:\", SAVE_DIR)\n","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-01-21T05:13:56.950505Z","iopub.execute_input":"2026-01-21T05:13:56.951540Z"}},"outputs":[{"name":"stderr","text":"Training (Flow Matching): 0%| | 0/100 [00:00<?, ?it/s]","output_type":"stream"},{"name":"stdout","text":"latent: torch.Size([1, 32, 128, 128])\ntext: torch.Size([1, 128, 7680])\n","output_type":"stream"},{"name":"stderr","text":"Training (Flow Matching): 1%| | 1/100 [00:23<39:22, 23.86s/it]","output_type":"stream"},{"name":"stdout","text":"Step 0 | Loss 3.835143\n","output_type":"stream"},{"name":"stderr","text":"Training (Flow Matching): 13%|█▎ | 13/100 [04:59<33:33, 23.14s/it]","output_type":"stream"}],"execution_count":null}]}