Datasets:
Tags:
Not-For-All-Audiences
Upload adcom-flux-klein-4b-lora-training.ipynb
Browse files
adcom-flux-klein-4b-lora-training.ipynb
CHANGED
|
@@ -1 +1 @@
|
|
| 1 |
-
{"metadata":{"kernelspec":{"language":"python","display_name":"Python 3","name":"python3"},"language_info":{"name":"python","version":"3.12.12","mimetype":"text/x-python","codemirror_mode":{"name":"ipython","version":3},"pygments_lexer":"ipython3","nbconvert_exporter":"python","file_extension":".py"},"kaggle":{"accelerator":"gpu","dataSources":[{"sourceId":14564379,"sourceType":"datasetVersion","datasetId":8022630}],"dockerImageVersionId":31260,"isInternetEnabled":true,"language":"python","sourceType":"notebook","isGpuEnabled":true}},"nbformat_minor":4,"nbformat":4,"cells":[{"cell_type":"code","source":"HOW TO USE:\n1) Upload a civitai dataset .zip file to your google drive named kaggleset.zip \n2) Use the https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/civit_dataset_to_latent.ipynb notebook \nto convert this dataset to flux_captions.json and flux_latents.safetensors (saved to your drive upon running the script)\n3) Create a private dataset called image-caption-dataset\n4) Add the flux_captions.json and flux_latents.safetensor to this dataset\n5) In this notebook , press the '+ Add input' button and select your private dataset\n6) Run this notebook\n//----//\nIf you have ideas on improvements / developments on FLUX Klein 4B LoRa \ntraining let me know in the comment section of this repo","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"# CELL 1 β Install correct versions\n\n!pip uninstall -y torch torchvision torchaudio diffusers accelerate peft transformers\n\n!pip install --no-deps torch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1 --index-url https://download.pytorch.org/whl/cu121\n\n!pip install --upgrade --no-cache-dir diffusers transformers accelerate peft safetensors tqdm huggingface-hub\n\n!pip install 
git+https://github.com/huggingface/diffusers.git\n","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-01-21T05:07:52.931255Z","iopub.execute_input":"2026-01-21T05:07:52.931584Z","iopub.status.idle":"2026-01-21T05:11:07.828035Z","shell.execute_reply.started":"2026-01-21T05:07:52.931548Z","shell.execute_reply":"2026-01-21T05:11:07.827200Z"},"collapsed":true,"jupyter":{"outputs_hidden":true}},"outputs":[{"name":"stdout","text":"Found existing installation: torch 2.8.0+cu126\nUninstalling torch-2.8.0+cu126:\n Successfully uninstalled torch-2.8.0+cu126\nFound existing installation: torchvision 0.23.0+cu126\nUninstalling torchvision-0.23.0+cu126:\n Successfully uninstalled torchvision-0.23.0+cu126\nFound existing installation: torchaudio 2.8.0+cu126\nUninstalling torchaudio-2.8.0+cu126:\n Successfully uninstalled torchaudio-2.8.0+cu126\nFound existing installation: diffusers 0.35.2\nUninstalling diffusers-0.35.2:\n Successfully uninstalled diffusers-0.35.2\nFound existing installation: accelerate 1.11.0\nUninstalling accelerate-1.11.0:\n Successfully uninstalled accelerate-1.11.0\nFound existing installation: peft 0.17.1\nUninstalling peft-0.17.1:\n Successfully uninstalled peft-0.17.1\nFound existing installation: transformers 4.57.1\nUninstalling transformers-4.57.1:\n Successfully uninstalled transformers-4.57.1\nLooking in indexes: https://download.pytorch.org/whl/cu121\nCollecting torch==2.5.1\n Downloading https://download.pytorch.org/whl/cu121/torch-2.5.1%2Bcu121-cp312-cp312-linux_x86_64.whl (780.4 MB)\n\u001b[2K \u001b[90mβββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m780.4/780.4 MB\u001b[0m \u001b[31m2.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:01\u001b[0m\n\u001b[?25hCollecting torchvision==0.20.1\n Downloading https://download.pytorch.org/whl/cu121/torchvision-0.20.1%2Bcu121-cp312-cp312-linux_x86_64.whl (7.3 MB)\n\u001b[2K \u001b[90mββββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m7.3/7.3 MB\u001b[0m 
\u001b[31m5.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m0:00:01\u001b[0m\n\u001b[?25hCollecting torchaudio==2.5.1\n Downloading https://download.pytorch.org/whl/cu121/torchaudio-2.5.1%2Bcu121-cp312-cp312-linux_x86_64.whl (3.4 MB)\n\u001b[2K \u001b[90mββββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m3.4/3.4 MB\u001b[0m \u001b[31m78.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m:00:01\u001b[0m\n\u001b[?25hInstalling collected packages: torchaudio, torchvision, torch\nSuccessfully installed torch-2.5.1+cu121 torchaudio-2.5.1+cu121 torchvision-0.20.1+cu121\nCollecting diffusers\n Downloading diffusers-0.36.0-py3-none-any.whl.metadata (20 kB)\nCollecting transformers\n Downloading transformers-4.57.6-py3-none-any.whl.metadata (43 kB)\n\u001b[2K \u001b[90mββββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m44.0/44.0 kB\u001b[0m \u001b[31m30.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n\u001b[?25hCollecting accelerate\n Downloading accelerate-1.12.0-py3-none-any.whl.metadata (19 kB)\nCollecting peft\n Downloading peft-0.18.1-py3-none-any.whl.metadata (14 kB)\nRequirement already satisfied: safetensors in /usr/local/lib/python3.12/dist-packages (0.6.2)\nCollecting safetensors\n Downloading safetensors-0.7.0-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (4.1 kB)\nRequirement already satisfied: tqdm in /usr/local/lib/python3.12/dist-packages (4.67.1)\nRequirement already satisfied: huggingface-hub in /usr/local/lib/python3.12/dist-packages (0.36.0)\nCollecting huggingface-hub\n Downloading huggingface_hub-1.3.2-py3-none-any.whl.metadata (13 kB)\nRequirement already satisfied: importlib_metadata in /usr/local/lib/python3.12/dist-packages (from diffusers) (8.7.0)\nRequirement already satisfied: filelock in /usr/local/lib/python3.12/dist-packages (from diffusers) (3.20.3)\nRequirement already satisfied: httpx<1.0.0 in /usr/local/lib/python3.12/dist-packages (from diffusers) (0.28.1)\nRequirement already satisfied: numpy in 
/usr/local/lib/python3.12/dist-packages (from diffusers) (2.0.2)\nRequirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.12/dist-packages (from diffusers) (2025.11.3)\nRequirement already satisfied: requests in /usr/local/lib/python3.12/dist-packages (from diffusers) (2.32.5)\nRequirement already satisfied: Pillow in /usr/local/lib/python3.12/dist-packages (from diffusers) (11.3.0)\nRequirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.12/dist-packages (from transformers) (26.0rc2)\nRequirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.12/dist-packages (from transformers) (6.0.3)\nRequirement already satisfied: tokenizers<=0.23.0,>=0.22.0 in /usr/local/lib/python3.12/dist-packages (from transformers) (0.22.1)\nRequirement already satisfied: psutil in /usr/local/lib/python3.12/dist-packages (from accelerate) (5.9.5)\nRequirement already satisfied: torch>=2.0.0 in /usr/local/lib/python3.12/dist-packages (from accelerate) (2.5.1+cu121)\nRequirement already satisfied: fsspec>=2023.5.0 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub) (2025.10.0)\nRequirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub) (4.15.0)\nRequirement already satisfied: hf-xet<2.0.0,>=1.1.3 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub) (1.2.1rc0)\nRequirement already satisfied: anyio in /usr/local/lib/python3.12/dist-packages (from httpx<1.0.0->diffusers) (4.12.1)\nRequirement already satisfied: certifi in /usr/local/lib/python3.12/dist-packages (from httpx<1.0.0->diffusers) (2026.1.4)\nRequirement already satisfied: httpcore==1.* in /usr/local/lib/python3.12/dist-packages (from httpx<1.0.0->diffusers) (1.0.9)\nRequirement already satisfied: idna in /usr/local/lib/python3.12/dist-packages (from httpx<1.0.0->diffusers) (3.11)\nRequirement already satisfied: h11>=0.16 in /usr/local/lib/python3.12/dist-packages (from 
httpcore==1.*->httpx<1.0.0->diffusers) (0.16.0)\nRequirement already satisfied: networkx in /usr/local/lib/python3.12/dist-packages (from torch>=2.0.0->accelerate) (3.5)\nRequirement already satisfied: jinja2 in /usr/local/lib/python3.12/dist-packages (from torch>=2.0.0->accelerate) (3.1.6)\nCollecting nvidia-cuda-nvrtc-cu12==12.1.105 (from torch>=2.0.0->accelerate)\n Downloading nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-manylinux1_x86_64.whl.metadata (1.5 kB)\nCollecting nvidia-cuda-runtime-cu12==12.1.105 (from torch>=2.0.0->accelerate)\n Downloading nvidia_cuda_runtime_cu12-12.1.105-py3-none-manylinux1_x86_64.whl.metadata (1.5 kB)\nCollecting nvidia-cuda-cupti-cu12==12.1.105 (from torch>=2.0.0->accelerate)\n Downloading nvidia_cuda_cupti_cu12-12.1.105-py3-none-manylinux1_x86_64.whl.metadata (1.6 kB)\nCollecting nvidia-cudnn-cu12==9.1.0.70 (from torch>=2.0.0->accelerate)\n Downloading nvidia_cudnn_cu12-9.1.0.70-py3-none-manylinux2014_x86_64.whl.metadata (1.6 kB)\nCollecting nvidia-cublas-cu12==12.1.3.1 (from torch>=2.0.0->accelerate)\n Downloading nvidia_cublas_cu12-12.1.3.1-py3-none-manylinux1_x86_64.whl.metadata (1.5 kB)\nCollecting nvidia-cufft-cu12==11.0.2.54 (from torch>=2.0.0->accelerate)\n Downloading nvidia_cufft_cu12-11.0.2.54-py3-none-manylinux1_x86_64.whl.metadata (1.5 kB)\nCollecting nvidia-curand-cu12==10.3.2.106 (from torch>=2.0.0->accelerate)\n Downloading nvidia_curand_cu12-10.3.2.106-py3-none-manylinux1_x86_64.whl.metadata (1.5 kB)\nCollecting nvidia-cusolver-cu12==11.4.5.107 (from torch>=2.0.0->accelerate)\n Downloading nvidia_cusolver_cu12-11.4.5.107-py3-none-manylinux1_x86_64.whl.metadata (1.6 kB)\nCollecting nvidia-cusparse-cu12==12.1.0.106 (from torch>=2.0.0->accelerate)\n Downloading nvidia_cusparse_cu12-12.1.0.106-py3-none-manylinux1_x86_64.whl.metadata (1.6 kB)\nCollecting nvidia-nccl-cu12==2.21.5 (from torch>=2.0.0->accelerate)\n Downloading nvidia_nccl_cu12-2.21.5-py3-none-manylinux2014_x86_64.whl.metadata (1.8 kB)\nCollecting 
nvidia-nvtx-cu12==12.1.105 (from torch>=2.0.0->accelerate)\n Downloading nvidia_nvtx_cu12-12.1.105-py3-none-manylinux1_x86_64.whl.metadata (1.7 kB)\nCollecting triton==3.1.0 (from torch>=2.0.0->accelerate)\n Downloading triton-3.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (1.3 kB)\nRequirement already satisfied: setuptools in /usr/local/lib/python3.12/dist-packages (from torch>=2.0.0->accelerate) (75.2.0)\nCollecting sympy==1.13.1 (from torch>=2.0.0->accelerate)\n Downloading sympy-1.13.1-py3-none-any.whl.metadata (12 kB)\nRequirement already satisfied: nvidia-nvjitlink-cu12 in /usr/local/lib/python3.12/dist-packages (from nvidia-cusolver-cu12==11.4.5.107->torch>=2.0.0->accelerate) (12.6.85)\nRequirement already satisfied: mpmath<1.4,>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from sympy==1.13.1->torch>=2.0.0->accelerate) (1.3.0)\nRequirement already satisfied: zipp>=3.20 in /usr/local/lib/python3.12/dist-packages (from importlib_metadata->diffusers) (3.23.0)\nRequirement already satisfied: charset_normalizer<4,>=2 in /usr/local/lib/python3.12/dist-packages (from requests->diffusers) (3.4.4)\nRequirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.12/dist-packages (from requests->diffusers) (2.6.3)\nRequirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.12/dist-packages (from jinja2->torch>=2.0.0->accelerate) (3.0.3)\nDownloading diffusers-0.36.0-py3-none-any.whl (4.6 MB)\n\u001b[2K \u001b[90mββββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m4.6/4.6 MB\u001b[0m \u001b[31m69.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0ma \u001b[36m0:00:01\u001b[0m\n\u001b[?25hDownloading transformers-4.57.6-py3-none-any.whl (12.0 MB)\n\u001b[2K \u001b[90mββββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m12.0/12.0 MB\u001b[0m \u001b[31m221.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m \u001b[36m0:00:01\u001b[0m\n\u001b[?25hDownloading accelerate-1.12.0-py3-none-any.whl (380 kB)\n\u001b[2K 
\u001b[90mβββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m380.9/380.9 kB\u001b[0m \u001b[31m348.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n\u001b[?25hDownloading peft-0.18.1-py3-none-any.whl (556 kB)\n\u001b[2K \u001b[90mβββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m557.0/557.0 kB\u001b[0m \u001b[31m333.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n\u001b[?25hDownloading safetensors-0.7.0-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (507 kB)\n\u001b[2K \u001b[90mβββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m507.2/507.2 kB\u001b[0m \u001b[31m307.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n\u001b[?25hDownloading nvidia_cublas_cu12-12.1.3.1-py3-none-manylinux1_x86_64.whl (410.6 MB)\n\u001b[2K \u001b[90mβββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m410.6/410.6 MB\u001b[0m \u001b[31m273.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:01\u001b[0m\n\u001b[?25hDownloading nvidia_cuda_cupti_cu12-12.1.105-py3-none-manylinux1_x86_64.whl (14.1 MB)\n\u001b[2K \u001b[90mββββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m14.1/14.1 MB\u001b[0m \u001b[31m204.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m \u001b[36m0:00:01\u001b[0m\n\u001b[?25hDownloading nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-manylinux1_x86_64.whl (23.7 MB)\n\u001b[2K \u001b[90mββββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m23.7/23.7 MB\u001b[0m \u001b[31m203.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0ma \u001b[36m0:00:01\u001b[0m\n\u001b[?25hDownloading nvidia_cuda_runtime_cu12-12.1.105-py3-none-manylinux1_x86_64.whl (823 kB)\n\u001b[2K \u001b[90mβββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m823.6/823.6 kB\u001b[0m \u001b[31m346.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n\u001b[?25hDownloading nvidia_cudnn_cu12-9.1.0.70-py3-none-manylinux2014_x86_64.whl (664.8 MB)\n\u001b[2K \u001b[90mβββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m664.8/664.8 MB\u001b[0m \u001b[31m286.0 
MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:01\u001b[0m\n\u001b[?25hDownloading nvidia_cufft_cu12-11.0.2.54-py3-none-manylinux1_x86_64.whl (121.6 MB)\n\u001b[2K \u001b[90mβββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m121.6/121.6 MB\u001b[0m \u001b[31m297.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0ma \u001b[36m0:00:01\u001b[0m\n\u001b[?25hDownloading nvidia_curand_cu12-10.3.2.106-py3-none-manylinux1_x86_64.whl (56.5 MB)\n\u001b[2K \u001b[90mββββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m56.5/56.5 MB\u001b[0m \u001b[31m194.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0ma \u001b[36m0:00:01\u001b[0m\n\u001b[?25hDownloading nvidia_cusolver_cu12-11.4.5.107-py3-none-manylinux1_x86_64.whl (124.2 MB)\n\u001b[2K \u001b[90mβββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m124.2/124.2 MB\u001b[0m \u001b[31m226.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:01\u001b[0m\n\u001b[?25hDownloading nvidia_cusparse_cu12-12.1.0.106-py3-none-manylinux1_x86_64.whl (196.0 MB)\n\u001b[2K \u001b[90mβββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m196.0/196.0 MB\u001b[0m \u001b[31m195.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:01\u001b[0m\n\u001b[?25hDownloading nvidia_nccl_cu12-2.21.5-py3-none-manylinux2014_x86_64.whl (188.7 MB)\n\u001b[2K \u001b[90mβββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m188.7/188.7 MB\u001b[0m \u001b[31m181.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:01\u001b[0m\n\u001b[?25hDownloading nvidia_nvtx_cu12-12.1.105-py3-none-manylinux1_x86_64.whl (99 kB)\n\u001b[2K \u001b[90mββββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m99.1/99.1 kB\u001b[0m \u001b[31m282.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n\u001b[?25hDownloading sympy-1.13.1-py3-none-any.whl (6.2 MB)\n\u001b[2K \u001b[90mββββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m6.2/6.2 MB\u001b[0m \u001b[31m302.3 MB/s\u001b[0m eta 
\u001b[36m0:00:00\u001b[0m\n\u001b[?25hDownloading triton-3.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (209.6 MB)\n\u001b[2K \u001b[90mβββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m209.6/209.6 MB\u001b[0m \u001b[31m217.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:01\u001b[0m\n\u001b[?25hInstalling collected packages: triton, sympy, safetensors, nvidia-nvtx-cu12, nvidia-nccl-cu12, nvidia-cusparse-cu12, nvidia-curand-cu12, nvidia-cufft-cu12, nvidia-cuda-runtime-cu12, nvidia-cuda-nvrtc-cu12, nvidia-cuda-cupti-cu12, nvidia-cublas-cu12, nvidia-cusolver-cu12, nvidia-cudnn-cu12, diffusers, transformers, accelerate, peft\n Attempting uninstall: triton\n Found existing installation: triton 3.4.0\n Uninstalling triton-3.4.0:\n Successfully uninstalled triton-3.4.0\n Attempting uninstall: sympy\n Found existing installation: sympy 1.13.3\n Uninstalling sympy-1.13.3:\n Successfully uninstalled sympy-1.13.3\n Attempting uninstall: safetensors\n Found existing installation: safetensors 0.6.2\n Uninstalling safetensors-0.6.2:\n Successfully uninstalled safetensors-0.6.2\n Attempting uninstall: nvidia-nvtx-cu12\n Found existing installation: nvidia-nvtx-cu12 12.6.77\n Uninstalling nvidia-nvtx-cu12-12.6.77:\n Successfully uninstalled nvidia-nvtx-cu12-12.6.77\n Attempting uninstall: nvidia-nccl-cu12\n Found existing installation: nvidia-nccl-cu12 2.27.3\n Uninstalling nvidia-nccl-cu12-2.27.3:\n Successfully uninstalled nvidia-nccl-cu12-2.27.3\n Attempting uninstall: nvidia-cusparse-cu12\n Found existing installation: nvidia-cusparse-cu12 12.5.4.2\n Uninstalling nvidia-cusparse-cu12-12.5.4.2:\n Successfully uninstalled nvidia-cusparse-cu12-12.5.4.2\n Attempting uninstall: nvidia-curand-cu12\n Found existing installation: nvidia-curand-cu12 10.3.7.77\n Uninstalling nvidia-curand-cu12-10.3.7.77:\n Successfully uninstalled nvidia-curand-cu12-10.3.7.77\n Attempting uninstall: nvidia-cufft-cu12\n Found existing installation: 
nvidia-cufft-cu12 11.3.0.4\n Uninstalling nvidia-cufft-cu12-11.3.0.4:\n Successfully uninstalled nvidia-cufft-cu12-11.3.0.4\n Attempting uninstall: nvidia-cuda-runtime-cu12\n Found existing installation: nvidia-cuda-runtime-cu12 12.6.77\n Uninstalling nvidia-cuda-runtime-cu12-12.6.77:\n Successfully uninstalled nvidia-cuda-runtime-cu12-12.6.77\n Attempting uninstall: nvidia-cuda-nvrtc-cu12\n Found existing installation: nvidia-cuda-nvrtc-cu12 12.6.77\n Uninstalling nvidia-cuda-nvrtc-cu12-12.6.77:\n Successfully uninstalled nvidia-cuda-nvrtc-cu12-12.6.77\n Attempting uninstall: nvidia-cuda-cupti-cu12\n Found existing installation: nvidia-cuda-cupti-cu12 12.6.80\n Uninstalling nvidia-cuda-cupti-cu12-12.6.80:\n Successfully uninstalled nvidia-cuda-cupti-cu12-12.6.80\n Attempting uninstall: nvidia-cublas-cu12\n Found existing installation: nvidia-cublas-cu12 12.6.4.1\n Uninstalling nvidia-cublas-cu12-12.6.4.1:\n Successfully uninstalled nvidia-cublas-cu12-12.6.4.1\n Attempting uninstall: nvidia-cusolver-cu12\n Found existing installation: nvidia-cusolver-cu12 11.7.1.2\n Uninstalling nvidia-cusolver-cu12-11.7.1.2:\n Successfully uninstalled nvidia-cusolver-cu12-11.7.1.2\n Attempting uninstall: nvidia-cudnn-cu12\n Found existing installation: nvidia-cudnn-cu12 9.10.2.21\n Uninstalling nvidia-cudnn-cu12-9.10.2.21:\n Successfully uninstalled nvidia-cudnn-cu12-9.10.2.21\n\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. 
This behaviour is the source of the following dependency conflicts.\ncudf-cu12 25.6.0 requires pyarrow<20.0.0a0,>=14.0.0; platform_machine == \"x86_64\", but you have pyarrow 22.0.0 which is incompatible.\nfastai 2.8.4 requires fastcore<1.9,>=1.8.0, but you have fastcore 1.11.3 which is incompatible.\u001b[0m\u001b[31m\n\u001b[0mSuccessfully installed accelerate-1.12.0 diffusers-0.36.0 nvidia-cublas-cu12-12.1.3.1 nvidia-cuda-cupti-cu12-12.1.105 nvidia-cuda-nvrtc-cu12-12.1.105 nvidia-cuda-runtime-cu12-12.1.105 nvidia-cudnn-cu12-9.1.0.70 nvidia-cufft-cu12-11.0.2.54 nvidia-curand-cu12-10.3.2.106 nvidia-cusolver-cu12-11.4.5.107 nvidia-cusparse-cu12-12.1.0.106 nvidia-nccl-cu12-2.21.5 nvidia-nvtx-cu12-12.1.105 peft-0.18.1 safetensors-0.7.0 sympy-1.13.1 transformers-4.57.6 triton-3.1.0\nCollecting git+https://github.com/huggingface/diffusers.git\n Cloning https://github.com/huggingface/diffusers.git to /tmp/pip-req-build-farey5tk\n Running command git clone --filter=blob:none --quiet https://github.com/huggingface/diffusers.git /tmp/pip-req-build-farey5tk\n Resolved https://github.com/huggingface/diffusers.git to commit ec376293714f269947f6d9d8a572bd73040bc1a0\n Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n Preparing metadata (pyproject.toml) ... 
\u001b[?25l\u001b[?25hdone\nRequirement already satisfied: importlib_metadata in /usr/local/lib/python3.12/dist-packages (from diffusers==0.37.0.dev0) (8.7.0)\nRequirement already satisfied: filelock in /usr/local/lib/python3.12/dist-packages (from diffusers==0.37.0.dev0) (3.20.3)\nRequirement already satisfied: httpx<1.0.0 in /usr/local/lib/python3.12/dist-packages (from diffusers==0.37.0.dev0) (0.28.1)\nRequirement already satisfied: huggingface-hub<2.0,>=0.34.0 in /usr/local/lib/python3.12/dist-packages (from diffusers==0.37.0.dev0) (0.36.0)\nRequirement already satisfied: numpy in /usr/local/lib/python3.12/dist-packages (from diffusers==0.37.0.dev0) (2.0.2)\nRequirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.12/dist-packages (from diffusers==0.37.0.dev0) (2025.11.3)\nRequirement already satisfied: requests in /usr/local/lib/python3.12/dist-packages (from diffusers==0.37.0.dev0) (2.32.5)\nRequirement already satisfied: safetensors>=0.3.1 in /usr/local/lib/python3.12/dist-packages (from diffusers==0.37.0.dev0) (0.7.0)\nRequirement already satisfied: Pillow in /usr/local/lib/python3.12/dist-packages (from diffusers==0.37.0.dev0) (11.3.0)\nRequirement already satisfied: anyio in /usr/local/lib/python3.12/dist-packages (from httpx<1.0.0->diffusers==0.37.0.dev0) (4.12.1)\nRequirement already satisfied: certifi in /usr/local/lib/python3.12/dist-packages (from httpx<1.0.0->diffusers==0.37.0.dev0) (2026.1.4)\nRequirement already satisfied: httpcore==1.* in /usr/local/lib/python3.12/dist-packages (from httpx<1.0.0->diffusers==0.37.0.dev0) (1.0.9)\nRequirement already satisfied: idna in /usr/local/lib/python3.12/dist-packages (from httpx<1.0.0->diffusers==0.37.0.dev0) (3.11)\nRequirement already satisfied: h11>=0.16 in /usr/local/lib/python3.12/dist-packages (from httpcore==1.*->httpx<1.0.0->diffusers==0.37.0.dev0) (0.16.0)\nRequirement already satisfied: fsspec>=2023.5.0 in /usr/local/lib/python3.12/dist-packages (from 
huggingface-hub<2.0,>=0.34.0->diffusers==0.37.0.dev0) (2025.10.0)\nRequirement already satisfied: packaging>=20.9 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub<2.0,>=0.34.0->diffusers==0.37.0.dev0) (26.0rc2)\nRequirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub<2.0,>=0.34.0->diffusers==0.37.0.dev0) (6.0.3)\nRequirement already satisfied: tqdm>=4.42.1 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub<2.0,>=0.34.0->diffusers==0.37.0.dev0) (4.67.1)\nRequirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub<2.0,>=0.34.0->diffusers==0.37.0.dev0) (4.15.0)\nRequirement already satisfied: hf-xet<2.0.0,>=1.1.3 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub<2.0,>=0.34.0->diffusers==0.37.0.dev0) (1.2.1rc0)\nRequirement already satisfied: zipp>=3.20 in /usr/local/lib/python3.12/dist-packages (from importlib_metadata->diffusers==0.37.0.dev0) (3.23.0)\nRequirement already satisfied: charset_normalizer<4,>=2 in /usr/local/lib/python3.12/dist-packages (from requests->diffusers==0.37.0.dev0) (3.4.4)\nRequirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.12/dist-packages (from requests->diffusers==0.37.0.dev0) (2.6.3)\nBuilding wheels for collected packages: diffusers\n Building wheel for diffusers (pyproject.toml) ... 
\u001b[?25l\u001b[?25hdone\n Created wheel for diffusers: filename=diffusers-0.37.0.dev0-py3-none-any.whl size=4893406 sha256=230dbccc8416b199faf6415754cf657741349710e790282da0eae7145d122845\n Stored in directory: /tmp/pip-ephem-wheel-cache-w2savifu/wheels/23/0f/7d/f97813d265ed0e599a78d83afd4e1925740896ca79b46cccfd\nSuccessfully built diffusers\nInstalling collected packages: diffusers\n Attempting uninstall: diffusers\n Found existing installation: diffusers 0.36.0\n Uninstalling diffusers-0.36.0:\n Successfully uninstalled diffusers-0.36.0\nSuccessfully installed diffusers-0.37.0.dev0\n","output_type":"stream"}],"execution_count":1},{"cell_type":"code","source":"# CELL 2 β Verify\n\nimport torch, diffusers\n\nprint(\"Torch:\", torch.__version__)\nprint(\"Diffusers:\", diffusers.__version__)\nprint(\"CUDA:\", torch.cuda.is_available())\nprint(\"GPU:\", torch.cuda.get_device_name(0) if torch.cuda.is_available() else \"None\")\n\nfrom diffusers import Flux2KleinPipeline\nprint(\"Flux2KleinPipeline OK\")","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-01-21T05:11:07.830231Z","iopub.execute_input":"2026-01-21T05:11:07.830498Z","iopub.status.idle":"2026-01-21T05:11:34.530651Z","shell.execute_reply.started":"2026-01-21T05:11:07.830467Z","shell.execute_reply":"2026-01-21T05:11:34.529728Z"}},"outputs":[{"name":"stdout","text":"Torch: 2.5.1+cu121\nDiffusers: 0.37.0.dev0\nCUDA: True\nGPU: Tesla P100-PCIE-16GB\n","output_type":"stream"},{"name":"stderr","text":"2026-01-21 05:11:20.477776: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\nWARNING: All log messages before absl::InitializeLog() is called are written to STDERR\nE0000 00:00:1768972280.686105 55 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\nE0000 00:00:1768972280.747505 55 
cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\nW0000 00:00:1768972281.239000 55 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\nW0000 00:00:1768972281.239052 55 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\nW0000 00:00:1768972281.239055 55 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\nW0000 00:00:1768972281.239058 55 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\nFlax classes are deprecated and will be removed in Diffusers v1.0.0. We recommend migrating to PyTorch classes or pinning your version of Diffusers.\nFlax classes are deprecated and will be removed in Diffusers v1.0.0. 
We recommend migrating to PyTorch classes or pinning your version of Diffusers.\n","output_type":"stream"},{"name":"stdout","text":"Flux2KleinPipeline OK\n","output_type":"stream"}],"execution_count":2},{"cell_type":"code","source":"# CELL 3 β Config\n\nimport os\n\ndevice = \"cuda\"\ndtype = torch.float16\n\nDATASET_NAME = \"image-caption-dataset\" # change if needed\n\nCAPTIONS_PATH = f\"/kaggle/input/{DATASET_NAME}/flux_captions.json\"\nLATENTS_PATH = f\"/kaggle/input/{DATASET_NAME}/flux_latents.safetensors\"\n\nCACHE_DIR = \"/kaggle/working/cache\"\nSAVE_DIR = \"/kaggle/working/flux_klein_lora\"\n\nos.makedirs(CACHE_DIR, exist_ok=True)\nos.makedirs(SAVE_DIR, exist_ok=True)\n\n# training\nACCUM_STEPS = 2\nALPHA = 16\n#--#\nLR = 2e-5\nSTEPS = 1000 # or more\nRANK = 16 # better for FLUX\n","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-01-21T05:11:34.531906Z","iopub.execute_input":"2026-01-21T05:11:34.532781Z","iopub.status.idle":"2026-01-21T05:11:34.538417Z","shell.execute_reply.started":"2026-01-21T05:11:34.532750Z","shell.execute_reply":"2026-01-21T05:11:34.537579Z"}},"outputs":[],"execution_count":3},{"cell_type":"code","source":"# CELL 4 β Load captions + latents\n\nimport json\nfrom safetensors.torch import load_file\n\nwith open(CAPTIONS_PATH) as f:\n captions = json.load(f)\n\nlatents = load_file(LATENTS_PATH)\n\nkeys = list(captions.keys())\nprint(\"Samples:\", len(keys))\n","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-01-21T05:11:34.539875Z","iopub.execute_input":"2026-01-21T05:11:34.540191Z","iopub.status.idle":"2026-01-21T05:11:35.046899Z","shell.execute_reply.started":"2026-01-21T05:11:34.540155Z","shell.execute_reply":"2026-01-21T05:11:35.045958Z"}},"outputs":[{"name":"stdout","text":"Samples: 125\n","output_type":"stream"}],"execution_count":4},{"cell_type":"code","source":"# CELL 5 β Dataset (returns latent + key)\n\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\n\nclass 
FluxLatentDataset(Dataset):\n def __len__(self):\n return len(keys)\n\n def __getitem__(self, idx):\n k = keys[idx]\n return latents[f\"{k}\"], k # <-- return KEY, not caption\n\ndataset = FluxLatentDataset()\nloader = DataLoader(dataset, batch_size=1, shuffle=True)\n","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-01-21T05:11:35.048565Z","iopub.execute_input":"2026-01-21T05:11:35.048853Z","iopub.status.idle":"2026-01-21T05:11:35.055112Z","shell.execute_reply.started":"2026-01-21T05:11:35.048824Z","shell.execute_reply":"2026-01-21T05:11:35.054378Z"}},"outputs":[],"execution_count":5},{"cell_type":"code","source":"# =========================================================\n# CELL 6 β Encode text on GPU and CACHE FLUX-READY embeddings\n# =========================================================\n\nimport torch, gc\nfrom transformers import AutoTokenizer, AutoModel\n\nMODEL_ID_TEXT = \"black-forest-labs/FLUX.2-klein-base-4B\"\n\n# load tokenizer + text encoder ONLY\ntokenizer = AutoTokenizer.from_pretrained(\n MODEL_ID_TEXT,\n subfolder=\"tokenizer\",\n trust_remote_code=True,\n cache_dir=CACHE_DIR\n)\n\ntext_encoder = AutoModel.from_pretrained(\n MODEL_ID_TEXT,\n subfolder=\"text_encoder\",\n trust_remote_code=True,\n dtype=torch.float16,\n cache_dir=CACHE_DIR\n).to(\"cuda\")\n\ntext_encoder.eval()\n\ntext_cache = {}\n\nwith torch.no_grad():\n for k, caption in captions.items():\n\n inputs = tokenizer(\n caption,\n padding=\"max_length\",\n truncation=True,\n max_length=128,\n return_tensors=\"pt\"\n ).to(\"cuda\")\n\n outputs = text_encoder(\n **inputs,\n output_hidden_states=True,\n return_dict=True,\n )\n\n # last hidden layer\n txt_hidden = outputs.hidden_states[-1] # [1, T, 2560]\n\n # expand to FLUX context width (3Γ)\n txt_hidden = txt_hidden.repeat(1, 1, 3) # β [1, T, 7680]\n\n text_cache[k] = txt_hidden.cpu()\n\nprint(\"Cached FLUX-ready text embeddings.\")\n\n# free GPU\ndel 
text_encoder\ntorch.cuda.empty_cache()\ngc.collect()\n","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-01-21T05:11:35.056313Z","iopub.execute_input":"2026-01-21T05:11:35.056682Z","iopub.status.idle":"2026-01-21T05:12:32.093604Z","shell.execute_reply.started":"2026-01-21T05:11:35.056647Z","shell.execute_reply":"2026-01-21T05:12:32.092931Z"}},"outputs":[{"output_type":"display_data","data":{"text/plain":"tokenizer_config.json: 0.00B [00:00, ?B/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"8b0bcd8528ed43a8ad9ff4b02c66c498"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"vocab.json: 0.00B [00:00, ?B/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"4ba534424a914efa91f65e66c3b5484a"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"merges.txt: 0.00B [00:00, ?B/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"c24f1ae5c6d34d7aaac756f9571d38cc"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"tokenizer/tokenizer.json: 0%| | 0.00/11.4M [00:00<?, ?B/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"b2cb7e792e3646a887df0c7bac08b8c1"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"added_tokens.json: 0%| | 0.00/707 [00:00<?, ?B/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"3681370cbe8045d4bb70956d80a4bf0c"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"special_tokens_map.json: 0%| | 0.00/613 [00:00<?, ?B/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"22a6292ea8fd43d6ae547a47832b3647"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"chat_template.jinja: 0.00B [00:00, 
?B/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"80763ee39a004509bacb0ae747681a0b"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"config.json: 0.00B [00:00, ?B/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"aaf483eae33646a2af7e76d70f805d18"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"model.safetensors.index.json: 0.00B [00:00, ?B/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"ea39ca9399fb4b518d6ccddbc1e15671"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"Fetching 2 files: 0%| | 0/2 [00:00<?, ?it/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"a34d46100626400f8752ed188b63b0c4"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"text_encoder/model-00001-of-00002.safete(β¦): 0%| | 0.00/4.97G [00:00<?, ?B/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"af9552a841104954a83ffaf552a180e3"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"text_encoder/model-00002-of-00002.safete(β¦): 0%| | 0.00/3.08G [00:00<?, ?B/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"2c6ce4b9e2db43fc926f16344773039c"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"Loading checkpoint shards: 0%| | 0/2 [00:00<?, ?it/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"8ccb471865e144d5a60a01125c93e3bb"}},"metadata":{}},{"name":"stdout","text":"Cached FLUX-ready text embeddings.\n","output_type":"stream"},{"execution_count":6,"output_type":"execute_result","data":{"text/plain":"8683"},"metadata":{}}],"execution_count":6},{"cell_type":"code","source":"# =========================================================\n# CELL 7 β Load FLUX Transformer and 
Attach LoRA (ALL LINEAR)\n# =========================================================\n\nimport torch, gc\nfrom diffusers import Flux2KleinPipeline\nfrom peft import LoraConfig, get_peft_model\n\nMODEL_ID = MODEL_ID_TEXT\n\npipe = Flux2KleinPipeline.from_pretrained(\n MODEL_ID,\n torch_dtype=torch.float16,\n cache_dir=CACHE_DIR,\n)\n\n# keep ONLY transformer\npipe.text_encoder = None\npipe.vae = None\npipe.scheduler = None\n\ntransformer = pipe.transformer\n\n# ---------------- LoRA CONFIG ----------------\nlora_config = LoraConfig(\n r=16,\n lora_alpha=16,\n target_modules=\"all-linear\", # β
THIS IS THE FIX\n lora_dropout=0.05,\n bias=\"none\",\n)\n\ntransformer = get_peft_model(transformer, lora_config)\ntransformer.enable_gradient_checkpointing()\n\npipe.transformer = transformer\n\ntrainable = sum(p.numel() for p in transformer.parameters() if p.requires_grad)\ntotal = sum(p.numel() for p in transformer.parameters())\n\nprint(f\"Trainable params: {trainable/1e6:.2f}M / {total/1e6:.2f}M\")\n","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-01-21T05:12:32.095340Z","iopub.execute_input":"2026-01-21T05:12:32.095839Z","iopub.status.idle":"2026-01-21T05:13:39.579302Z","shell.execute_reply.started":"2026-01-21T05:12:32.095811Z","shell.execute_reply":"2026-01-21T05:13:39.578289Z"}},"outputs":[{"output_type":"display_data","data":{"text/plain":"model_index.json: 0%| | 0.00/422 [00:00<?, ?B/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"e765340e67f0420b95aaaf451505e938"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"Fetching 17 files: 0%| | 0/17 [00:00<?, ?it/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"5df97d9159ca4428ab3462e6a063d8ca"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"scheduler_config.json: 0%| | 0.00/486 [00:00<?, ?B/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"84c7c1501f754f4f86a064f9974db066"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"config.json: 0%| | 0.00/531 [00:00<?, ?B/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"826bf310e82d4d25aba5b82cfe5d19a4"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"vae/diffusion_pytorch_model.safetensors: 0%| | 0.00/168M [00:00<?, 
?B/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"fb62dd7c54ab45b5a4a52324ac8c26a8"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"config.json: 0%| | 0.00/821 [00:00<?, ?B/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"322685b31baa4521bb226662a1f45d35"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"transformer/diffusion_pytorch_model.safe(β¦): 0%| | 0.00/7.75G [00:00<?, ?B/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"5639d5383af6437ab19d9eee1909247c"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"Loading pipeline components...: 0%| | 0/5 [00:00<?, ?it/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"a983bbbf499845a289358f15ab5b886e"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"Loading checkpoint shards: 0%| | 0/2 [00:00<?, ?it/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"2bbbabe944cd4b3a98725f03f2c6fb2d"}},"metadata":{}},{"name":"stdout","text":"Trainable params: 25.54M / 3901.09M\n","output_type":"stream"}],"execution_count":7},{"cell_type":"code","source":"# CELL 8 β Move Transformer to GPU Only\nimport gc, torch\n\npipe.transformer = pipe.transformer.to(\"cuda\")\n\ngc.collect()\ntorch.cuda.empty_cache()\n\nprint(\"Transformer loaded on GPU.\")\n\n\n","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-01-21T05:13:46.899270Z","iopub.execute_input":"2026-01-21T05:13:46.900127Z","iopub.status.idle":"2026-01-21T05:13:47.298989Z","shell.execute_reply.started":"2026-01-21T05:13:46.900056Z","shell.execute_reply":"2026-01-21T05:13:47.298134Z"}},"outputs":[{"name":"stdout","text":"Transformer loaded on GPU.\n","output_type":"stream"}],"execution_count":9},{"cell_type":"code","source":"# 
=========================================================\n# CELL 9 β Flow Matching Training (FLUX Klein LoRA, Epoch CKPT)\n# =========================================================\n\nimport torch\nimport torch.nn.functional as F\nfrom tqdm import tqdm\nimport os, time\n\n# ---------------- Config ----------------\n\nEPOCHS = 999999 # time limit will stop first\nSAVE_EVERY = 3 # β
save every 3 epochs\nLR = 1e-4\nFLOW_T_SCALE = 50.0\nCLAMP_VAL = 3.0\nGRAD_CLIP = 0.5\n\nSAVE_DIR = \"./flux_klein_lora\"\n\nMAX_SECONDS = (11 * 60 + 40) * 60 # β
11h 40m\n\ndevice = \"cuda\"\ndtype = torch.float16\n\npipe.transformer.train()\n\n# ---------------- Helpers ----------------\n\ndef patchify_latents(latents):\n B, C, H, W = latents.shape\n latents = latents.reshape(B, C, H//2, 2, W//2, 2)\n latents = latents.permute(0, 2, 4, 1, 3, 5).contiguous()\n return latents.view(B, (H//2)*(W//2), C*2*2) # [B, 4096, 128]\n\n\ndef generate_flux_pos_ids(batch, ph, pw, txt_len, device, dtype):\n y = torch.linspace(0, 1, ph, device=device, dtype=dtype)\n x = torch.linspace(0, 1, pw, device=device, dtype=dtype)\n gy, gx = torch.meshgrid(y, x, indexing=\"ij\")\n\n pos_y = gy.flatten()\n pos_x = gx.flatten()\n zeros = torch.zeros_like(pos_y)\n\n img_ids = torch.stack([pos_y, pos_x, zeros, zeros], dim=-1)\n img_ids = img_ids.unsqueeze(0).repeat(batch, 1, 1)\n\n t = torch.arange(txt_len, device=device, dtype=dtype) / txt_len\n zeros_t = torch.zeros_like(t)\n\n txt_ids = torch.stack([t, zeros_t, zeros_t, zeros_t], dim=-1)\n txt_ids = txt_ids.unsqueeze(0).repeat(batch, 1, 1)\n\n return img_ids, txt_ids\n\n\n# ---------------- Optimizer ----------------\n\ntrainable_params = [p for p in pipe.transformer.parameters() if p.requires_grad]\n\noptimizer = torch.optim.AdamW(\n trainable_params,\n lr=LR,\n betas=(0.9, 0.999),\n weight_decay=1e-4,\n)\n\n# ---------------- Training ----------------\n\nstart_time = time.time()\nglobal_step = 0\n\nprint(\"β±οΈ Training started. Max time:\", MAX_SECONDS / 3600, \"hours\")\n\nfor epoch in range(1, EPOCHS + 1):\n\n print(f\"\\n===== Epoch {epoch} =====\")\n epoch_loss = 0.0\n\n for latent_b, key in tqdm(loader, desc=f\"Epoch {epoch}\"):\n\n # ---- time check ----\n elapsed = time.time() - start_time\n if elapsed >= MAX_SECONDS:\n print(f\"\\nβ° Time limit reached ({elapsed/3600:.2f}h). 
Stopping.\")\n break\n\n latent_b = latent_b.squeeze(0).to(device, dtype=dtype)\n enc_b = text_cache[key[0]].to(device, dtype=dtype) # [1, T, 7680]\n\n # ---- patchify latents ----\n tokens = patchify_latents(latent_b)\n tokens = torch.clamp(tokens, -CLAMP_VAL, CLAMP_VAL)\n\n # ---- flow matching noise ----\n eps = torch.randn_like(tokens)\n eps = torch.clamp(eps, -CLAMP_VAL, CLAMP_VAL)\n\n t = torch.rand(tokens.size(0), device=device, dtype=dtype)\n\n z_t = (1 - t[:, None, None]) * eps + t[:, None, None] * tokens\n target = tokens - eps\n t_embed = t * FLOW_T_SCALE\n\n # ---- positional ids ----\n img_ids, txt_ids = generate_flux_pos_ids(\n tokens.size(0), 64, 64, enc_b.size(1), device, dtype\n )\n\n # ---- forward ----\n with torch.autocast(\"cuda\", dtype=torch.float16):\n pred = pipe.transformer(\n hidden_states=z_t,\n timestep=t_embed,\n encoder_hidden_states=enc_b,\n img_ids=img_ids,\n txt_ids=txt_ids,\n return_dict=False,\n )[0]\n\n loss = F.mse_loss(pred.float(), target.float())\n\n # ---- backward ----\n loss.backward()\n torch.nn.utils.clip_grad_norm_(trainable_params, GRAD_CLIP)\n optimizer.step()\n optimizer.zero_grad(set_to_none=True)\n\n epoch_loss += loss.item()\n global_step += 1\n\n # ---- epoch summary ----\n avg_loss = epoch_loss / max(1, len(loader))\n print(f\"Epoch {epoch} | Avg Loss: {avg_loss:.6f}\")\n\n # ---- save every N epochs ----\n if epoch % SAVE_EVERY == 0:\n save_path = os.path.join(SAVE_DIR, f\"epoch_{epoch:03d}\")\n os.makedirs(save_path, exist_ok=True)\n pipe.transformer.save_pretrained(save_path)\n print(\"πΎ Saved LoRA checkpoint to:\", save_path)\n\n # ---- time exit ----\n if (time.time() - start_time) >= MAX_SECONDS:\n break\n\n\n# ---------------- Final Save ----------------\n\nfinal_path = os.path.join(SAVE_DIR, \"final\")\nos.makedirs(final_path, exist_ok=True)\npipe.transformer.save_pretrained(final_path)\n\nprint(\"β
Final LoRA saved to:\", final_path)\n","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-01-21T05:59:18.732075Z","iopub.execute_input":"2026-01-21T05:59:18.732478Z","execution_failed":"2026-01-21T06:00:19.034Z"}},"outputs":[{"name":"stdout","text":"β±οΈ Training started. Max time: 11.666666666666666 hours\n\n===== Epoch 1 =====\n","output_type":"stream"},{"name":"stderr","text":"Epoch 1: 2%|β | 2/125 [00:46<47:20, 23.09s/it]","output_type":"stream"}],"execution_count":null}]}
|
|
|
|
| 1 |
+
{"metadata":{"kernelspec":{"language":"python","display_name":"Python 3","name":"python3"},"language_info":{"name":"python","version":"3.12.12","mimetype":"text/x-python","codemirror_mode":{"name":"ipython","version":3},"pygments_lexer":"ipython3","nbconvert_exporter":"python","file_extension":".py"},"kaggle":{"accelerator":"gpu","dataSources":[{"sourceId":14564379,"sourceType":"datasetVersion","datasetId":8022630}],"dockerImageVersionId":31260,"isInternetEnabled":true,"language":"python","sourceType":"notebook","isGpuEnabled":true}},"nbformat_minor":4,"nbformat":4,"cells":[{"cell_type":"code","source":"HOW TO USE:\n1) Upload a civitai dataset .zip file to your google drive named kaggleset.zip \n2) Use the https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/civit_dataset_to_latent.ipynb notebook \nto convert this dataset to flux_captions.json and flux_latents.safetensors (saved to your drive upon running the script)\n3) Create a private dataset called image-caption-dataset\n4) Add the flux_captions.json and flux_latents.safetensor to this dataset\n5) In this notebook , press the '+ Add input' button and select your private dataset\n6) Run this notebook\n//----//\nIf you have ideas on improvements / developments on FLUX Klein 4B LoRa \ntraining let me know in the comment section of this repo","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"#CELL 1\n!pip uninstall -y torch torchvision torchaudio diffusers accelerate peft transformers\n\n!pip install --no-deps torch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1 --index-url https://download.pytorch.org/whl/cu121\n\n!pip install --upgrade --no-cache-dir diffusers transformers accelerate peft safetensors tqdm huggingface-hub\n\n!pip install 
git+https://github.com/huggingface/diffusers.git","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2026-01-21T06:41:12.253502Z","iopub.execute_input":"2026-01-21T06:41:12.253751Z","iopub.status.idle":"2026-01-21T06:44:24.039967Z","shell.execute_reply.started":"2026-01-21T06:41:12.253730Z","shell.execute_reply":"2026-01-21T06:44:24.039276Z"}},"outputs":[{"name":"stdout","text":"Found existing installation: torch 2.8.0+cu126\nUninstalling torch-2.8.0+cu126:\n Successfully uninstalled torch-2.8.0+cu126\nFound existing installation: torchvision 0.23.0+cu126\nUninstalling torchvision-0.23.0+cu126:\n Successfully uninstalled torchvision-0.23.0+cu126\nFound existing installation: torchaudio 2.8.0+cu126\nUninstalling torchaudio-2.8.0+cu126:\n Successfully uninstalled torchaudio-2.8.0+cu126\nFound existing installation: diffusers 0.35.2\nUninstalling diffusers-0.35.2:\n Successfully uninstalled diffusers-0.35.2\nFound existing installation: accelerate 1.11.0\nUninstalling accelerate-1.11.0:\n Successfully uninstalled accelerate-1.11.0\nFound existing installation: peft 0.17.1\nUninstalling peft-0.17.1:\n Successfully uninstalled peft-0.17.1\nFound existing installation: transformers 4.57.1\nUninstalling transformers-4.57.1:\n Successfully uninstalled transformers-4.57.1\nLooking in indexes: https://download.pytorch.org/whl/cu121\nCollecting torch==2.5.1\n Downloading https://download.pytorch.org/whl/cu121/torch-2.5.1%2Bcu121-cp312-cp312-linux_x86_64.whl (780.4 MB)\n\u001b[2K \u001b[90mβββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m780.4/780.4 MB\u001b[0m \u001b[31m2.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:01\u001b[0m\n\u001b[?25hCollecting torchvision==0.20.1\n Downloading https://download.pytorch.org/whl/cu121/torchvision-0.20.1%2Bcu121-cp312-cp312-linux_x86_64.whl (7.3 MB)\n\u001b[2K \u001b[90mββββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m7.3/7.3 MB\u001b[0m \u001b[31m122.6 MB/s\u001b[0m eta 
\u001b[36m0:00:00\u001b[0m00:01\u001b[0m\n\u001b[?25hCollecting torchaudio==2.5.1\n Downloading https://download.pytorch.org/whl/cu121/torchaudio-2.5.1%2Bcu121-cp312-cp312-linux_x86_64.whl (3.4 MB)\n\u001b[2K \u001b[90mβββββββββββββββββββββοΏ½οΏ½ββββββββββββββββββ\u001b[0m \u001b[32m3.4/3.4 MB\u001b[0m \u001b[31m95.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m:00:01\u001b[0m\n\u001b[?25hInstalling collected packages: torchaudio, torchvision, torch\nSuccessfully installed torch-2.5.1+cu121 torchaudio-2.5.1+cu121 torchvision-0.20.1+cu121\nCollecting diffusers\n Downloading diffusers-0.36.0-py3-none-any.whl.metadata (20 kB)\nCollecting transformers\n Downloading transformers-4.57.6-py3-none-any.whl.metadata (43 kB)\n\u001b[2K \u001b[90mββββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m44.0/44.0 kB\u001b[0m \u001b[31m7.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n\u001b[?25hCollecting accelerate\n Downloading accelerate-1.12.0-py3-none-any.whl.metadata (19 kB)\nCollecting peft\n Downloading peft-0.18.1-py3-none-any.whl.metadata (14 kB)\nRequirement already satisfied: safetensors in /usr/local/lib/python3.12/dist-packages (0.6.2)\nCollecting safetensors\n Downloading safetensors-0.7.0-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (4.1 kB)\nRequirement already satisfied: tqdm in /usr/local/lib/python3.12/dist-packages (4.67.1)\nRequirement already satisfied: huggingface-hub in /usr/local/lib/python3.12/dist-packages (0.36.0)\nCollecting huggingface-hub\n Downloading huggingface_hub-1.3.2-py3-none-any.whl.metadata (13 kB)\nRequirement already satisfied: importlib_metadata in /usr/local/lib/python3.12/dist-packages (from diffusers) (8.7.0)\nRequirement already satisfied: filelock in /usr/local/lib/python3.12/dist-packages (from diffusers) (3.20.3)\nRequirement already satisfied: httpx<1.0.0 in /usr/local/lib/python3.12/dist-packages (from diffusers) (0.28.1)\nRequirement already satisfied: numpy in /usr/local/lib/python3.12/dist-packages 
(from diffusers) (2.0.2)\nRequirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.12/dist-packages (from diffusers) (2025.11.3)\nRequirement already satisfied: requests in /usr/local/lib/python3.12/dist-packages (from diffusers) (2.32.5)\nRequirement already satisfied: Pillow in /usr/local/lib/python3.12/dist-packages (from diffusers) (11.3.0)\nRequirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.12/dist-packages (from transformers) (26.0rc2)\nRequirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.12/dist-packages (from transformers) (6.0.3)\nRequirement already satisfied: tokenizers<=0.23.0,>=0.22.0 in /usr/local/lib/python3.12/dist-packages (from transformers) (0.22.1)\nRequirement already satisfied: psutil in /usr/local/lib/python3.12/dist-packages (from accelerate) (5.9.5)\nRequirement already satisfied: torch>=2.0.0 in /usr/local/lib/python3.12/dist-packages (from accelerate) (2.5.1+cu121)\nRequirement already satisfied: fsspec>=2023.5.0 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub) (2025.10.0)\nRequirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub) (4.15.0)\nRequirement already satisfied: hf-xet<2.0.0,>=1.1.3 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub) (1.2.1rc0)\nRequirement already satisfied: anyio in /usr/local/lib/python3.12/dist-packages (from httpx<1.0.0->diffusers) (4.12.1)\nRequirement already satisfied: certifi in /usr/local/lib/python3.12/dist-packages (from httpx<1.0.0->diffusers) (2026.1.4)\nRequirement already satisfied: httpcore==1.* in /usr/local/lib/python3.12/dist-packages (from httpx<1.0.0->diffusers) (1.0.9)\nRequirement already satisfied: idna in /usr/local/lib/python3.12/dist-packages (from httpx<1.0.0->diffusers) (3.11)\nRequirement already satisfied: h11>=0.16 in /usr/local/lib/python3.12/dist-packages (from httpcore==1.*->httpx<1.0.0->diffusers) (0.16.0)\nRequirement already 
satisfied: networkx in /usr/local/lib/python3.12/dist-packages (from torch>=2.0.0->accelerate) (3.5)\nRequirement already satisfied: jinja2 in /usr/local/lib/python3.12/dist-packages (from torch>=2.0.0->accelerate) (3.1.6)\nCollecting nvidia-cuda-nvrtc-cu12==12.1.105 (from torch>=2.0.0->accelerate)\n Downloading nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-manylinux1_x86_64.whl.metadata (1.5 kB)\nCollecting nvidia-cuda-runtime-cu12==12.1.105 (from torch>=2.0.0->accelerate)\n Downloading nvidia_cuda_runtime_cu12-12.1.105-py3-none-manylinux1_x86_64.whl.metadata (1.5 kB)\nCollecting nvidia-cuda-cupti-cu12==12.1.105 (from torch>=2.0.0->accelerate)\n Downloading nvidia_cuda_cupti_cu12-12.1.105-py3-none-manylinux1_x86_64.whl.metadata (1.6 kB)\nCollecting nvidia-cudnn-cu12==9.1.0.70 (from torch>=2.0.0->accelerate)\n Downloading nvidia_cudnn_cu12-9.1.0.70-py3-none-manylinux2014_x86_64.whl.metadata (1.6 kB)\nCollecting nvidia-cublas-cu12==12.1.3.1 (from torch>=2.0.0->accelerate)\n Downloading nvidia_cublas_cu12-12.1.3.1-py3-none-manylinux1_x86_64.whl.metadata (1.5 kB)\nCollecting nvidia-cufft-cu12==11.0.2.54 (from torch>=2.0.0->accelerate)\n Downloading nvidia_cufft_cu12-11.0.2.54-py3-none-manylinux1_x86_64.whl.metadata (1.5 kB)\nCollecting nvidia-curand-cu12==10.3.2.106 (from torch>=2.0.0->accelerate)\n Downloading nvidia_curand_cu12-10.3.2.106-py3-none-manylinux1_x86_64.whl.metadata (1.5 kB)\nCollecting nvidia-cusolver-cu12==11.4.5.107 (from torch>=2.0.0->accelerate)\n Downloading nvidia_cusolver_cu12-11.4.5.107-py3-none-manylinux1_x86_64.whl.metadata (1.6 kB)\nCollecting nvidia-cusparse-cu12==12.1.0.106 (from torch>=2.0.0->accelerate)\n Downloading nvidia_cusparse_cu12-12.1.0.106-py3-none-manylinux1_x86_64.whl.metadata (1.6 kB)\nCollecting nvidia-nccl-cu12==2.21.5 (from torch>=2.0.0->accelerate)\n Downloading nvidia_nccl_cu12-2.21.5-py3-none-manylinux2014_x86_64.whl.metadata (1.8 kB)\nCollecting nvidia-nvtx-cu12==12.1.105 (from torch>=2.0.0->accelerate)\n Downloading 
nvidia_nvtx_cu12-12.1.105-py3-none-manylinux1_x86_64.whl.metadata (1.7 kB)\nCollecting triton==3.1.0 (from torch>=2.0.0->accelerate)\n Downloading triton-3.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (1.3 kB)\nRequirement already satisfied: setuptools in /usr/local/lib/python3.12/dist-packages (from torch>=2.0.0->accelerate) (75.2.0)\nCollecting sympy==1.13.1 (from torch>=2.0.0->accelerate)\n Downloading sympy-1.13.1-py3-none-any.whl.metadata (12 kB)\nRequirement already satisfied: nvidia-nvjitlink-cu12 in /usr/local/lib/python3.12/dist-packages (from nvidia-cusolver-cu12==11.4.5.107->torch>=2.0.0->accelerate) (12.6.85)\nRequirement already satisfied: mpmath<1.4,>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from sympy==1.13.1->torch>=2.0.0->accelerate) (1.3.0)\nRequirement already satisfied: zipp>=3.20 in /usr/local/lib/python3.12/dist-packages (from importlib_metadata->diffusers) (3.23.0)\nRequirement already satisfied: charset_normalizer<4,>=2 in /usr/local/lib/python3.12/dist-packages (from requests->diffusers) (3.4.4)\nRequirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.12/dist-packages (from requests->diffusers) (2.6.3)\nRequirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.12/dist-packages (from jinja2->torch>=2.0.0->accelerate) (3.0.3)\nDownloading diffusers-0.36.0-py3-none-any.whl (4.6 MB)\n\u001b[2K \u001b[90mββββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m4.6/4.6 MB\u001b[0m \u001b[31m70.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0ma \u001b[36m0:00:01\u001b[0m\n\u001b[?25hDownloading transformers-4.57.6-py3-none-any.whl (12.0 MB)\n\u001b[2K \u001b[90mββββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m12.0/12.0 MB\u001b[0m \u001b[31m209.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m \u001b[36m0:00:01\u001b[0m\n\u001b[?25hDownloading accelerate-1.12.0-py3-none-any.whl (380 kB)\n\u001b[2K \u001b[90mβββββββββββββββββββββββββββββββββββββββ\u001b[0m 
\u001b[32m380.9/380.9 kB\u001b[0m \u001b[31m154.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n\u001b[?25hDownloading peft-0.18.1-py3-none-any.whl (556 kB)\n\u001b[2K \u001b[90mβββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m557.0/557.0 kB\u001b[0m \u001b[31m376.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n\u001b[?25hDownloading safetensors-0.7.0-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (507 kB)\n\u001b[2K \u001b[90mβββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m507.2/507.2 kB\u001b[0m \u001b[31m199.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n\u001b[?25hDownloading nvidia_cublas_cu12-12.1.3.1-py3-none-manylinux1_x86_64.whl (410.6 MB)\n\u001b[2K \u001b[90mβββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m410.6/410.6 MB\u001b[0m \u001b[31m265.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:01\u001b[0m\n\u001b[?25hDownloading nvidia_cuda_cupti_cu12-12.1.105-py3-none-manylinux1_x86_64.whl (14.1 MB)\n\u001b[2K \u001b[90mββββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m14.1/14.1 MB\u001b[0m \u001b[31m252.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m \u001b[36m0:00:01\u001b[0m\n\u001b[?25hDownloading nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-manylinux1_x86_64.whl (23.7 MB)\n\u001b[2K \u001b[90mββββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m23.7/23.7 MB\u001b[0m \u001b[31m177.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0ma \u001b[36m0:00:01\u001b[0m\n\u001b[?25hDownloading nvidia_cuda_runtime_cu12-12.1.105-py3-none-manylinux1_x86_64.whl (823 kB)\n\u001b[2K \u001b[90mβββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m823.6/823.6 kB\u001b[0m \u001b[31m303.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n\u001b[?25hDownloading nvidia_cudnn_cu12-9.1.0.70-py3-none-manylinux2014_x86_64.whl (664.8 MB)\n\u001b[2K \u001b[90mβββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m664.8/664.8 MB\u001b[0m \u001b[31m251.6 MB/s\u001b[0m eta 
\u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:01\u001b[0m\n\u001b[?25hDownloading nvidia_cufft_cu12-11.0.2.54-py3-none-manylinux1_x86_64.whl (121.6 MB)\n\u001b[2K \u001b[90mβββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m121.6/121.6 MB\u001b[0m \u001b[31m282.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0ma \u001b[36m0:00:01\u001b[0m\n\u001b[?25hDownloading nvidia_curand_cu12-10.3.2.106-py3-none-manylinux1_x86_64.whl (56.5 MB)\n\u001b[2K \u001b[90mββββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m56.5/56.5 MB\u001b[0m \u001b[31m211.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0ma \u001b[36m0:00:01\u001b[0m\n\u001b[?25hDownloading nvidia_cusolver_cu12-11.4.5.107-py3-none-manylinux1_x86_64.whl (124.2 MB)\n\u001b[2K \u001b[90mβββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m124.2/124.2 MB\u001b[0m \u001b[31m275.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0ma \u001b[36m0:00:01\u001b[0m\n\u001b[?25hDownloading nvidia_cusparse_cu12-12.1.0.106-py3-none-manylinux1_x86_64.whl (196.0 MB)\n\u001b[2K \u001b[90mβββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m196.0/196.0 MB\u001b[0m \u001b[31m198.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0ma \u001b[36m0:00:01\u001b[0m\n\u001b[?25hDownloading nvidia_nccl_cu12-2.21.5-py3-none-manylinux2014_x86_64.whl (188.7 MB)\n\u001b[2K \u001b[90mβββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m188.7/188.7 MB\u001b[0m \u001b[31m147.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:01\u001b[0m\n\u001b[?25hDownloading nvidia_nvtx_cu12-12.1.105-py3-none-manylinux1_x86_64.whl (99 kB)\n\u001b[2K \u001b[90mββββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m99.1/99.1 kB\u001b[0m \u001b[31m320.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n\u001b[?25hDownloading sympy-1.13.1-py3-none-any.whl (6.2 MB)\n\u001b[2K \u001b[90mββββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m6.2/6.2 MB\u001b[0m \u001b[31m153.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0ma 
\u001b[36m0:00:01\u001b[0m\n\u001b[?25hDownloading triton-3.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (209.6 MB)\n\u001b[2K \u001b[90mβββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m209.6/209.6 MB\u001b[0m \u001b[31m158.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m00:01\u001b[0m00:01\u001b[0m\n\u001b[?25hInstalling collected packages: triton, sympy, safetensors, nvidia-nvtx-cu12, nvidia-nccl-cu12, nvidia-cusparse-cu12, nvidia-curand-cu12, nvidia-cufft-cu12, nvidia-cuda-runtime-cu12, nvidia-cuda-nvrtc-cu12, nvidia-cuda-cupti-cu12, nvidia-cublas-cu12, nvidia-cusolver-cu12, nvidia-cudnn-cu12, diffusers, transformers, accelerate, peft\n Attempting uninstall: triton\n Found existing installation: triton 3.4.0\n Uninstalling triton-3.4.0:\n Successfully uninstalled triton-3.4.0\n Attempting uninstall: sympy\n Found existing installation: sympy 1.13.3\n Uninstalling sympy-1.13.3:\n Successfully uninstalled sympy-1.13.3\n Attempting uninstall: safetensors\n Found existing installation: safetensors 0.6.2\n Uninstalling safetensors-0.6.2:\n Successfully uninstalled safetensors-0.6.2\n Attempting uninstall: nvidia-nvtx-cu12\n Found existing installation: nvidia-nvtx-cu12 12.6.77\n Uninstalling nvidia-nvtx-cu12-12.6.77:\n Successfully uninstalled nvidia-nvtx-cu12-12.6.77\n Attempting uninstall: nvidia-nccl-cu12\n Found existing installation: nvidia-nccl-cu12 2.27.3\n Uninstalling nvidia-nccl-cu12-2.27.3:\n Successfully uninstalled nvidia-nccl-cu12-2.27.3\n Attempting uninstall: nvidia-cusparse-cu12\n Found existing installation: nvidia-cusparse-cu12 12.5.4.2\n Uninstalling nvidia-cusparse-cu12-12.5.4.2:\n Successfully uninstalled nvidia-cusparse-cu12-12.5.4.2\n Attempting uninstall: nvidia-curand-cu12\n Found existing installation: nvidia-curand-cu12 10.3.7.77\n Uninstalling nvidia-curand-cu12-10.3.7.77:\n Successfully uninstalled nvidia-curand-cu12-10.3.7.77\n Attempting uninstall: nvidia-cufft-cu12\n Found existing installation: 
nvidia-cufft-cu12 11.3.0.4\n Uninstalling nvidia-cufft-cu12-11.3.0.4:\n Successfully uninstalled nvidia-cufft-cu12-11.3.0.4\n Attempting uninstall: nvidia-cuda-runtime-cu12\n Found existing installation: nvidia-cuda-runtime-cu12 12.6.77\n Uninstalling nvidia-cuda-runtime-cu12-12.6.77:\n Successfully uninstalled nvidia-cuda-runtime-cu12-12.6.77\n Attempting uninstall: nvidia-cuda-nvrtc-cu12\n Found existing installation: nvidia-cuda-nvrtc-cu12 12.6.77\n Uninstalling nvidia-cuda-nvrtc-cu12-12.6.77:\n Successfully uninstalled nvidia-cuda-nvrtc-cu12-12.6.77\n Attempting uninstall: nvidia-cuda-cupti-cu12\n Found existing installation: nvidia-cuda-cupti-cu12 12.6.80\n Uninstalling nvidia-cuda-cupti-cu12-12.6.80:\n Successfully uninstalled nvidia-cuda-cupti-cu12-12.6.80\n Attempting uninstall: nvidia-cublas-cu12\n Found existing installation: nvidia-cublas-cu12 12.6.4.1\n Uninstalling nvidia-cublas-cu12-12.6.4.1:\n Successfully uninstalled nvidia-cublas-cu12-12.6.4.1\n Attempting uninstall: nvidia-cusolver-cu12\n Found existing installation: nvidia-cusolver-cu12 11.7.1.2\n Uninstalling nvidia-cusolver-cu12-11.7.1.2:\n Successfully uninstalled nvidia-cusolver-cu12-11.7.1.2\n Attempting uninstall: nvidia-cudnn-cu12\n Found existing installation: nvidia-cudnn-cu12 9.10.2.21\n Uninstalling nvidia-cudnn-cu12-9.10.2.21:\n Successfully uninstalled nvidia-cudnn-cu12-9.10.2.21\n\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. 
This behaviour is the source of the following dependency conflicts.\ncudf-cu12 25.6.0 requires pyarrow<20.0.0a0,>=14.0.0; platform_machine == \"x86_64\", but you have pyarrow 22.0.0 which is incompatible.\nfastai 2.8.4 requires fastcore<1.9,>=1.8.0, but you have fastcore 1.11.3 which is incompatible.\u001b[0m\u001b[31m\n\u001b[0mSuccessfully installed accelerate-1.12.0 diffusers-0.36.0 nvidia-cublas-cu12-12.1.3.1 nvidia-cuda-cupti-cu12-12.1.105 nvidia-cuda-nvrtc-cu12-12.1.105 nvidia-cuda-runtime-cu12-12.1.105 nvidia-cudnn-cu12-9.1.0.70 nvidia-cufft-cu12-11.0.2.54 nvidia-curand-cu12-10.3.2.106 nvidia-cusolver-cu12-11.4.5.107 nvidia-cusparse-cu12-12.1.0.106 nvidia-nccl-cu12-2.21.5 nvidia-nvtx-cu12-12.1.105 peft-0.18.1 safetensors-0.7.0 sympy-1.13.1 transformers-4.57.6 triton-3.1.0\nCollecting git+https://github.com/huggingface/diffusers.git\n Cloning https://github.com/huggingface/diffusers.git to /tmp/pip-req-build-5qs94bpu\n Running command git clone --filter=blob:none --quiet https://github.com/huggingface/diffusers.git /tmp/pip-req-build-5qs94bpu\n Resolved https://github.com/huggingface/diffusers.git to commit ec376293714f269947f6d9d8a572bd73040bc1a0\n Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n Preparing metadata (pyproject.toml) ... 
\u001b[?25l\u001b[?25hdone\nRequirement already satisfied: importlib_metadata in /usr/local/lib/python3.12/dist-packages (from diffusers==0.37.0.dev0) (8.7.0)\nRequirement already satisfied: filelock in /usr/local/lib/python3.12/dist-packages (from diffusers==0.37.0.dev0) (3.20.3)\nRequirement already satisfied: httpx<1.0.0 in /usr/local/lib/python3.12/dist-packages (from diffusers==0.37.0.dev0) (0.28.1)\nRequirement already satisfied: huggingface-hub<2.0,>=0.34.0 in /usr/local/lib/python3.12/dist-packages (from diffusers==0.37.0.dev0) (0.36.0)\nRequirement already satisfied: numpy in /usr/local/lib/python3.12/dist-packages (from diffusers==0.37.0.dev0) (2.0.2)\nRequirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.12/dist-packages (from diffusers==0.37.0.dev0) (2025.11.3)\nRequirement already satisfied: requests in /usr/local/lib/python3.12/dist-packages (from diffusers==0.37.0.dev0) (2.32.5)\nRequirement already satisfied: safetensors>=0.3.1 in /usr/local/lib/python3.12/dist-packages (from diffusers==0.37.0.dev0) (0.7.0)\nRequirement already satisfied: Pillow in /usr/local/lib/python3.12/dist-packages (from diffusers==0.37.0.dev0) (11.3.0)\nRequirement already satisfied: anyio in /usr/local/lib/python3.12/dist-packages (from httpx<1.0.0->diffusers==0.37.0.dev0) (4.12.1)\nRequirement already satisfied: certifi in /usr/local/lib/python3.12/dist-packages (from httpx<1.0.0->diffusers==0.37.0.dev0) (2026.1.4)\nRequirement already satisfied: httpcore==1.* in /usr/local/lib/python3.12/dist-packages (from httpx<1.0.0->diffusers==0.37.0.dev0) (1.0.9)\nRequirement already satisfied: idna in /usr/local/lib/python3.12/dist-packages (from httpx<1.0.0->diffusers==0.37.0.dev0) (3.11)\nRequirement already satisfied: h11>=0.16 in /usr/local/lib/python3.12/dist-packages (from httpcore==1.*->httpx<1.0.0->diffusers==0.37.0.dev0) (0.16.0)\nRequirement already satisfied: fsspec>=2023.5.0 in /usr/local/lib/python3.12/dist-packages (from 
huggingface-hub<2.0,>=0.34.0->diffusers==0.37.0.dev0) (2025.10.0)\nRequirement already satisfied: packaging>=20.9 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub<2.0,>=0.34.0->diffusers==0.37.0.dev0) (26.0rc2)\nRequirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub<2.0,>=0.34.0->diffusers==0.37.0.dev0) (6.0.3)\nRequirement already satisfied: tqdm>=4.42.1 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub<2.0,>=0.34.0->diffusers==0.37.0.dev0) (4.67.1)\nRequirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub<2.0,>=0.34.0->diffusers==0.37.0.dev0) (4.15.0)\nRequirement already satisfied: hf-xet<2.0.0,>=1.1.3 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub<2.0,>=0.34.0->diffusers==0.37.0.dev0) (1.2.1rc0)\nRequirement already satisfied: zipp>=3.20 in /usr/local/lib/python3.12/dist-packages (from importlib_metadata->diffusers==0.37.0.dev0) (3.23.0)\nRequirement already satisfied: charset_normalizer<4,>=2 in /usr/local/lib/python3.12/dist-packages (from requests->diffusers==0.37.0.dev0) (3.4.4)\nRequirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.12/dist-packages (from requests->diffusers==0.37.0.dev0) (2.6.3)\nBuilding wheels for collected packages: diffusers\n Building wheel for diffusers (pyproject.toml) ... 
# CELL 2 — Verify installed versions
import torch, diffusers

print("Torch:", torch.__version__)
print("Diffusers:", diffusers.__version__)
print("CUDA:", torch.cuda.is_available())
print("GPU:", torch.cuda.get_device_name(0) if torch.cuda.is_available() else "None")

from diffusers import Flux2KleinPipeline
print("Flux2KleinPipeline OK")

# CELL 3 — Config
import os

device = "cuda"
dtype = torch.float16

DATASET_NAME = "image-caption-dataset"

# Pre-computed inputs produced by the companion civit_dataset_to_latent notebook.
CAPTIONS_PATH = f"/kaggle/input/{DATASET_NAME}/flux_captions.json"
LATENTS_PATH = f"/kaggle/input/{DATASET_NAME}/flux_latents.safetensors"

CACHE_DIR = "/kaggle/working/cache"
SAVE_DIR = "/kaggle/working/flux_klein_lora"

os.makedirs(CACHE_DIR, exist_ok=True)
os.makedirs(SAVE_DIR, exist_ok=True)

# LoRA training hyper-parameters
RANK = 16
ALPHA = 16
LR = 2e-5

# CELL 4 — Load captions + latents
import json
from safetensors.torch import load_file

with open(CAPTIONS_PATH) as f:
    captions = json.load(f)

latents = load_file(LATENTS_PATH)

# FIX: keep only caption keys that actually have a matching latent tensor.
# Previously `keys = list(captions.keys())` — any caption without a latent
# would raise KeyError later inside FluxLatentDataset.__getitem__.
keys = [k for k in captions if k in latents]
skipped = len(captions) - len(keys)
if skipped:
    print(f"Skipped {skipped} caption(s) with no matching latent.")
print("Samples:", len(keys))

# CELL 5 — Dataset (returns latent + key)
from torch.utils.data import Dataset, DataLoader

class FluxLatentDataset(Dataset):
    """Yields (latent_tensor, key) pairs from the module-level `latents`/`keys`."""

    def __len__(self):
        return len(keys)

    def __getitem__(self, idx):
        k = keys[idx]
        return latents[k], k

dataset = FluxLatentDataset()
loader = DataLoader(dataset, batch_size=1, shuffle=True)

# CELL 6 — Encode text on GPU and cache FLUX-ready embeddings
import gc
from transformers import AutoTokenizer, AutoModel

MODEL_ID = "black-forest-labs/FLUX.2-klein-4B"

tokenizer = AutoTokenizer.from_pretrained(
    MODEL_ID,
    subfolder="tokenizer",
    trust_remote_code=True,
    cache_dir=CACHE_DIR,
)

text_encoder = AutoModel.from_pretrained(
    MODEL_ID,
    subfolder="text_encoder",
    trust_remote_code=True,
    dtype=torch.float16,
    cache_dir=CACHE_DIR,
).to("cuda")

text_encoder.eval()

text_cache = {}

with torch.no_grad():
    # Encode only the keys the dataset will actually serve (see CELL 4 fix).
    for k in keys:
        inputs = tokenizer(
            captions[k],
            padding="max_length",
            truncation=True,
            max_length=128,
            return_tensors="pt",
        ).to("cuda")

        outputs = text_encoder(**inputs, output_hidden_states=True, return_dict=True)
        txt = outputs.hidden_states[-1]  # [1, T, 2560]
        # Tile the hidden dim 3x: [1, T, 2560] -> [1, T, 7680].
        # NOTE(review): this assumes the Klein transformer's context embedder
        # expects width 7680 — confirm against the pipeline's own encode_prompt.
        txt = txt.repeat(1, 1, 3)
        text_cache[k] = txt.cpu()

print("✅ Cached FLUX-ready text embeddings.")

# Free the ~8 GB text encoder before loading the transformer.
del text_encoder
torch.cuda.empty_cache()
gc.collect()
?B/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"a619e3c684394997ac25522ee88aed23"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"config.json: 0.00B [00:00, ?B/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"02e0198de1164fd4a5fd0be433b136e4"}},"metadata":{}},{"name":"stderr","text":"`torch_dtype` is deprecated! Use `dtype` instead!\n","output_type":"stream"},{"output_type":"display_data","data":{"text/plain":"model.safetensors.index.json: 0.00B [00:00, ?B/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"af52526aac28456ea10b96f98ddca3df"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"Fetching 2 files: 0%| | 0/2 [00:00<?, ?it/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"f36c0d81d75c4c8f9c2956f645313817"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"text_encoder/model-00002-of-00002.safete(β¦): 0%| | 0.00/3.08G [00:00<?, ?B/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"ee179610b92e4938883a48e47a455ed3"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"text_encoder/model-00001-of-00002.safete(β¦): 0%| | 0.00/4.97G [00:00<?, ?B/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"0208a0c69faa4f9eb1747d8cd6fc2cea"}},"metadata":{}},{"output_type":"display_data","data":{"text/plain":"Loading checkpoint shards: 0%| | 0/2 [00:00<?, ?it/s]","application/vnd.jupyter.widget-view+json":{"version_major":2,"version_minor":0,"model_id":"6006b9a7aef34137b6d46074ec5c5ebf"}},"metadata":{}},{"name":"stdout","text":"β
# =========================================================
# CELL 7A — Inspect Klein transformer module names
# =========================================================

def linear_layer_names(model):
    """Return the qualified names of every torch.nn.Linear submodule of `model`.

    Useful for choosing PEFT `target_modules` for LoRA injection.
    """
    return [
        name
        for name, module in model.named_modules()
        if isinstance(module, torch.nn.Linear)
    ]

# FIX: `pipe` is created in CELL 7, which appears AFTER this cell in the
# notebook. On a fresh Restart-&-Run-All the original code raised NameError.
# Guard the access and tell the reader what to do instead of crashing.
if "pipe" in globals():
    names = linear_layer_names(pipe.transformer)

    # print a sample
    for n in names[:50]:
        print(n)

    print("\n--- TOTAL Linear layers:", len(names))
else:
    print("Run CELL 7 first to load the pipeline, then re-run this cell.")
# =========================================================
# CELL 7 — Load the FLUX transformer and attach LoRA
# =========================================================

import torch, gc
from diffusers import Flux2KleinPipeline
from peft import LoraConfig, get_peft_model

MODEL_ID = "black-forest-labs/FLUX.2-klein-4B"

pipe = Flux2KleinPipeline.from_pretrained(
    MODEL_ID,
    torch_dtype=torch.float16,
    cache_dir=CACHE_DIR,
)

# Keep ONLY the transformer: text embeddings and latents are pre-computed in
# earlier cells, so the encoder, VAE and scheduler are dead weight here.
for unused_component in ("text_encoder", "vae", "scheduler"):
    setattr(pipe, unused_component, None)

transformer = pipe.transformer

# ---------------- LoRA targets (names verified in CELL 7A) ----------------

TARGET_MODULES = [
    "attn.to_q",
    "attn.to_k",
    "attn.to_v",
    "attn.to_out.0",
    "attn.add_q_proj",
    "attn.add_k_proj",
    "attn.add_v_proj",
    "attn.to_add_out",
    "ff.linear_in",
    "ff.linear_out",
    "ff_context.linear_in",
    "ff_context.linear_out",
]

lora_config = LoraConfig(
    r=RANK,
    lora_alpha=ALPHA,
    target_modules=TARGET_MODULES,
    lora_dropout=0.05,
    bias="none",
)

transformer = get_peft_model(transformer, lora_config)
transformer.enable_gradient_checkpointing()

pipe.transformer = transformer

n_trainable = sum(p.numel() for p in transformer.parameters() if p.requires_grad)
n_total = sum(p.numel() for p in transformer.parameters())

print(f"Trainable params: {n_trainable/1e6:.2f}M / {n_total/1e6:.2f}M")

# CELL 8 — Move transformer to GPU only
pipe.transformer = pipe.transformer.to("cuda")

gc.collect()
torch.cuda.empty_cache()

print("✅ Transformer on GPU.")
# =========================================================
# CELL 9 — Flow-matching LoRA training loop
# =========================================================

import torch, time, os
import torch.nn.functional as F
from tqdm import tqdm

from peft import get_peft_model_state_dict
from safetensors.torch import save_file

# ---------------- Config ----------------

EPOCHS = 1
SAVE_EVERY = 3            # checkpoint every N epochs
FLOW_T_SCALE = 50.0       # maps t in [0,1] onto the transformer's timestep scale
CLAMP_VAL = 3.0           # clamp tokens/noise to stabilize fp16 training
GRAD_CLIP = 0.5

MAX_SECONDS = (11 * 60 + 40) * 60  # 11h 40m — stay inside the Kaggle session budget

device = "cuda"
dtype = torch.float16

pipe.transformer.train()

# ---------------- Helpers ----------------

def patchify_latents(latents):
    """Fold 2x2 spatial patches into tokens: [B,C,H,W] -> [B,(H/2)*(W/2),C*4]."""
    assert latents.ndim == 4, f"Expected [B,C,H,W], got {latents.shape}"
    B, C, H, W = latents.shape
    assert H % 2 == 0 and W % 2 == 0

    latents = latents.reshape(B, C, H // 2, 2, W // 2, 2)
    latents = latents.permute(0, 2, 4, 1, 3, 5).contiguous()
    return latents.view(B, (H // 2) * (W // 2), C * 2 * 2)


def generate_flux_pos_ids(batch, ph, pw, txt_len, device, dtype):
    """Build normalized 4-channel positional ids for image and text tokens.

    Returns (img_ids [batch, ph*pw, 4], txt_ids [batch, txt_len, 4]).
    """
    y = torch.linspace(0, 1, ph, device=device, dtype=dtype)
    x = torch.linspace(0, 1, pw, device=device, dtype=dtype)
    gy, gx = torch.meshgrid(y, x, indexing="ij")

    pos_y = gy.flatten()
    pos_x = gx.flatten()
    zeros = torch.zeros_like(pos_y)

    img_ids = torch.stack([pos_y, pos_x, zeros, zeros], dim=-1)
    img_ids = img_ids.unsqueeze(0).repeat(batch, 1, 1)

    t = torch.arange(txt_len, device=device, dtype=dtype) / txt_len
    txt_ids = torch.stack(
        [t, torch.zeros_like(t), torch.zeros_like(t), torch.zeros_like(t)], dim=-1
    )
    txt_ids = txt_ids.unsqueeze(0).repeat(batch, 1, 1)

    return img_ids, txt_ids


# ---------------- Optimizer ----------------

trainable_params = [p for p in pipe.transformer.parameters() if p.requires_grad]

# NOTE(review): AdamW directly over fp16 LoRA params can be numerically
# unstable; consider keeping LoRA weights in fp32 — TODO confirm.
optimizer = torch.optim.AdamW(
    trainable_params,
    lr=LR,
    betas=(0.9, 0.999),
    weight_decay=1e-4,
)

# ---------------- Training ----------------

start_time = time.time()

print("⏱️ Training started. Max time:", MAX_SECONDS / 3600, "hours")

for epoch in range(1, EPOCHS + 1):

    print(f"\n===== Epoch {epoch} =====")
    epoch_loss = 0.0
    steps = 0  # FIX: count actual steps so the average loss is right on early stop

    for latent_b, key in tqdm(loader, desc=f"Epoch {epoch}"):

        if time.time() - start_time >= MAX_SECONDS:
            print("⏰ Time limit reached. Stopping training.")
            break

        # ---- latent ----
        latent_b = latent_b.to(device, dtype=dtype)

        if latent_b.ndim == 5:
            latent_b = latent_b.squeeze(1)
        if latent_b.ndim == 3:
            latent_b = latent_b.unsqueeze(0)

        # ---- text ----
        enc_b = text_cache[key[0]].to(device, dtype=dtype)  # [1, T, 7680]

        # FIX: print the debug shapes once, not on every step of epoch 1.
        if epoch == 1 and steps == 0:
            print("latent:", latent_b.shape)  # [1,32,128,128]
            print("text:", enc_b.shape)       # [1,128,7680]

        # ---- patchify ----
        tokens = patchify_latents(latent_b)   # [1, (H/2)*(W/2), C*4]
        tokens = torch.clamp(tokens, -CLAMP_VAL, CLAMP_VAL)

        # ---- flow matching: interpolate noise -> data, regress the velocity ----
        eps = torch.randn_like(tokens)
        eps = torch.clamp(eps, -CLAMP_VAL, CLAMP_VAL)

        t = torch.rand(tokens.size(0), device=device, dtype=dtype)

        z_t = (1 - t[:, None, None]) * eps + t[:, None, None] * tokens
        target = tokens - eps
        t_embed = t * FLOW_T_SCALE

        # ---- pos ids ----
        # FIX: derive the patch grid from the latent shape instead of the
        # hard-coded (64, 64) — identical for 128x128 latents, but now the
        # loop also handles other resolutions.
        img_ids, txt_ids = generate_flux_pos_ids(
            tokens.size(0),
            latent_b.size(2) // 2,
            latent_b.size(3) // 2,
            enc_b.size(1),
            device,
            dtype,
        )

        # ---- forward ----
        with torch.autocast("cuda", dtype=torch.float16):
            pred = pipe.transformer(
                hidden_states=z_t,  # NOT embedded
                timestep=t_embed,
                encoder_hidden_states=enc_b,
                img_ids=img_ids,
                txt_ids=txt_ids,
                return_dict=False,
            )[0]

        loss = F.mse_loss(pred.float(), target.float())

        # ---- backward ----
        loss.backward()
        torch.nn.utils.clip_grad_norm_(trainable_params, GRAD_CLIP)
        optimizer.step()
        optimizer.zero_grad(set_to_none=True)

        epoch_loss += loss.item()
        steps += 1

    # FIX: was `epoch_loss / len(loader)`, which understates the loss
    # whenever the time limit stops the epoch early.
    avg_loss = epoch_loss / max(1, steps)
    print(f"Epoch {epoch} | Avg Loss: {avg_loss:.6f}")

    # ---- checkpoint ----
    if epoch % SAVE_EVERY == 0:
        save_path = os.path.join(SAVE_DIR, f"flux_klein_lora_epoch_{epoch:03d}.safetensors")
        lora_state = get_peft_model_state_dict(pipe.transformer)
        save_file(lora_state, save_path)
        print("💾 Saved LoRA:", save_path)

    if time.time() - start_time >= MAX_SECONDS:
        break


# ---------------- Final Save ----------------

final_path = os.path.join(SAVE_DIR, "flux_klein_lora_final.safetensors")
lora_state = get_peft_model_state_dict(pipe.transformer)
save_file(lora_state, final_path)

print("✅ Final FLUX LoRA saved:", final_path)
|