diff --git "a/Qwen destill.ipynb" "b/Qwen destill.ipynb" --- "a/Qwen destill.ipynb" +++ "b/Qwen destill.ipynb" @@ -1 +1 @@ -{"cells":[{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":16012,"status":"ok","timestamp":1775044094232,"user":{"displayName":"Nnekos4Lyfe","userId":"11521490251288716219"},"user_tz":-120},"id":"9QVc2_k_bL3P","outputId":"595bc17c-d374-4222-e293-4fd31d520bbc"},"outputs":[{"name":"stdout","output_type":"stream","text":["Mounted at /content/drive\n"]}],"source":["from google.colab import drive\n","drive.mount('/content/drive')"]},{"cell_type":"code","source":["# ============================= CELL 1.a: Extract Data + Encode Latents (FLUX.2-klein VAE) =============================\n","# @title 1.a – Prepare Images and Encode with Correct FLUX.2 VAE\n","\n","import os\n","import zipfile\n","import torch\n","import numpy as np\n","from google.colab import drive\n","from PIL import Image\n","from tqdm import tqdm\n","from diffusers import AutoencoderKL\n","from transformers import set_seed\n","\n","set_seed(42) # reproducibility (set_seed is imported from transformers above)\n","drive.mount('/content/drive', force_remount=True)\n","\n","zip_path = '/content/drive/MyDrive/my_set.zip' # @param {type:'string'}\n","\n","# ====================== 1. Extract Data ======================\n","print(\"📦 Extracting zip...\")\n","extract_dir = \"/content/data\"\n","os.makedirs(extract_dir, exist_ok=True)\n","\n","with zipfile.ZipFile(zip_path, 'r') as zip_ref:\n"," zip_ref.extractall(extract_dir)\n","\n","image_files = [f for f in os.listdir(extract_dir) if f.lower().endswith(('.png', '.jpg', '.jpeg', '.webp'))]\n","print(f\"✅ Found {len(image_files)} images\")\n","\n","text_files = sorted([f for f in os.listdir(extract_dir) if f.endswith('.txt') and f[0].isdigit()])\n","texts = []\n","for tf in text_files:\n"," with open(os.path.join(extract_dir, tf), \"r\", encoding=\"utf-8\") as f:\n"," content = f.read().strip()\n"," if content:\n"," texts.append(content)\n","\n","print(f\"✅ Loaded {len(texts)} captions\")\n","\n",
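"# (Added) Hedged sanity check, not in the original notebook: latents and captions are later paired\n","# by sorted position, so the counts should match; empty or missing .txt files silently shrink `texts`.\n","if len(texts) != len(image_files):\n","    print(f\"⚠️ Mismatch: {len(image_files)} images vs {len(texts)} captions - check the zip contents\")\n","\n",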
"# ====================== 2. Encode Images → FLUX.2 VAE Latents (Correct for klein) ======================\n","latent_dir = \"/content/drive/MyDrive/flux_klein_latents\"\n","if os.path.exists(latent_dir) and len([f for f in os.listdir(latent_dir) if f.endswith(\".pt\")]) == len(image_files):\n"," print(f\"✅ Using existing latents from {latent_dir}\")\n","else:\n"," print(\"\\n🌀 Encoding images to FLUX.2 VAE latents (recommended for FLUX.2-klein)...\")\n","\n"," # Use FLUX.2 VAE (not FLUX.1-dev)\n"," vae = AutoencoderKL.from_pretrained(\n"," \"black-forest-labs/FLUX.2-dev\", # or \"black-forest-labs/FLUX.2-klein-4B\" if it has a vae subfolder\n"," subfolder=\"vae\",\n"," torch_dtype=torch.float32,\n"," device_map=\"auto\"\n"," )\n"," vae.eval()\n","\n"," os.makedirs(latent_dir, exist_ok=True)\n","\n"," with torch.no_grad():\n"," for img_file in tqdm(image_files, desc=\"Encoding latents\"):\n"," img_path = os.path.join(extract_dir, img_file)\n"," image = Image.open(img_path).convert(\"RGB\").resize((1024, 1024), Image.LANCZOS)\n","\n"," pixel_values = (torch.from_numpy(np.array(image)).permute(2, 0, 1).unsqueeze(0).float() / 255.0)\n"," pixel_values = pixel_values.to(vae.device, dtype=vae.dtype) * 2.0 - 1.0\n","\n"," # NOTE: if this VAE's config defines a shift_factor, the standard form is\n"," # (latents - shift_factor) * scaling_factor rather than scaling alone\n"," latents = vae.encode(pixel_values).latent_dist.sample() * vae.config.scaling_factor\n"," latent_name = os.path.splitext(img_file)[0] + \".pt\"\n"," torch.save(latents.cpu(), os.path.join(latent_dir, latent_name))\n","\n"," del vae\n"," torch.cuda.empty_cache()\n"," print(f\"✅ Latents saved to {latent_dir} (using FLUX.2 VAE)\")"],"metadata":{"id":"mBSTd5Cb7nmF","cellView":"form"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# ============================= CELL 1.b: Teacher Embeddings + Distill 768-dim Encoder =============================\n","# @title 1.b – Distill Qwen2.5-0.5B to 768-dim (using your 250 texts)\n","\n","import torch\n","import torch.nn as nn\n","import torch.nn.functional as F\n","from transformers import AutoTokenizer, AutoModel\n","from datasets import Dataset as HFDataset\n","from torch.utils.data import Dataset\n","from peft import LoraConfig, get_peft_model\n","from transformers import Trainer, TrainingArguments, set_seed\n","\n","set_seed(42)\n","\n","# ====================== 3. Compute Teacher Embeddings (768-dim) ======================\n","print(\"\\n📝 Computing teacher embeddings (Qwen3-Embedding-0.6B) ...\")\n","\n","teacher_model_name = \"Qwen/Qwen3-Embedding-0.6B\"\n","tokenizer = AutoTokenizer.from_pretrained(teacher_model_name)\n","teacher_model = AutoModel.from_pretrained(\n"," teacher_model_name,\n"," torch_dtype=torch.float16,\n"," device_map=\"auto\",\n"," trust_remote_code=True\n",")\n","teacher_model.eval()\n","\n","teacher_embeddings = []\n","with torch.no_grad():\n"," for text in tqdm(texts, desc=\"Teacher encoding\"):\n"," inputs = tokenizer(text, padding=True, truncation=True, max_length=512, return_tensors=\"pt\").to(teacher_model.device)\n"," outputs = teacher_model(**inputs)\n"," emb = outputs.last_hidden_state.mean(dim=1).squeeze(0).cpu() # (hidden_dim,)\n"," teacher_embeddings.append(emb)\n","\n","teacher_embeddings_768 = torch.stack(teacher_embeddings)\n","print(f\"✅ Teacher embeddings shape: {teacher_embeddings_768.shape}\")\n","\n","# Save teacher embeddings\n","torch.save({\n"," \"embeddings\": teacher_embeddings_768,\n"," \"texts\": texts,\n"," \"dim\": 768\n","}, \"/content/drive/MyDrive/qwen_embeddings_768.pt\")\n","\n","del teacher_model\n","torch.cuda.empty_cache()\n","\n",
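"# (Added) Hedged check, not in the original notebook: everything downstream hardcodes a 768-dim\n","# teacher space; verify the actual hidden size instead of assuming it.\n","assert teacher_embeddings_768.shape[1] == 768, f\"Teacher dim is {teacher_embeddings_768.shape[1]}, not 768\"\n","\n",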
"# ====================== 4. Distill Student (Qwen2.5-0.5B → 768-dim) ======================\n","print(\"\\n👨‍🎓 Distilling student Qwen2.5-0.5B to 768-dim...\")\n","\n","student_model_name = \"Qwen/Qwen2.5-0.5B\"\n","base_student = AutoModel.from_pretrained(\n"," student_model_name,\n"," torch_dtype=torch.float32,\n"," device_map=\"auto\",\n"," trust_remote_code=True\n",")\n","\n","lora_config = LoraConfig(\n"," r=16,\n"," lora_alpha=32,\n"," target_modules=[\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\"],\n"," lora_dropout=0.05,\n"," bias=\"none\",\n"," task_type=\"FEATURE_EXTRACTION\"\n",")\n","student_model = get_peft_model(base_student, lora_config)\n","\n","projection = nn.Linear(base_student.config.hidden_size, 768, dtype=torch.float32).to(\"cuda\")\n","projection.train()\n","\n","# Register the projection on the model so the Trainer's optimizer also updates it;\n","# otherwise only the LoRA weights would be trained and the head would stay at random init.\n","student_model.projection_head = projection\n","\n","hf_dataset = HFDataset.from_dict({\"text\": texts})\n","\n","class DistillationDataset(Dataset):\n"," def __init__(self, hf_dataset, tokenizer, teacher_embs, max_length=512):\n"," self.dataset = hf_dataset\n"," self.tokenizer = tokenizer\n"," self.teacher_embs = teacher_embs\n"," self.max_length = max_length\n","\n"," def __len__(self): return len(self.dataset)\n","\n"," def __getitem__(self, idx):\n"," text = self.dataset[idx][\"text\"]\n"," inputs = self.tokenizer(text, padding=\"max_length\", truncation=True, max_length=self.max_length, return_tensors=\"pt\")\n"," return {\n"," \"input_ids\": inputs[\"input_ids\"].squeeze(0),\n"," \"attention_mask\": inputs[\"attention_mask\"].squeeze(0),\n"," \"labels\": self.teacher_embs[idx],\n"," }\n","\n","distill_dataset = DistillationDataset(hf_dataset, tokenizer, teacher_embeddings_768)\n","\n","def collate_fn(batch):\n"," return {\n"," \"input_ids\": torch.stack([item[\"input_ids\"] for item in batch]),\n"," \"attention_mask\": torch.stack([item[\"attention_mask\"] for item in batch]),\n"," \"labels\": torch.stack([item[\"labels\"] for item in batch])\n"," }\n","\n","class DistillTrainer(Trainer):\n"," def compute_loss(self, model, inputs, return_outputs=False, num_items_in_batch=None):\n"," labels = inputs.pop(\"labels\").to(\"cuda\") # (B, 768)\n","\n"," outputs = model(input_ids=inputs[\"input_ids\"], attention_mask=inputs[\"attention_mask\"])\n"," # NOTE: plain mean also averages pad tokens (padding=\"max_length\"); a mask-aware mean\n"," # (sketched after training below) would match the teacher's un-padded pooling more closely\n"," hidden = outputs.last_hidden_state.mean(dim=1) # (B, student_hidden)\n"," student_emb = projection(hidden) # (B, 768)\n","\n"," student_norm = F.normalize(student_emb, p=2, dim=1)\n"," teacher_norm = F.normalize(labels, p=2, dim=1)\n","\n"," mse_loss = F.mse_loss(student_norm, teacher_norm)\n"," cos_loss = (1 - F.cosine_similarity(student_norm, teacher_norm, dim=1)).mean()\n"," loss = 0.25 * mse_loss + 0.75 * cos_loss\n","\n"," return (loss, outputs) if return_outputs else loss\n","\n","training_args = TrainingArguments(\n"," output_dir=\"./distilled_qwen_768\",\n"," per_device_train_batch_size=4,\n"," num_train_epochs=50,\n"," learning_rate=2e-4,\n"," fp16=True,\n"," logging_steps=50,\n"," save_strategy=\"no\",\n"," report_to=\"none\",\n"," remove_unused_columns=False,\n",")\n","\n","trainer = DistillTrainer(\n"," model=student_model,\n"," args=training_args,\n"," train_dataset=distill_dataset,\n"," data_collator=collate_fn,\n",")\n","\n","print(\"🚀 Starting distillation to 768-dim...\")\n","trainer.train()\n","\n",
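"# (Added) Hedged sketch, not in the original notebook: a mask-aware alternative to the plain\n","# .mean(dim=1) pooling above. It excludes pad positions; not wired in, so behavior is unchanged.\n","def masked_mean(last_hidden, attention_mask):\n","    mask = attention_mask.unsqueeze(-1).to(last_hidden.dtype)  # (B, L, 1)\n","    return (last_hidden * mask).sum(dim=1) / mask.sum(dim=1).clamp(min=1e-6)\n","\n",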
"# ====================== Save ======================\n","distilled_save_dir = \"/content/drive/MyDrive/distilled_qwen_768_for_flux\"\n","os.makedirs(distilled_save_dir, exist_ok=True)\n","student_model.save_pretrained(distilled_save_dir)\n","tokenizer.save_pretrained(distilled_save_dir)\n","torch.save(projection.state_dict(), f\"{distilled_save_dir}/projection.pth\")\n","\n","print(f\"\\n✅ SUCCESS! 768-dim distilled encoder saved to {distilled_save_dir}\")\n","print(f\" Latents are ready in {latent_dir}\")\n","print(\" You can now run Cell 2.a (load models) + updated Cell 2.b (test forward pass).\")\n","\n","torch.cuda.empty_cache()"],"metadata":{"id":"AV4tp4G87zWH","cellView":"form"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# ============================= CELL 1.c: Pre-compute Teacher & Student Embeddings =============================\n","# @title 1.c – Pre-compute and save embeddings from both teacher and distilled student\n","\n","import torch\n","import torch.nn as nn\n","import torch.nn.functional as F\n","from tqdm import tqdm\n","from google.colab import drive\n","from transformers import AutoTokenizer, AutoModel\n","from peft import PeftModel\n","\n","drive.mount('/content/drive', force_remount=True)\n","\n","# Load your saved teacher data (from Cell 1.b)\n","data = torch.load(\"/content/drive/MyDrive/qwen_embeddings_768.pt\", weights_only=False)\n","texts = data[\"texts\"]\n","teacher_embs = data[\"embeddings\"].to(\"cuda\") # (250, 768)\n","\n","print(f\"Loaded {len(texts)} texts and teacher embeddings.\")\n","\n","# Load distilled student (same as in Cell 2.a)\n","DISTILLED_DIR = \"/content/drive/MyDrive/distilled_qwen_768_for_flux\"\n","\n","tokenizer = AutoTokenizer.from_pretrained(DISTILLED_DIR)\n","\n","base_student = AutoModel.from_pretrained(\n"," \"Qwen/Qwen2.5-0.5B\",\n"," torch_dtype=torch.float32,\n"," trust_remote_code=True\n",")\n","\n","student = PeftModel.from_pretrained(base_student, DISTILLED_DIR)\n","student = student.to(\"cuda\")\n","student.eval()\n","\n","projection = nn.Linear(base_student.config.hidden_size, 768, dtype=torch.float32).to(\"cuda\")\n","projection.load_state_dict(torch.load(f\"{DISTILLED_DIR}/projection.pth\", map_location=\"cuda\"))\n","projection.eval()\n","\n","print(\"✅ Distilled student + projection loaded.\")\n","\n","# Pre-compute student embeddings\n","student_embs_list = []\n","with torch.no_grad():\n"," for text in tqdm(texts, desc=\"Computing student embeddings\"):\n"," inputs = tokenizer(text, padding=True, truncation=True, max_length=512, return_tensors=\"pt\").to(\"cuda\")\n"," outputs = student(**inputs)\n"," hidden = outputs.last_hidden_state.mean(dim=1) # (1, hidden_size)\n"," emb_768 = projection(hidden).to(torch.float32) # (1, 768)\n"," student_embs_list.append(emb_768.squeeze(0))\n","\n","student_embs = torch.stack(student_embs_list) # (250, 768)\n","\n","# Compute alignment stats\n","cos_sims = []\n","for i in range(len(texts)):\n"," cos = F.cosine_similarity(student_embs[i], teacher_embs[i], dim=0)\n"," cos_sims.append(cos.item())\n","\n","cos_sims = torch.tensor(cos_sims)\n","print(\"\\n📊 Alignment Summary (Teacher vs Student):\")\n","print(f\" Average cosine similarity: {cos_sims.mean().item():.4f}\")\n","print(f\" Min: {cos_sims.min().item():.4f} | Max: {cos_sims.max().item():.4f} | Std: {cos_sims.std().item():.4f}\")\n","\n",
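"# (Added) Hedged extra check, not in the original notebook: retrieval-style alignment - for each\n","# student embedding, the matching teacher embedding should be its nearest neighbor. Stricter than\n","# the pairwise cosine average above.\n","t_n = F.normalize(teacher_embs, dim=1)\n","s_n = F.normalize(student_embs, dim=1)\n","top1 = (s_n @ t_n.T).argmax(dim=1)\n","print(f\"Top-1 retrieval accuracy: {(top1 == torch.arange(len(texts), device=top1.device)).float().mean().item():.4f}\")\n","\n",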
"# Save everything\n","save_path = \"/content/drive/MyDrive/qwen_embeddings_768_full.pt\"\n","torch.save({\n"," \"texts\": texts,\n"," \"teacher_embeddings\": teacher_embs.cpu(), # (250, 768)\n"," \"student_embeddings\": student_embs.cpu(), # (250, 768)\n"," \"cosine_similarities\": cos_sims,\n"," \"dim\": 768\n","}, save_path)\n","\n","print(f\"\\n✅ All embeddings saved to {save_path}\")\n","print(\" You can now use this file in Cell 2.x for evaluation or LoRA training.\")\n","torch.cuda.empty_cache()"],"metadata":{"id":"qF8kMiOk85EY","cellView":"form"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":["Test stuff in cell 2"],"metadata":{"id":"dz4oMmJ79bxQ"}},{"cell_type":"code","source":["# ============================= CELL 2.a: Load Models & Data for Evaluation =============================\n","# @title 2.a – Load distilled student, transformer, and pre-computed embeddings\n","\n","import torch\n","import torch.nn as nn\n","from transformers import AutoTokenizer, AutoModel\n","from peft import PeftModel\n","from diffusers import FluxTransformer2DModel\n","from google.colab import drive\n","\n","drive.mount('/content/drive', force_remount=True)\n","\n","# ====================== 1. Load Distilled Student (768-dim) ======================\n","print(\"🔄 Loading distilled Qwen2.5-0.5B (768-dim)...\")\n","DISTILLED_DIR = \"/content/drive/MyDrive/distilled_qwen_768_for_flux\"\n","\n","tokenizer = AutoTokenizer.from_pretrained(DISTILLED_DIR)\n","\n","base_student = AutoModel.from_pretrained(\n"," \"Qwen/Qwen2.5-0.5B\",\n"," torch_dtype=torch.float32,\n"," trust_remote_code=True\n",")\n","\n","student = PeftModel.from_pretrained(base_student, DISTILLED_DIR)\n","student = student.to(\"cuda\")\n","student.eval()\n","\n","projection = nn.Linear(base_student.config.hidden_size, 768, dtype=torch.float32).to(\"cuda\")\n","projection.load_state_dict(torch.load(f\"{DISTILLED_DIR}/projection.pth\", map_location=\"cuda\"))\n","projection.eval()\n","\n","print(\"✅ Distilled student + projection loaded.\")\n","print(f\" Student hidden size: {base_student.config.hidden_size} → 768-dim output\")\n","\n","# ====================== 2. Load FLUX.2-klein Transformer ======================\n","print(\"\\n🔄 Loading FLUX.2-klein transformer...\")\n","# NOTE: FluxTransformer2DModel is the FLUX.1-era class; the FLUX.2-klein checkpoint likely needs\n","# diffusers' Flux2Transformer2DModel instead - see the weight-mismatch warnings in Cell 4.a's output.\n","transformer = FluxTransformer2DModel.from_pretrained(\n"," \"black-forest-labs/FLUX.2-klein-base-4B\",\n"," subfolder=\"transformer\",\n"," torch_dtype=torch.bfloat16,\n"," low_cpu_mem_usage=False\n",").to(\"cuda\")\n","\n","print(\"✅ FLUX.2-klein transformer loaded on CUDA\")\n","\n",
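"# (Added) Hedged sanity print, not in the original notebook: a rough parameter count,\n","# to confirm the ~4B-parameter checkpoint actually loaded.\n","n_params = sum(p.numel() for p in transformer.parameters())\n","print(f\"Transformer parameters: {n_params/1e9:.2f}B\")\n","\n",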
"# ====================== 3. Load Pre-computed Embeddings ======================\n","data = torch.load(\"/content/drive/MyDrive/qwen_embeddings_768_full.pt\", weights_only=False)\n","texts = data[\"texts\"]\n","teacher_embs = data[\"teacher_embeddings\"].to(\"cuda\") # (250, 768)\n","student_embs = data[\"student_embeddings\"].to(\"cuda\") # (250, 768)\n","\n","print(f\"✅ Loaded pre-computed embeddings for {len(texts)} texts\")\n","print(f\" Teacher shape: {teacher_embs.shape} | Student shape: {student_embs.shape}\")\n","\n","torch.cuda.empty_cache()"],"metadata":{"id":"8B4f-gxe9kpC","cellView":"form"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# ============================= CELL 2.b: Alignment Test (Teacher vs Student) =============================\n","# @title 2.b – Cosine similarity across all 250 embeddings\n","\n","import torch\n","import torch.nn.functional as F\n","from tqdm import tqdm\n","\n","print(\"🔬 Computing alignment between teacher and student on all 250 texts...\")\n","\n","cos_sims = []\n","with torch.no_grad():\n"," for i in tqdm(range(len(texts)), desc=\"Alignment check\"):\n"," cos = F.cosine_similarity(student_embs[i], teacher_embs[i], dim=0)\n"," cos_sims.append(cos.item())\n","\n","cos_sims = torch.tensor(cos_sims)\n","\n","print(\"\\n✅ ALIGNMENT RESULTS (Teacher vs Distilled Student):\")\n","print(f\" Average cosine similarity: {cos_sims.mean().item():.4f}\")\n","print(f\" Minimum: {cos_sims.min().item():.4f}\")\n","print(f\" Maximum: {cos_sims.max().item():.4f}\")\n","print(f\" Standard deviation: {cos_sims.std().item():.4f}\")\n","\n","if cos_sims.mean().item() > 0.94:\n"," print(\"🎉 Excellent alignment! Your distillation is high quality.\")\n","elif cos_sims.mean().item() > 0.90:\n"," print(\"✅ Good alignment. Safe for LoRA training.\")\n","else:\n"," print(\"⚠️ Alignment is moderate. Consider more distillation epochs or a higher LoRA rank.\")"],"metadata":{"id":"kMiMZmie9n1V","cellView":"form"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# ============================= CELL 2.c: Fixed with 64→128 Projection for Klein =============================\n","# @title 2.c – Test with standard packing + small 64→128 projection\n","\n","import torch\n","import torch.nn as nn\n","import torch.nn.functional as F\n","\n","text = texts[0]\n","print(f\"🧪 Test text: {text[:100]}...\")\n","\n","# 1. Student embedding\n","inputs = tokenizer(text, padding=True, truncation=True, max_length=512, return_tensors=\"pt\").to(\"cuda\")\n","with torch.no_grad():\n"," outputs = student(**inputs)\n"," hidden = outputs.last_hidden_state.mean(dim=1)\n"," emb_768 = projection(hidden).to(torch.bfloat16)\n","\n","print(f\"✅ emb_768 shape: {emb_768.shape}\")\n","\n","# 2. Text side: tile the 768-dim embedding ×10 to reach the text width the transformer expects\n","encoder_hidden = emb_768.unsqueeze(1).repeat(1, 1, 10) # (1, 1, 7680)\n","pooled_projections = emb_768\n","\n","# 3. Rotary IDs\n","txt_ids = torch.zeros((1, 3), device=\"cuda\", dtype=torch.bfloat16)\n","img_ids = torch.zeros((4096, 3), device=\"cuda\", dtype=torch.bfloat16)\n","\n",
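"# (Added) Hedged sketch, not in the original notebook: FLUX-style pipelines normally fill img_ids\n","# with 2D patch coordinates rather than zeros; if positions matter for this test, something like:\n","# ids = torch.zeros(64, 64, 3); ids[..., 1] += torch.arange(64)[:, None]; ids[..., 2] += torch.arange(64)[None, :]\n","# img_ids = ids.reshape(4096, 3).to(\"cuda\", torch.bfloat16)\n","\n",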
"# 4. Latent packing (64 ch) + projection to 128\n","dummy_latent = torch.randn(1, 16, 128, 128, device=\"cuda\", dtype=torch.bfloat16)\n","b, c, h, w = dummy_latent.shape\n","latent_packed = dummy_latent.view(b, c, h//2, 2, w//2, 2)\n","latent_packed = latent_packed.permute(0, 2, 4, 1, 3, 5).contiguous()\n","hidden_states = latent_packed.view(b, (h//2)*(w//2), c * 4) # (1, 4096, 64)\n","\n","# Small fixed projection 64 → 128 (randomly initialized placeholder, not trained weights)\n","proj_64_to_128 = nn.Linear(64, 128, bias=False, dtype=torch.bfloat16, device=\"cuda\")\n","hidden_states = proj_64_to_128(hidden_states)\n","\n","print(f\"hidden_states after 64→128: {hidden_states.shape}\")\n","\n","timestep = torch.tensor([1000.0], device=\"cuda\", dtype=torch.bfloat16)\n","\n","# 5. Forward pass\n","print(\"\\n=== Running forward pass ===\")\n","try:\n"," output = transformer(\n"," hidden_states=hidden_states,\n"," timestep=timestep,\n"," encoder_hidden_states=encoder_hidden,\n"," pooled_projections=pooled_projections,\n"," txt_ids=txt_ids,\n"," img_ids=img_ids,\n"," return_dict=False\n"," )[0]\n"," print(\"🎉 SUCCESS! Forward pass works.\")\n"," print(f\"Output shape: {output.shape}\")\n","except Exception as e:\n"," print(\"❌ Failed:\")\n"," print(str(e))"],"metadata":{"cellView":"form","id":"9c0V4D7sCP2B"},"execution_count":null,"outputs":[]},
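{"cell_type":"markdown","source":["(Added) The 2×2 packing in cell 2.c should be lossless; the sketch below inverts it as a quick self-check. The `unpack_latents` helper is hypothetical and not part of the original notebook."],"metadata":{}},{"cell_type":"code","source":["# @title 2.d – (added) Sanity-check the 2×2 latent packing by inverting it\n","import torch\n","\n","def unpack_latents(tokens, c=16, h=128, w=128):\n","    # Inverse of: view(b, c, h//2, 2, w//2, 2) -> permute(0, 2, 4, 1, 3, 5) -> view(b, tokens, c*4)\n","    b = tokens.shape[0]\n","    x = tokens.view(b, h//2, w//2, c, 2, 2)\n","    return x.permute(0, 3, 1, 4, 2, 5).contiguous().view(b, c, h, w)\n","\n","lat = torch.randn(1, 16, 128, 128)\n","packed = lat.view(1, 16, 64, 2, 64, 2).permute(0, 2, 4, 1, 3, 5).contiguous().view(1, 4096, 64)\n","print(torch.equal(unpack_latents(packed), lat))  # expect True\n"],"metadata":{},"execution_count":null,"outputs":[]},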
{"cell_type":"markdown","source":["Cell 3\n"],"metadata":{"id":"dz6FDD1aBSCt"}},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":73},"executionInfo":{"elapsed":4651,"status":"ok","timestamp":1775007678343,"user":{"displayName":"Nnekos4Lyfe","userId":"11521490251288716219"},"user_tz":-120},"id":"FQF71-mvmlc1","outputId":"ab97d8ee-a278-4d0b-9a6b-9f99a47946ab"},"outputs":[{"name":"stdout","output_type":"stream","text":["🔌 Disconnecting Colab session in 15 seconds...\n","Session disconnected.\n"]}],"source":["# ================================================\n","# CELL 3: Auto Disconnect Colab Session\n","# ================================================\n","\n","print(\"🔌 Disconnecting Colab session in 15 seconds...\")\n","import time\n","time.sleep(15) # match the 15-second message above (was sleep(3))\n","\n","from google.colab import runtime\n","runtime.unassign()\n","\n","print(\"Session disconnected.\")"]},{"cell_type":"markdown","metadata":{"id":"cfshTDIFM5ND"},"source":["You can disconnect the Colab runtime past this point. All data from cells 1 and 2 are saved to Drive."]},{"cell_type":"code","execution_count":null,"metadata":{"id":"sWEzuqsmvKua"},"outputs":[],"source":["# ============================= CELL 3.a: Install Dependencies + Setup Parameters + Load Qwen Text Encoder =============================\n","# @title 3.a Setup + Load Text Encoder (one-time for precompute)\n","\n","!pip install -q diffusers transformers peft accelerate datasets tqdm\n","\n","import os\n","import torch\n","import gc\n","from google.colab import drive\n","from diffusers import Flux2KleinPipeline\n","\n","drive.mount('/content/drive', force_remount=True)\n","\n","# ====================== Parameters ======================\n","LATENT_DIR = \"/content/drive/MyDrive/flux_klein_latents\"\n","FINAL_LORA_DIR = \"/content/drive/MyDrive/flux_klein_lora_final\"\n","\n","BATCH_SIZE = 1 # Keep at 1 for safety with variable sequence lengths\n","NUM_EPOCHS = 8\n","LEARNING_RATE = 1e-4\n","LORA_RANK = 32\n","LORA_ALPHA = 32\n","\n","print(\"✅ Dependencies installed and parameters set.\")\n","print(f\" Batch size: {BATCH_SIZE} | Epochs: {NUM_EPOCHS}\")\n","print(f\" Latents from: {LATENT_DIR}\")\n","print(f\" Final LoRA will be saved to: {FINAL_LORA_DIR}\")\n","\n","# ====================== Load Pipeline + Text Encoder ======================\n","print(\"\\n🔄 Loading FLUX.2-klein-base-4B pipeline (Qwen3-4B text encoder)...\")\n","\n","pipe = Flux2KleinPipeline.from_pretrained(\n"," \"black-forest-labs/FLUX.2-klein-base-4B\",\n"," torch_dtype=torch.bfloat16,\n"," device_map=\"balanced\",\n"," low_cpu_mem_usage=True\n",")\n","\n","text_encoder = pipe.text_encoder\n","tokenizer = pipe.tokenizer\n","\n","# Force to CUDA and enable hidden states output\n","text_encoder = text_encoder.to(\"cuda\")\n","text_encoder.config.output_hidden_states = True\n","text_encoder.eval()\n","\n","print(\"✅ Text encoder loaded and moved to CUDA\")\n","print(f\"Current VRAM used: {torch.cuda.memory_allocated()/1024**3:.2f} GB\")\n","\n","# Quick VRAM check\n","print(\"\\n🔍 Quick VRAM check:\")\n","!nvidia-smi --query-gpu=memory.used,memory.total --format=csv"]},
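{"cell_type":"markdown","source":["(Added) Hedged probe before the full precompute: one forward pass to confirm the encoder returns hidden states of the expected shape. Uses only the `tokenizer` and `text_encoder` loaded above; the prompt is an arbitrary example."],"metadata":{}},{"cell_type":"code","source":["# @title 3.a.1 – (added) Probe the text encoder output once\n","import torch\n","\n","with torch.no_grad():\n","    probe = tokenizer(\"a photo of a scene\", return_tensors=\"pt\").to(\"cuda\")\n","    out = text_encoder(**probe, output_hidden_states=True)\n","    print(type(out).__name__)           # e.g. CausalLMOutputWithPast\n","    print(out.hidden_states[-1].shape)  # (1, seq_len, hidden_dim)\n"],"metadata":{},"execution_count":null,"outputs":[]},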
{"cell_type":"code","execution_count":null,"metadata":{"id":"8hDfeHlNvPWE"},"outputs":[],"source":["# ============================= CELL 3.b: Precompute Exact Qwen3-4B Embeddings =============================\n","# @title 3.b Precompute Embeddings (using text encoder from 3.a)\n","\n","import torch\n","from tqdm import tqdm\n","\n","# Load your 250 texts\n","data = torch.load(\"/content/drive/MyDrive/qwen_embeddings_768.pt\", weights_only=False)\n","texts = data[\"texts\"]\n","\n","precomputed = {\n"," \"encoder_hidden_states\": [], # list of (seq_len, hidden_dim)\n"," \"pooled_projections\": [] # list of (hidden_dim,)\n","}\n","\n","with torch.no_grad():\n"," for i, raw_text in enumerate(tqdm(texts, desc=\"Precomputing embeddings\")):\n"," text = raw_text.strip()\n"," if not text:\n"," text = \"a photo of a scene\"\n","\n"," inputs = tokenizer(\n"," text,\n"," padding=\"max_length\",\n"," truncation=True,\n"," max_length=512,\n"," return_tensors=\"pt\",\n"," return_attention_mask=True\n"," ).to(\"cuda\")\n","\n"," if inputs[\"input_ids\"].shape[1] == 0:\n"," print(f\"Warning: zero-length sequence for index {i}, using fallback\")\n"," inputs = tokenizer(\n"," \"a photo of a scene\",\n"," padding=\"max_length\",\n"," truncation=True,\n"," max_length=512,\n"," return_tensors=\"pt\",\n"," return_attention_mask=True\n"," ).to(\"cuda\")\n","\n"," outputs = text_encoder(**inputs)\n","\n"," # Handle CausalLMOutputWithPast correctly\n"," if hasattr(outputs, \"hidden_states\") and outputs.hidden_states is not None:\n"," hidden = outputs.hidden_states[-1].squeeze(0).cpu() # final layer: (seq_len, hidden_dim)\n"," elif hasattr(outputs, \"last_hidden_state\"):\n"," hidden = outputs.last_hidden_state.squeeze(0).cpu()\n"," else:\n"," print(f\"Warning: unexpected output for text {i}, using logits as fallback\")\n"," hidden = outputs.logits.squeeze(0).cpu()\n","\n"," pooled = hidden.mean(dim=0).cpu() # (hidden_dim,)\n","\n"," precomputed[\"encoder_hidden_states\"].append(hidden)\n"," precomputed[\"pooled_projections\"].append(pooled)\n","\n","print(f\"✅ Successfully precomputed embeddings for {len(texts)} texts\")\n","torch.save(precomputed, \"/content/drive/MyDrive/klein_exact_embeddings.pt\")\n","print(\"Saved to /content/drive/MyDrive/klein_exact_embeddings.pt\")"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"px5wQJZ2vTUf"},"outputs":[],"source":["# ============================= CELL 3.c: Unload Text Encoder + Prepare Workspace =============================\n","# @title 3.c Cleanup – Unload Qwen Encoder\n","\n","import gc\n","\n","# Unload pipeline and text encoder\n","if 'pipe' in globals():\n"," del pipe\n","if 'text_encoder' in globals():\n"," del text_encoder\n","if 'tokenizer' in globals():\n"," del tokenizer\n","\n","torch.cuda.empty_cache()\n","gc.collect()\n","\n","print(\"✅ Text encoder and pipeline fully unloaded from VRAM\")\n","print(f\"Current VRAM used: {torch.cuda.memory_allocated()/1024**3:.2f} GB\")\n","\n","# Quick check for latents\n","latent_files = sorted([f for f in os.listdir(LATENT_DIR) if f.endswith(\".pt\")])\n","print(f\"Found {len(latent_files)} latents ready for training\")"]},{"cell_type":"markdown","metadata":{"id":"GDNO0bonrYAo"},"source":["LoRA training"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":7639,"status":"ok","timestamp":1775041892679,"user":{"displayName":"Nnekos4Lyfe","userId":"11521490251288716219"},"user_tz":-120},"id":"llxOztXpvYMO","outputId":"ba09e58b-b148-4838-d108-e8da123b4cdd"},"outputs":[{"name":"stdout","output_type":"stream","text":["=== CELL 4.a – Loading Transformer + LoRA ===\n","Current VRAM before loading: 0.53 GB\n","Loading FLUX.2-klein-base-4B transformer...\n"]},{"name":"stderr","output_type":"stream","text":["Some weights of the model checkpoint at black-forest-labs/FLUX.2-klein-base-4B were not used when initializing FluxTransformer2DModel: \n"," ['double_stream_modulation_img.linear.weight', 'double_stream_modulation_txt.linear.weight', 'single_stream_modulation.linear.weight', 'time_guidance_embed.timestep_embedder.linear_1.weight', 'time_guidance_embed.timestep_embedder.linear_2.weight', the attn.to_qkv_mlp_proj/attn.to_out weights of single_transformer_blocks 0-19, and the ff/ff_context linear_in/linear_out weights of transformer_blocks 0-4 - full list trimmed for readability]\n","Some weights of FluxTransformer2DModel were not initialized from the model checkpoint at black-forest-labs/FLUX.2-klein-base-4B and are newly initialized: [per-block attention q/k/v/out projections, norm linears, proj_mlp/proj_out, ff nets, time_text_embed, plus x_embedder/context_embedder/proj_out/norm_out biases - full list trimmed for readability]\n","You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"]},{"name":"stdout","output_type":"stream","text":["✅ Transformer loaded successfully. VRAM: 9.09 GB\n","✅ LoRA applied successfully\n","VRAM after LoRA: 9.16 GB\n"]}],"source":["# ============================= CELL 4.a: Load Transformer + Apply LoRA =============================\n","# @title 4.a Load Transformer + LoRA (Fixed meta tensor loading)\n","\n","import torch\n","import gc\n","from torch.utils.data import Dataset\n","from peft import LoraConfig, get_peft_model\n","from diffusers import FluxTransformer2DModel\n","from transformers import set_seed\n","\n","set_seed(42)\n","\n","print(\"=== CELL 4.a – Loading Transformer + LoRA ===\")\n","print(f\"Current VRAM before loading: {torch.cuda.memory_allocated()/1024**3:.2f} GB\")\n","\n","# Load precomputed embeddings\n","precomputed = torch.load(\"/content/drive/MyDrive/klein_exact_embeddings.pt\", weights_only=True)\n","\n","# ====================== Load Transformer (Fixed) ======================\n","print(\"Loading FLUX.2-klein-base-4B transformer...\")\n","\n","# NOTE: the unused/newly-initialized weight warnings in this cell's output suggest the checkpoint\n","# does not match the FLUX.1-era FluxTransformer2DModel class; diffusers' Flux2Transformer2DModel\n","# is likely the intended class for FLUX.2-klein.\n","transformer = FluxTransformer2DModel.from_pretrained(\n"," \"black-forest-labs/FLUX.2-klein-base-4B\",\n"," subfolder=\"transformer\",\n"," torch_dtype=torch.bfloat16,\n"," low_cpu_mem_usage=False # Critical: prevents meta tensors\n",").to(\"cuda\")\n","\n","print(f\"✅ Transformer loaded successfully. 
VRAM: {torch.cuda.memory_allocated()/1024**3:.2f} GB\")\n","\n","# ====================== Apply LoRA ======================\n","lora_config = LoraConfig(\n"," r=LORA_RANK,\n"," lora_alpha=LORA_ALPHA,\n"," target_modules=[\n"," \"attn.to_q\", \"attn.to_k\", \"attn.to_v\", \"attn.to_out.0\",\n"," \"attn.to_qkv_mlp_proj\", \"attn.add_q_proj\", \"attn.add_k_proj\",\n"," \"attn.add_v_proj\", \"attn.to_add_out\",\n"," \"ff.linear_in\", \"ff.linear_out\", \"ff_context.linear_in\", \"ff_context.linear_out\"\n"," ],\n"," lora_dropout=0.05,\n"," bias=\"none\"\n",")\n","\n","transformer = get_peft_model(transformer, lora_config)\n","transformer.train()\n","\n","print(\"✅ LoRA applied successfully\")\n","print(f\"VRAM after LoRA: {torch.cuda.memory_allocated()/1024**3:.2f} GB\")\n",
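"\n","# (Added) Hedged check, not in the original notebook: PEFT's print_trainable_parameters()\n","# summarizes what the optimizer will actually update - it should report only the LoRA adapter\n","# weights, a small fraction of the 4B base model.\n","transformer.print_trainable_parameters()"]},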
{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":819},"executionInfo":{"elapsed":9783,"status":"error","timestamp":1775042482579,"user":{"displayName":"Nnekos4Lyfe","userId":"11521490251288716219"},"user_tz":-120},"id":"rx5ngImGxoTT","outputId":"266315e2-e08c-43e8-890b-801d3b63c768"},"outputs":[{"name":"stderr","output_type":"stream","text":["Loading latents: 100%|██████████| 250/250 [00:04<00:00, 54.01it/s]\n"]},{"name":"stdout","output_type":"stream","text":["Latents shape: torch.Size([250, 16, 128, 128])\n","\n","🚀 Starting training...\n"]},{"ename":"RuntimeError","evalue":"Tensors must have same number of dimensions: got 4 and 5","output_type":"error","traceback":["RuntimeError                              Traceback (most recent call last)","/tmp/ipykernel_1539/4055019116.py line 116: trainer.train()","transformers/trainer.py: train -> _inner_training_loop -> training_step -> compute_loss","/tmp/ipykernel_1539/4055019116.py line 77, in compute_loss: model_output = model(hidden_states=noisy_latents, timestep=timesteps * 1000, ...)","(repeated torch/accelerate/autocast wrapper frames trimmed for readability; the traceback is cut off here in the original diff)
\u001b[0mconvert_to_fp32\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmodel_forward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 812\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 813\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m__getstate__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/torch/amp/autocast_mode.py\u001b[0m in \u001b[0;36mdecorate_autocast\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 42\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mdecorate_autocast\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 43\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mautocast_instance\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 44\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfunc\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 45\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 46\u001b[0m decorate_autocast.__script_unsupported = ( # type: ignore[attr-defined]\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/accelerate/utils/operations.py\u001b[0m in \u001b[0;36mforward\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 821\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 822\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mforward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 823\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mmodel_forward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 824\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 825\u001b[0m \u001b[0;31m# To act like a decorator so that it can be popped when doing `extract_model_from_parallel`\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/accelerate/utils/operations.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 809\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 810\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m__call__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 811\u001b[0;31m \u001b[0;32mreturn\u001b[0m 
\u001b[0mconvert_to_fp32\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmodel_forward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 812\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 813\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m__getstate__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/torch/amp/autocast_mode.py\u001b[0m in \u001b[0;36mdecorate_autocast\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 42\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mdecorate_autocast\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 43\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mautocast_instance\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 44\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfunc\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 45\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 46\u001b[0m decorate_autocast.__script_unsupported = ( # type: ignore[attr-defined]\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/accelerate/utils/operations.py\u001b[0m in \u001b[0;36mforward\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 821\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 822\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mforward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 823\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mmodel_forward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 824\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 825\u001b[0m \u001b[0;31m# To act like a decorator so that it can be popped when doing `extract_model_from_parallel`\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/accelerate/utils/operations.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 809\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 810\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m__call__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 811\u001b[0;31m \u001b[0;32mreturn\u001b[0m 
\u001b[0mconvert_to_fp32\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmodel_forward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 812\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 813\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m__getstate__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/torch/amp/autocast_mode.py\u001b[0m in \u001b[0;36mdecorate_autocast\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 42\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mdecorate_autocast\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 43\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mautocast_instance\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 44\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfunc\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 45\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 46\u001b[0m decorate_autocast.__script_unsupported = ( # type: ignore[attr-defined]\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/accelerate/utils/operations.py\u001b[0m in \u001b[0;36mforward\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 821\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 822\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mforward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 823\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mmodel_forward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 824\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 825\u001b[0m \u001b[0;31m# To act like a decorator so that it can be popped when doing `extract_model_from_parallel`\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/accelerate/utils/operations.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 809\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 810\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m__call__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 811\u001b[0;31m \u001b[0;32mreturn\u001b[0m 
\u001b[0mconvert_to_fp32\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmodel_forward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 812\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 813\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m__getstate__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/torch/amp/autocast_mode.py\u001b[0m in \u001b[0;36mdecorate_autocast\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 42\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mdecorate_autocast\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 43\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mautocast_instance\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 44\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfunc\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 45\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 46\u001b[0m decorate_autocast.__script_unsupported = ( # type: ignore[attr-defined]\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/peft/peft_model.py\u001b[0m in \u001b[0;36mforward\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 919\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_enable_peft_forward_hooks\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 920\u001b[0m \u001b[0mkwargs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m{\u001b[0m\u001b[0mk\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mv\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mk\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mv\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mitems\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mk\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mspecial_peft_forward_args\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 921\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_base_model\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 922\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 923\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mgenerate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m 
\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36m_wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1774\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_compiled_call_impl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# type: ignore[misc]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1775\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1776\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_call_impl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1777\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1778\u001b[0m \u001b[0;31m# torchrec tests the code consistency with the following code\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36m_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1785\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0m_global_backward_pre_hooks\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0m_global_backward_hooks\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1786\u001b[0m or _global_forward_hooks or _global_forward_pre_hooks):\n\u001b[0;32m-> 1787\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mforward_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1788\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1789\u001b[0m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/diffusers/utils/peft_utils.py\u001b[0m in \u001b[0;36mwrapper\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 313\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 314\u001b[0m \u001b[0;31m# Execute the forward pass\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 315\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mforward_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 316\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mresult\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 317\u001b[0m 
\u001b[0;32mfinally\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/diffusers/models/transformers/transformer_flux.py\u001b[0m in \u001b[0;36mforward\u001b[0;34m(self, hidden_states, encoder_hidden_states, pooled_projections, timestep, img_ids, txt_ids, guidance, joint_attention_kwargs, controlnet_block_samples, controlnet_single_block_samples, return_dict, controlnet_blocks_repeat)\u001b[0m\n\u001b[1;32m 724\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 725\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 726\u001b[0;31m encoder_hidden_states, hidden_states = block(\n\u001b[0m\u001b[1;32m 727\u001b[0m \u001b[0mhidden_states\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mhidden_states\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 728\u001b[0m \u001b[0mencoder_hidden_states\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mencoder_hidden_states\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36m_wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1774\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_compiled_call_impl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# type: ignore[misc]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1775\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1776\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_call_impl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1777\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1778\u001b[0m \u001b[0;31m# torchrec tests the code consistency with the following code\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36m_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1785\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0m_global_backward_pre_hooks\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0m_global_backward_hooks\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1786\u001b[0m or _global_forward_hooks or _global_forward_pre_hooks):\n\u001b[0;32m-> 1787\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mforward_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1788\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1789\u001b[0m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/diffusers/models/transformers/transformer_flux.py\u001b[0m in \u001b[0;36mforward\u001b[0;34m(self, hidden_states, 
encoder_hidden_states, temb, image_rotary_emb, joint_attention_kwargs)\u001b[0m\n\u001b[1;32m 451\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 452\u001b[0m \u001b[0;31m# Attention.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 453\u001b[0;31m attention_outputs = self.attn(\n\u001b[0m\u001b[1;32m 454\u001b[0m \u001b[0mhidden_states\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mnorm_hidden_states\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 455\u001b[0m \u001b[0mencoder_hidden_states\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mnorm_encoder_hidden_states\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36m_wrapped_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1774\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_compiled_call_impl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# type: ignore[misc]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1775\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1776\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_call_impl\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1777\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1778\u001b[0m \u001b[0;31m# torchrec tests the code consistency with the following code\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/torch/nn/modules/module.py\u001b[0m in \u001b[0;36m_call_impl\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1785\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0m_global_backward_pre_hooks\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0m_global_backward_hooks\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1786\u001b[0m or _global_forward_hooks or _global_forward_pre_hooks):\n\u001b[0;32m-> 1787\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mforward_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1788\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1789\u001b[0m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/diffusers/models/transformers/transformer_flux.py\u001b[0m in \u001b[0;36mforward\u001b[0;34m(self, hidden_states, encoder_hidden_states, attention_mask, image_rotary_emb, **kwargs)\u001b[0m\n\u001b[1;32m 350\u001b[0m )\n\u001b[1;32m 351\u001b[0m \u001b[0mkwargs\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m{\u001b[0m\u001b[0mk\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mw\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mk\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mw\u001b[0m \u001b[0;32min\u001b[0m 
\u001b[0mkwargs\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mitems\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mk\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mattn_parameters\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 352\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mprocessor\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhidden_states\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mencoder_hidden_states\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mattention_mask\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mimage_rotary_emb\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 353\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 354\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/diffusers/models/transformers/transformer_flux.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, attn, hidden_states, encoder_hidden_states, attention_mask, image_rotary_emb)\u001b[0m\n\u001b[1;32m 108\u001b[0m \u001b[0mencoder_key\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mattn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnorm_added_k\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mencoder_key\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 109\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 110\u001b[0;31m \u001b[0mquery\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcat\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mencoder_query\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mquery\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdim\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 111\u001b[0m \u001b[0mkey\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcat\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mencoder_key\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkey\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdim\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 112\u001b[0m \u001b[0mvalue\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcat\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mencoder_value\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvalue\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdim\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;31mRuntimeError\u001b[0m: Tensors must have same number of dimensions: got 4 and 5"]}],"source":["# ============================= CELL 4.b: LoRA Training (Robust version for klein) =============================\n","# @title 4.b Training with Precomputed + Careful Reshape\n","\n","from transformers import Trainer, TrainingArguments\n","import torch.nn.functional as F\n","import torch.nn as nn\n","from tqdm import tqdm\n","\n","# Load latents (keep on CPU until needed)\n","latents_list = []\n","for lf in tqdm(sorted([f for f in os.listdir(LATENT_DIR) if f.endswith(\".pt\")]), desc=\"Loading latents\"):\n"," latent = 
torch.load(os.path.join(LATENT_DIR, lf), weights_only=True)\n","    if latent.dim() == 4 and latent.shape[0] == 1:\n","        latent = latent.squeeze(0)\n","    latents_list.append(latent)\n","latents = torch.stack(latents_list)\n","print(f\"Latents shape: {latents.shape}\")\n","\n","precomputed = torch.load(\"/content/drive/MyDrive/klein_exact_embeddings.pt\", weights_only=True)\n","\n","# Pooled: 2560 → 768 (the width FluxTransformer2DModel expects for pooled_projections).\n","# Initialized as a truncated identity so training starts from a near-passthrough mapping.\n","pooled_projection = nn.Linear(2560, 768, bias=True, dtype=torch.bfloat16).to(\"cuda\")\n","with torch.no_grad():\n","    min_d = min(2560, 768)\n","    pooled_projection.weight.data[:, :min_d] = torch.eye(min_d, dtype=torch.bfloat16)\n","    pooled_projection.bias.data.zero_()\n","pooled_projection.train()\n","\n","class FluxLoRADataset(Dataset):\n","    def __init__(self, latents, precomputed):\n","        self.latents = latents\n","        self.encoder_hs = precomputed[\"encoder_hidden_states\"]\n","        self.pooled = precomputed[\"pooled_projections\"]\n","\n","    def __len__(self): return len(self.latents)\n","\n","    def __getitem__(self, idx):\n","        return {\n","            \"latent\": self.latents[idx],\n","            \"encoder_hidden_states\": self.encoder_hs[idx],  # (seq_len, ~2560)\n","            \"pooled_raw\": self.pooled[idx]\n","        }\n","\n","def collate_fn(batch):\n","    return {\n","        \"latent\": torch.stack([item[\"latent\"] for item in batch]),\n","        \"encoder_hidden_states\": [item[\"encoder_hidden_states\"] for item in batch],\n","        \"pooled_raw\": torch.stack([item[\"pooled_raw\"] for item in batch])\n","    }\n","\n","class FluxLoRATrainer(Trainer):\n","    def compute_loss(self, model, inputs, return_outputs=False, num_items_in_batch=None):\n","        latents = inputs[\"latent\"].to(dtype=torch.bfloat16, device=model.device)\n","        encoder_hs_list = inputs[\"encoder_hidden_states\"]\n","        pooled_raw = inputs[\"pooled_raw\"].to(dtype=torch.bfloat16, device=model.device)\n","\n","        pooled_projections = pooled_projection(pooled_raw)\n","\n","        # Rectified-flow style interpolation: x_t = (1 - t) * x0 + t * noise\n","        batch_size = latents.shape[0]\n","        timesteps = torch.rand(batch_size, device=latents.device)\n","        noise = torch.randn_like(latents)\n","        noisy_latents = (1 - timesteps.view(-1, 1, 1, 1)) * latents + timesteps.view(-1, 1, 1, 1) * noise\n","\n","        # === Careful reshape for encoder_hidden_states ===\n","        # Take the first item (B=1), move to device, widen to the expected 7680 = 3 * 2560,\n","        # and add the batch dimension the transformer expects: (1, seq_len, 7680).\n","        enc = encoder_hs_list[0].to(dtype=torch.bfloat16, device=model.device)  # (seq_len, 2560)\n","        encoder_hidden_states = torch.cat([enc, enc, enc], dim=-1).unsqueeze(0)  # (1, seq_len, 7680)\n","\n","        seq_len = encoder_hidden_states.shape[1]\n","        txt_ids = torch.zeros((seq_len, 3), device=model.device, dtype=torch.bfloat16)\n","\n","        # Image tokens: 1024x1024 latents (16x128x128) give 128 * 128 // 4 = 4096 tokens under 2x2 packing\n","        img_tokens = latents.shape[2] * latents.shape[3] // 4\n","        img_ids = torch.zeros((img_tokens, 3), device=model.device, dtype=torch.bfloat16)\n","\n","        # NOTE: hidden_states below is still the *unpacked* 4D latent (B, 16, 128, 128);\n","        # the transformer expects packed 3D tokens (B, img_tokens, channels), and passing\n","        # 4D latents is exactly what produced the \"got 4 and 5\" torch.cat error shown above.\n","        model_output = model(\n","            hidden_states=noisy_latents,\n","            timestep=timesteps * 1000,\n","            encoder_hidden_states=encoder_hidden_states,\n","            pooled_projections=pooled_projections,\n","            txt_ids=txt_ids,\n","            img_ids=img_ids,\n","            return_dict=False\n","        )[0]\n","\n","        # Flow-matching target: the velocity pointing from data to noise\n","        target = noise - latents\n","        loss = F.mse_loss(model_output, target)\n","\n","        return (loss, model_output) if return_outputs else loss\n","\n","training_args = TrainingArguments(\n","    output_dir=\"/content/flux_klein_lora\",\n","    
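# (Editor's note) bf16=True matches the bfloat16 transformer weights loaded above, and\n","    # remove_unused_columns=False is kept because the collator returns custom keys\n","    # (\"latent\", \"encoder_hidden_states\", \"pooled_raw\") that the Trainer might otherwise drop.\n","    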
per_device_train_batch_size=BATCH_SIZE,\n","    num_train_epochs=NUM_EPOCHS,\n","    learning_rate=LEARNING_RATE,\n","    lr_scheduler_type=\"cosine\",\n","    warmup_steps=50,\n","    bf16=True,\n","    logging_steps=10,\n","    save_strategy=\"epoch\",\n","    save_total_limit=2,\n","    report_to=\"none\",\n","    remove_unused_columns=False,\n","    dataloader_pin_memory=False,\n",")\n","\n","trainer = FluxLoRATrainer(\n","    model=transformer,\n","    args=training_args,\n","    train_dataset=dataset,\n","    data_collator=collate_fn,\n",")\n","\n","print(\"\\n🚀 Starting training...\")\n","trainer.train()"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"DRoYrgC-rbkt"},"outputs":[],"source":["# ============================= CELL 4.c: Save LoRA =============================\n","# @title 4.c Save Final LoRA\n","\n","final_lora_dir = FINAL_LORA_DIR  # FINAL_LORA_DIR comes from the earlier config cell\n","os.makedirs(final_lora_dir, exist_ok=True)\n","transformer.save_pretrained(final_lora_dir)\n","\n","print(f\"\\n✅ Training completed! LoRA saved to: {final_lora_dir}\")\n","torch.cuda.empty_cache()\n","gc.collect()"]},{"cell_type":"markdown","metadata":{"id":"wqiYKR-inOlo"},"source":["Original LoRA training code (kept below for reference)"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","colab":{"background_save":true},"id":"fYbQfjC9RBWY"},"outputs":[],"source":["# ============================= CELL 4: LoRA Training on FLUX.2-klein-base-4B (Clean & Reliable) =============================\n","# @title 4. LoRA Training — FLUX.2-klein-base-4B + 768-dim Distilled Qwen\n","\n","import os\n","import torch\n","import torch.nn as nn\n","import torch.nn.functional as F\n","import gc\n","from torch.utils.data import Dataset\n","from tqdm import tqdm\n","from google.colab import drive\n","from transformers import AutoTokenizer, AutoModel, Trainer, TrainingArguments, set_seed\n","from peft import LoraConfig, get_peft_model, PeftModel\n","from diffusers import FluxTransformer2DModel\n","\n","set_seed(42)\n","drive.mount('/content/drive', force_remount=True)\n","\n","# DISTILLED_DIR, LATENT_DIR, LORA_RANK, LORA_ALPHA, BATCH_SIZE, NUM_EPOCHS and\n","# LEARNING_RATE are expected from the earlier config cell.\n","\n","print(\"=== CELL 4 START - Clean Restart ===\")\n","print(f\"Current VRAM used: {torch.cuda.memory_allocated()/1024**3:.2f} GB\")\n","\n","# ====================== 1. Load Distilled 768-dim Encoder ======================\n","print(\"\\n[1/5] Loading distilled 768-dim Qwen encoder...\")\n","\n","tokenizer = AutoTokenizer.from_pretrained(DISTILLED_DIR)\n","\n","base_qwen = AutoModel.from_pretrained(\n","    \"Qwen/Qwen2.5-0.5B\",\n","    torch_dtype=torch.float32,\n","    device_map=\"auto\",\n","    trust_remote_code=True,\n","    low_cpu_mem_usage=True\n",")\n","\n","student_model = PeftModel.from_pretrained(base_qwen, DISTILLED_DIR)\n","student_model.eval()\n","\n","projection = nn.Linear(base_qwen.config.hidden_size, 768).to(\"cuda\")\n","projection.load_state_dict(torch.load(f\"{DISTILLED_DIR}/projection.pth\", map_location=\"cuda\"))\n","projection.eval()\n","\n","print(\"[DEBUG] Distilled encoder loaded (768-dim)\")\n","\n","# ====================== 2. 
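(added) Encoder Smoke Test ======================\n","# Editor's sketch: a quick end-to-end check that the distilled encoder really returns\n","# a (1, 768) embedding before the expensive transformer work starts.\n","with torch.no_grad():\n","    _probe = tokenizer(\"a test caption\", return_tensors=\"pt\").to(\"cuda\")\n","    _emb = projection(student_model(**_probe).last_hidden_state.mean(dim=1))\n","print(f\"[DEBUG] probe embedding shape: {_emb.shape}\")  # expect torch.Size([1, 768])\n","del _probe, _emb\n","\n","# ====================== 2. 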
Load Data ======================\n","print(\"\\n[2/5] Loading texts and latents...\")\n","\n","data = torch.load(\"/content/drive/MyDrive/qwen_embeddings_768.pt\", weights_only=False)\n","texts = data[\"texts\"]\n","print(f\"[DEBUG] Loaded {len(texts)} texts\")\n","\n","latent_files = sorted([f for f in os.listdir(LATENT_DIR) if f.endswith(\".pt\")])\n","latents = []\n","for lf in tqdm(latent_files, desc=\"Loading latents\"):\n"," latent = torch.load(os.path.join(LATENT_DIR, lf), weights_only=False)\n"," if latent.dim() == 4 and latent.shape[0] == 1:\n"," latent = latent.squeeze(0)\n"," latents.append(latent)\n","\n","latents = torch.stack(latents)\n","print(f\"[DEBUG] Latents shape: {latents.shape}\")\n","\n","# ====================== 3. Dataset ======================\n","class FluxLoRADataset(Dataset):\n"," def __init__(self, latents, texts):\n"," self.latents = latents\n"," self.texts = texts\n","\n"," def __len__(self): return len(self.latents)\n","\n"," def __getitem__(self, idx):\n"," return {\"latent\": self.latents[idx], \"text\": self.texts[idx]}\n","\n","dataset = FluxLoRADataset(latents, texts)\n","\n","def collate_fn(batch):\n"," return {\n"," \"latent\": torch.stack([item[\"latent\"] for item in batch]),\n"," \"texts\": [item[\"text\"] for item in batch]\n"," }\n","\n","# ====================== 4. Load Transformer + LoRA ======================\n","print(\"\\n[3/5] Loading FLUX.2-klein-base-4B transformer...\")\n","\n","torch.cuda.empty_cache()\n","gc.collect()\n","\n","transformer = FluxTransformer2DModel.from_pretrained(\n"," \"black-forest-labs/FLUX.2-klein-base-4B\",\n"," subfolder=\"transformer\",\n"," torch_dtype=torch.bfloat16,\n"," low_cpu_mem_usage=False,\n",").to(\"cuda\")\n","\n","print(f\"[DEBUG] Transformer loaded. VRAM: {torch.cuda.memory_allocated()/1024**3:.2f} GB\")\n","\n","lora_config = LoraConfig(\n"," r=LORA_RANK,\n"," lora_alpha=LORA_ALPHA,\n"," target_modules=[\n"," \"attn.to_q\", \"attn.to_k\", \"attn.to_v\", \"attn.to_out.0\",\n"," \"attn.to_qkv_mlp_proj\", \"attn.add_q_proj\", \"attn.add_k_proj\", \"attn.add_v_proj\", \"attn.to_add_out\",\n"," \"ff.linear_in\", \"ff.linear_out\", \"ff_context.linear_in\", \"ff_context.linear_out\"\n"," ],\n"," lora_dropout=0.05,\n"," bias=\"none\"\n",")\n","\n","transformer = get_peft_model(transformer, lora_config)\n","transformer.train()\n","\n","print(\"[DEBUG] LoRA applied successfully\")\n","\n","# ====================== 5. 
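(added) Shape Check for the Repeat Trick ======================\n","# Editor's sketch: the \"simple repeat trick\" used below widens a pooled (B, 768)\n","# embedding into one pseudo-token of width 7680 = 10 * 768 for encoder_hidden_states.\n","_demo = torch.zeros(2, 768).unsqueeze(1).repeat(1, 1, 10)\n","print(f\"[DEBUG] repeat-trick shape: {_demo.shape}\")  # torch.Size([2, 1, 7680])\n","del _demo\n","\n","# ====================== 5. 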
Trainer with Simple Repeat Trick ======================\n","class FluxLoRATrainer(Trainer):\n"," def compute_loss(self, model, inputs, return_outputs=False, num_items_in_batch=None):\n"," latents = inputs[\"latent\"].to(dtype=torch.bfloat16, device=model.device)\n"," raw_texts = inputs[\"texts\"]\n","\n"," # Get 768-dim embedding\n"," text_inputs = tokenizer(raw_texts, padding=True, truncation=True, max_length=512, return_tensors=\"pt\").to(\"cuda\")\n","\n"," with torch.no_grad():\n"," outputs = student_model(**text_inputs)\n"," hidden = outputs.last_hidden_state.mean(dim=1)\n"," text_emb_768 = projection(hidden).to(dtype=torch.bfloat16) # (B, 768)\n","\n"," batch_size = latents.shape[0]\n"," timesteps = torch.rand(batch_size, device=latents.device)\n"," noise = torch.randn_like(latents)\n","\n"," noisy_latents = (1 - timesteps.view(-1, 1, 1, 1)) * latents + timesteps.view(-1, 1, 1, 1) * noise\n","\n"," # Simple repeat trick for encoder_hidden_states (most stable for single-token)\n"," encoder_hidden_states = text_emb_768.unsqueeze(1).repeat(1, 1, 10) # (B, 1, 7680)\n","\n"," # 2D ids (no batch dimension)\n"," txt_ids = torch.zeros((1, 3), device=latents.device, dtype=torch.bfloat16)\n"," img_ids = torch.zeros((latents.shape[2] * latents.shape[3], 3), device=latents.device, dtype=torch.bfloat16)\n","\n"," model_output = model(\n"," hidden_states=noisy_latents,\n"," timestep=timesteps * 1000,\n"," encoder_hidden_states=encoder_hidden_states,\n"," pooled_projections=text_emb_768,\n"," txt_ids=txt_ids,\n"," img_ids=img_ids,\n"," return_dict=False\n"," )[0]\n","\n"," target = noise - latents\n"," loss = F.mse_loss(model_output, target)\n","\n"," print(f\"[DEBUG] Loss: {loss.item():.6f} | pooled: {text_emb_768.shape} | encoder: {encoder_hidden_states.shape}\")\n"," return (loss, model_output) if return_outputs else loss\n","\n","# ====================== Training ======================\n","training_args = TrainingArguments(\n"," output_dir=\"/content/flux_klein_lora\",\n"," per_device_train_batch_size=BATCH_SIZE,\n"," num_train_epochs=NUM_EPOCHS,\n"," learning_rate=LEARNING_RATE,\n"," lr_scheduler_type=\"cosine\",\n"," warmup_steps=50,\n"," bf16=True,\n"," logging_steps=10,\n"," save_strategy=\"epoch\",\n"," save_total_limit=2,\n"," report_to=\"none\",\n"," remove_unused_columns=False,\n",")\n","\n","trainer = FluxLoRATrainer(\n"," model=transformer,\n"," args=training_args,\n"," train_dataset=dataset,\n"," data_collator=collate_fn,\n",")\n","\n","print(\"\\n๐Ÿš€ Starting LoRA training with simple repeat trick...\")\n","trainer.train()\n","\n","# ====================== Save ======================\n","final_lora_dir = \"/content/drive/MyDrive/flux_klein_lora_final\"\n","os.makedirs(final_lora_dir, exist_ok=True)\n","transformer.save_pretrained(final_lora_dir)\n","\n","print(f\"\\nโœ… Training completed!\")\n","print(f\" LoRA saved to: {final_lora_dir}\")\n","print(\" You can now use this LoRA with your 768-dim distilled Qwen for inference.\")\n","\n","torch.cuda.empty_cache()\n","gc.collect()"]}],"metadata":{"accelerator":"GPU","colab":{"gpuType":"T4","provenance":[]},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"name":"python"}},"nbformat":4,"nbformat_minor":0} \ No newline at end of file 
+{"cells":[{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":16012,"status":"ok","timestamp":1775044094232,"user":{"displayName":"Nnekos4Lyfe","userId":"11521490251288716219"},"user_tz":-120},"id":"9QVc2_k_bL3P","outputId":"595bc17c-d374-4222-e293-4fd31d520bbc"},"outputs":[{"name":"stdout","output_type":"stream","text":["Mounted at /content/drive\n"]}],"source":["from google.colab import drive\n","drive.mount('/content/drive')"]},{"cell_type":"code","source":["# ============================= CELL 1.a: Extract Data + Encode Latents (FLUX.2-klein VAE) =============================\n","# @title 1.a โ€“ Prepare Images and Encode with Correct FLUX.2 VAE\n","\n","import os\n","import zipfile\n","import torch\n","import numpy as np\n","from google.colab import drive\n","from PIL import Image\n","from tqdm import tqdm\n","from diffusers import AutoencoderKL\n","from transformers import set_seed\n","\n","set_seed(42) # if you have it imported, otherwise add: from transformers import set_seed\n","drive.mount('/content/drive', force_remount=True)\n","\n","zip_path = '/content/drive/MyDrive/my_set.zip' # @param {type:'string'}\n","\n","# ====================== 1. Extract Data ======================\n","print(\"๐Ÿ“ฆ Extracting zip...\")\n","extract_dir = \"/content/data\"\n","os.makedirs(extract_dir, exist_ok=True)\n","\n","with zipfile.ZipFile(zip_path, 'r') as zip_ref:\n"," zip_ref.extractall(extract_dir)\n","\n","image_files = [f for f in os.listdir(extract_dir) if f.lower().endswith(('.png', '.jpg', '.jpeg', '.webp'))]\n","print(f\"โœ… Found {len(image_files)} images\")\n","\n","text_files = sorted([f for f in os.listdir(extract_dir) if f.endswith('.txt') and f[0].isdigit()])\n","texts = []\n","for tf in text_files:\n"," with open(os.path.join(extract_dir, tf), \"r\", encoding=\"utf-8\") as f:\n"," content = f.read().strip()\n"," if content:\n"," texts.append(content)\n","\n","print(f\"โœ… Loaded {len(texts)} captions\")\n","\n","# ====================== 2. 
Encode Images โ†’ FLUX.2 VAE Latents (Correct for klein) ======================\n","latent_dir = \"/content/drive/MyDrive/flux_klein_latents\"\n","if os.path.exists(latent_dir) and len([f for f in os.listdir(latent_dir) if f.endswith(\".pt\")]) == len(image_files):\n"," print(f\"โœ… Using existing latents from {latent_dir}\")\n","else:\n"," print(\"\\n๐ŸŒ€ Encoding images to FLUX.2 VAE latents (recommended for FLUX.2-klein)...\")\n","\n"," # Use FLUX.2 VAE (not FLUX.1-dev)\n"," vae = AutoencoderKL.from_pretrained(\n"," \"black-forest-labs/FLUX.2-dev\", # or \"black-forest-labs/FLUX.2-klein-4B\" if it has vae subfolder\n"," subfolder=\"vae\",\n"," torch_dtype=torch.float32,\n"," device_map=\"auto\"\n"," )\n"," vae.eval()\n","\n"," os.makedirs(latent_dir, exist_ok=True)\n","\n"," with torch.no_grad():\n"," for img_file in tqdm(image_files, desc=\"Encoding latents\"):\n"," img_path = os.path.join(extract_dir, img_file)\n"," image = Image.open(img_path).convert(\"RGB\").resize((1024, 1024), Image.LANCZOS)\n","\n"," pixel_values = (torch.from_numpy(np.array(image)).permute(2, 0, 1).unsqueeze(0).float() / 255.0)\n"," pixel_values = pixel_values.to(vae.device, dtype=vae.dtype) * 2.0 - 1.0\n","\n"," latents = vae.encode(pixel_values).latent_dist.sample() * vae.config.scaling_factor\n"," latent_name = os.path.splitext(img_file)[0] + \".pt\"\n"," torch.save(latents.cpu(), os.path.join(latent_dir, latent_name))\n","\n"," del vae\n"," torch.cuda.empty_cache()\n"," print(f\"โœ… Latents saved to {latent_dir} (using FLUX.2 VAE)\")"],"metadata":{"id":"mBSTd5Cb7nmF","cellView":"form"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# ============================= CELL 1.b: Teacher Embeddings + Distill 768-dim Encoder =============================\n","# @title 1.b โ€“ Distill Qwen2.5-0.5B to 768-dim (using your 250 texts)\n","\n","import torch\n","import torch.nn as nn\n","import torch.nn.functional as F\n","from transformers import AutoTokenizer, AutoModel\n","from datasets import Dataset as HFDataset\n","from torch.utils.data import Dataset\n","from peft import LoraConfig, get_peft_model\n","from transformers import Trainer, TrainingArguments, set_seed\n","\n","set_seed(42)\n","\n","# ====================== 3. Compute Teacher Embeddings (768-dim) ======================\n","print(\"\\n๐Ÿ“ Computing teacher embeddings (Qwen3-Embedding-0.6B) ...\")\n","\n","teacher_model_name = \"Qwen/Qwen3-Embedding-0.6B\"\n","tokenizer = AutoTokenizer.from_pretrained(teacher_model_name)\n","teacher_model = AutoModel.from_pretrained(\n"," teacher_model_name,\n"," torch_dtype=torch.float16,\n"," device_map=\"auto\",\n"," trust_remote_code=True\n",")\n","teacher_model.eval()\n","\n","teacher_embeddings = []\n","with torch.no_grad():\n"," for text in tqdm(texts, desc=\"Teacher encoding\"):\n"," inputs = tokenizer(text, padding=True, truncation=True, max_length=512, return_tensors=\"pt\").to(teacher_model.device)\n"," outputs = teacher_model(**inputs)\n"," emb = outputs.last_hidden_state.mean(dim=1).squeeze(0).cpu() # (768,)\n"," teacher_embeddings.append(emb)\n","\n","teacher_embeddings_768 = torch.stack(teacher_embeddings)\n","print(f\"โœ… Teacher embeddings shape: {teacher_embeddings_768.shape}\")\n","\n","# Save teacher embeddings\n","torch.save({\n"," \"embeddings\": teacher_embeddings_768,\n"," \"texts\": texts,\n"," \"dim\": 768\n","}, \"/content/drive/MyDrive/qwen_embeddings_768.pt\")\n","\n","del teacher_model\n","torch.cuda.empty_cache()\n","\n","# ====================== 4. 
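(added) Verify Saved Teacher Embeddings ======================\n","# Editor's sketch: reload the file written above and sanity-check it before distilling.\n","_chk = torch.load(\"/content/drive/MyDrive/qwen_embeddings_768.pt\", weights_only=False)\n","assert _chk[\"embeddings\"].shape[0] == len(_chk[\"texts\"]), \"caption/embedding count mismatch\"\n","print(f\"[DEBUG] reloaded {_chk['embeddings'].shape} teacher embeddings\")\n","del _chk\n","\n","# ====================== 4. 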
Distill Student (Qwen2.5-0.5B → 768-dim) ======================\n","print(\"\\n👨‍🎓 Distilling student Qwen2.5-0.5B to 768-dim...\")\n","\n","student_model_name = \"Qwen/Qwen2.5-0.5B\"\n","base_student = AutoModel.from_pretrained(\n","    student_model_name,\n","    torch_dtype=torch.float32,\n","    device_map=\"auto\",\n","    trust_remote_code=True\n",")\n","\n","lora_config = LoraConfig(\n","    r=16,\n","    lora_alpha=32,\n","    target_modules=[\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\"],\n","    lora_dropout=0.05,\n","    bias=\"none\",\n","    task_type=\"FEATURE_EXTRACTION\"\n",")\n","student_model = get_peft_model(base_student, lora_config)\n","\n","projection = nn.Linear(base_student.config.hidden_size, 768, dtype=torch.float32).to(\"cuda\")\n","projection.train()\n","\n","# NOTE: the teacher tokenizer from step 3 is reused to tokenize inputs for the student.\n","hf_dataset = HFDataset.from_dict({\"text\": texts})\n","\n","class DistillationDataset(Dataset):\n","    def __init__(self, hf_dataset, tokenizer, teacher_embs, max_length=512):\n","        self.dataset = hf_dataset\n","        self.tokenizer = tokenizer\n","        self.teacher_embs = teacher_embs\n","        self.max_length = max_length\n","\n","    def __len__(self): return len(self.dataset)\n","\n","    def __getitem__(self, idx):\n","        text = self.dataset[idx][\"text\"]\n","        inputs = self.tokenizer(text, padding=\"max_length\", truncation=True, max_length=self.max_length, return_tensors=\"pt\")\n","        return {\n","            \"input_ids\": inputs[\"input_ids\"].squeeze(0),\n","            \"attention_mask\": inputs[\"attention_mask\"].squeeze(0),\n","            \"labels\": self.teacher_embs[idx],\n","        }\n","\n","distill_dataset = DistillationDataset(hf_dataset, tokenizer, teacher_embeddings_768)\n","\n","def collate_fn(batch):\n","    return {\n","        \"input_ids\": torch.stack([item[\"input_ids\"] for item in batch]),\n","        \"attention_mask\": torch.stack([item[\"attention_mask\"] for item in batch]),\n","        \"labels\": torch.stack([item[\"labels\"] for item in batch])\n","    }\n","\n","class DistillTrainer(Trainer):\n","    def compute_loss(self, model, inputs, return_outputs=False, num_items_in_batch=None):\n","        labels = inputs.pop(\"labels\").to(\"cuda\")  # (B, 768)\n","\n","        outputs = model(input_ids=inputs[\"input_ids\"], attention_mask=inputs[\"attention_mask\"])\n","        hidden = outputs.last_hidden_state.mean(dim=1)  # (B, student_hidden)\n","        student_emb = projection(hidden)  # (B, 768)\n","\n","        student_norm = F.normalize(student_emb, p=2, dim=1)\n","        teacher_norm = F.normalize(labels, p=2, dim=1)\n","\n","        # Blend of pointwise agreement (MSE on unit vectors) and direction (cosine), weighted toward direction\n","        mse_loss = F.mse_loss(student_norm, teacher_norm)\n","        cos_loss = (1 - F.cosine_similarity(student_norm, teacher_norm, dim=1)).mean()\n","        loss = 0.25 * mse_loss + 0.75 * cos_loss\n","\n","        return (loss, outputs) if return_outputs else loss\n","\n","training_args = TrainingArguments(\n","    output_dir=\"./distilled_qwen_768\",\n","    per_device_train_batch_size=4,\n","    num_train_epochs=50,\n","    learning_rate=2e-4,\n","    fp16=True,\n","    logging_steps=50,\n","    save_strategy=\"no\",\n","    report_to=\"none\",\n","    remove_unused_columns=False,\n",")\n","\n","# Give the Trainer an optimizer that also covers the projection head; without this the\n","# projection receives gradients but is never stepped, so projection.pth would stay at\n","# its random initialization.\n","optimizer = torch.optim.AdamW(\n","    [p for p in student_model.parameters() if p.requires_grad] + list(projection.parameters()),\n","    lr=training_args.learning_rate,\n",")\n","\n","trainer = DistillTrainer(\n","    model=student_model,\n","    args=training_args,\n","    train_dataset=distill_dataset,\n","    data_collator=collate_fn,\n","    optimizers=(optimizer, None),\n",")\n","\n","print(\"🚀 Starting distillation to 768-dim...\")\n","trainer.train()\n","\n","# ====================== Save ======================\n","import os  # editor's addition: os was not imported at the top of this cell\n","distilled_save_dir = \"/content/drive/MyDrive/distilled_qwen_768_for_flux\"\n","os.makedirs(distilled_save_dir, 
exist_ok=True)\n","student_model.save_pretrained(distilled_save_dir)\n","tokenizer.save_pretrained(distilled_save_dir)\n","torch.save(projection.state_dict(), f\"{distilled_save_dir}/projection.pth\")\n","\n","print(f\"\\nโœ… SUCCESS! 768-dim distilled encoder saved to {distilled_save_dir}\")\n","print(f\" Latents are ready in {latent_dir}\")\n","print(\" You can now run Cell 2.a (load models) + updated Cell 2.b (test forward pass).\")\n","\n","torch.cuda.empty_cache()"],"metadata":{"id":"AV4tp4G87zWH","cellView":"form"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# ============================= CELL 1.c: Pre-compute Teacher & Student Embeddings =============================\n","# @title 1.c โ€“ Pre-compute and save embeddings from both teacher and distilled student\n","\n","import torch\n","import torch.nn.functional as F\n","from tqdm import tqdm\n","from google.colab import drive\n","from transformers import AutoTokenizer, AutoModel # Added this import\n","from peft import PeftModel # Added this import\n","import torch.nn as nn # Added this import for nn.Linear\n","\n","drive.mount('/content/drive', force_remount=True)\n","\n","# Load your saved teacher data (from Cell 1.b)\n","data = torch.load(\"/content/drive/MyDrive/qwen_embeddings_768.pt\", weights_only=False)\n","texts = data[\"texts\"]\n","teacher_embs = data[\"embeddings\"].to(\"cuda\") # (250, 768)\n","\n","print(f\"Loaded {len(texts)} texts and teacher embeddings.\")\n","\n","# Load distilled student (same as in Cell 2.a)\n","DISTILLED_DIR = \"/content/drive/MyDrive/distilled_qwen_768_for_flux\"\n","\n","tokenizer = AutoTokenizer.from_pretrained(DISTILLED_DIR)\n","\n","base_student = AutoModel.from_pretrained(\n"," \"Qwen/Qwen2.5-0.5B\",\n"," torch_dtype=torch.float32,\n"," trust_remote_code=True\n",")\n","\n","student = PeftModel.from_pretrained(base_student, DISTILLED_DIR)\n","student = student.to(\"cuda\")\n","student.eval()\n","\n","projection = nn.Linear(base_student.config.hidden_size, 768, dtype=torch.float32).to(\"cuda\")\n","projection.load_state_dict(torch.load(f\"{DISTILLED_DIR}/projection.pth\", map_location=\"cuda\"))\n","projection.eval()\n","\n","print(\"โœ… Distilled student + projection loaded.\")\n","\n","# Pre-compute student embeddings\n","student_embs_list = []\n","with torch.no_grad():\n"," for text in tqdm(texts, desc=\"Computing student embeddings\"):\n"," inputs = tokenizer(text, padding=True, truncation=True, max_length=512, return_tensors=\"pt\").to(\"cuda\")\n"," outputs = student(**inputs)\n"," hidden = outputs.last_hidden_state.mean(dim=1) # (1, hidden_size)\n"," emb_768 = projection(hidden).to(torch.float32) # (1, 768)\n"," student_embs_list.append(emb_768.squeeze(0))\n","\n","student_embs = torch.stack(student_embs_list) # (250, 768)\n","\n","# Compute alignment stats\n","cos_sims = []\n","for i in range(len(texts)):\n"," cos = F.cosine_similarity(student_embs[i], teacher_embs[i], dim=0)\n"," cos_sims.append(cos.item())\n","\n","cos_sims = torch.tensor(cos_sims)\n","print(\"\\n๐Ÿ“Š Alignment Summary (Teacher vs Student):\")\n","print(f\" Average cosine similarity: {cos_sims.mean().item():.4f}\")\n","print(f\" Min: {cos_sims.min().item():.4f} | Max: {cos_sims.max().item():.4f} | Std: {cos_sims.std().item():.4f}\")\n","\n","# Save everything\n","save_path = \"/content/drive/MyDrive/qwen_embeddings_768_full.pt\"\n","torch.save({\n"," \"texts\": texts,\n"," \"teacher_embeddings\": teacher_embs.cpu(), # (250, 768)\n"," \"student_embeddings\": student_embs.cpu(), # (250, 
"\n","# Save everything\n","save_path = \"/content/drive/MyDrive/qwen_embeddings_768_full.pt\"\n","torch.save({\n"," \"texts\": texts,\n"," \"teacher_embeddings\": teacher_embs.cpu(), # (250, 768)\n"," \"student_embeddings\": student_embs.cpu(), # (250, 768)\n"," \"cosine_similarities\": cos_sims,\n"," \"dim\": 768\n","}, save_path)\n","\n","print(f\"\\nโœ… All embeddings saved to {save_path}\")\n","print(\" You can now use this file in the Cell 2 tests for evaluation or LoRA training.\")\n","torch.cuda.empty_cache()"],"metadata":{"id":"qF8kMiOk85EY","cellView":"form"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":["Cell 2 โ€“ evaluation tests"],"metadata":{"id":"dz4oMmJ79bxQ"}},{"cell_type":"code","source":["# ============================= CELL 2.a: Load Models & Data for Evaluation =============================\n","# @title 2.a โ€“ Load distilled student, transformer, and pre-computed embeddings\n","\n","import torch\n","import torch.nn as nn\n","from transformers import AutoTokenizer, AutoModel\n","from peft import PeftModel\n","from diffusers import FluxTransformer2DModel\n","from google.colab import drive\n","\n","drive.mount('/content/drive', force_remount=True)\n","\n","# ====================== 1. Load Distilled Student (768-dim) ======================\n","print(\"๐Ÿ”„ Loading distilled Qwen2.5-0.5B (768-dim)...\")\n","DISTILLED_DIR = \"/content/drive/MyDrive/distilled_qwen_768_for_flux\"\n","\n","tokenizer = AutoTokenizer.from_pretrained(DISTILLED_DIR)\n","\n","base_student = AutoModel.from_pretrained(\n"," \"Qwen/Qwen2.5-0.5B\",\n"," torch_dtype=torch.float32,\n"," trust_remote_code=True\n",")\n","\n","student = PeftModel.from_pretrained(base_student, DISTILLED_DIR)\n","student = student.to(\"cuda\")\n","student.eval()\n","\n","projection = nn.Linear(base_student.config.hidden_size, 768, dtype=torch.float32).to(\"cuda\")\n","projection.load_state_dict(torch.load(f\"{DISTILLED_DIR}/projection.pth\", map_location=\"cuda\"))\n","projection.eval()\n","\n","print(\"โœ… Distilled student + projection loaded.\")\n","print(f\" Student hidden size: {base_student.config.hidden_size} โ†’ 768-dim output\")\n","\n","# ====================== 2. Load FLUX.2-klein Transformer ======================\n","print(\"\\n๐Ÿ”„ Loading FLUX.2-klein transformer...\")\n","transformer = FluxTransformer2DModel.from_pretrained(\n"," \"black-forest-labs/FLUX.2-klein-base-4B\",\n"," subfolder=\"transformer\",\n"," torch_dtype=torch.bfloat16,\n"," low_cpu_mem_usage=False\n",").to(\"cuda\")\n","\n","print(\"โœ… FLUX.2-klein transformer loaded on CUDA\")\n",
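"\n","# Editor's sketch: the widths assumed later (7680-dim text states in 2.c/4.b,\n","# 128 packed-latent channels, 768-dim pooled) should match this checkpoint's\n","# config; field names below assume diffusers' FluxTransformer2DModel schema.\n","_cfg = transformer.config\n","print(f\" in_channels={getattr(_cfg, 'in_channels', '?')} | joint_attention_dim={getattr(_cfg, 'joint_attention_dim', '?')} | pooled_projection_dim={getattr(_cfg, 'pooled_projection_dim', '?')}\")\n",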
"\n","# ====================== 3. Load Pre-computed Embeddings ======================\n","data = torch.load(\"/content/drive/MyDrive/qwen_embeddings_768_full.pt\", weights_only=False)\n","texts = data[\"texts\"]\n","teacher_embs = data[\"teacher_embeddings\"].to(\"cuda\") # (250, 768)\n","student_embs = data[\"student_embeddings\"].to(\"cuda\") # (250, 768)\n","\n","print(f\"โœ… Loaded pre-computed embeddings for {len(texts)} texts\")\n","print(f\" Teacher shape: {teacher_embs.shape} | Student shape: {student_embs.shape}\")\n","\n","torch.cuda.empty_cache()"],"metadata":{"id":"8B4f-gxe9kpC","cellView":"form"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# ============================= CELL 2.b: Alignment Test (Teacher vs Student) =============================\n","# @title 2.b โ€“ Cosine similarity across all 250 embeddings\n","\n","import torch\n","import torch.nn.functional as F\n","from tqdm import tqdm\n","\n","print(\"๐Ÿ”ฌ Computing alignment between teacher and student on all 250 texts...\")\n","\n","cos_sims = []\n","with torch.no_grad():\n"," for i in tqdm(range(len(texts)), desc=\"Alignment check\"):\n"," cos = F.cosine_similarity(student_embs[i], teacher_embs[i], dim=0)\n"," cos_sims.append(cos.item())\n","\n","cos_sims = torch.tensor(cos_sims)\n","\n","print(\"\\nโœ… ALIGNMENT RESULTS (Teacher vs Distilled Student):\")\n","print(f\" Average cosine similarity: {cos_sims.mean().item():.4f}\")\n","print(f\" Minimum: {cos_sims.min().item():.4f}\")\n","print(f\" Maximum: {cos_sims.max().item():.4f}\")\n","print(f\" Standard deviation: {cos_sims.std().item():.4f}\")\n","\n","if cos_sims.mean().item() > 0.94:\n"," print(\"๐ŸŽ‰ Excellent alignment! Your distillation is high quality.\")\n","elif cos_sims.mean().item() > 0.90:\n"," print(\"โœ… Good alignment. Safe for LoRA training.\")\n","else:\n"," print(\"โš ๏ธ Alignment is moderate. Consider more distillation epochs or a higher LoRA rank.\")"],"metadata":{"id":"kMiMZmie9n1V","cellView":"form"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# ============================= CELL 2.c: Fixed with 64โ†’128 Projection for Klein =============================\n","# @title 2.c โ€“ Test with standard packing + small 64โ†’128 projection\n","\n","import torch\n","import torch.nn as nn\n","import torch.nn.functional as F\n","\n","text = texts[0]\n","print(f\"๐Ÿงช Test text: {text[:100]}...\")\n","\n","# 1. Student embedding\n","inputs = tokenizer(text, padding=True, truncation=True, max_length=512, return_tensors=\"pt\").to(\"cuda\")\n","with torch.no_grad():\n"," outputs = student(**inputs)\n"," hidden = outputs.last_hidden_state.mean(dim=1)\n"," emb_768 = projection(hidden).to(torch.bfloat16)\n","\n","print(f\"โœ… emb_768 shape: {emb_768.shape}\")\n","\n","# 2. Text side: tile 768 โ†’ 7680 to match the transformer's expected text width\n","encoder_hidden = emb_768.unsqueeze(1).repeat(1, 1, 10) # (1, 1, 7680)\n","pooled_projections = emb_768\n","\n","# 3. Rotary IDs\n","txt_ids = torch.zeros((1, 3), device=\"cuda\", dtype=torch.bfloat16)\n","img_ids = torch.zeros((4096, 3), device=\"cuda\", dtype=torch.bfloat16)\n","\n","# 4. Latent packing: 2x2 pixel shuffle, 16 ch โ†’ 64-ch tokens, then project to 128\n","dummy_latent = torch.randn(1, 16, 128, 128, device=\"cuda\", dtype=torch.bfloat16)\n","b, c, h, w = dummy_latent.shape\n","latent_packed = dummy_latent.view(b, c, h//2, 2, w//2, 2)\n","latent_packed = latent_packed.permute(0, 2, 4, 1, 3, 5).contiguous()\n","hidden_states = latent_packed.view(b, (h//2)*(w//2), c * 4) # (1, 4096, 64)\n",
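"\n","# Editor's check: the packing above is a pure reshuffle, so inverting it must\n","# reproduce the input exactly; this guards the view/permute order.\n","_unpacked = hidden_states.view(b, h//2, w//2, c, 2, 2).permute(0, 3, 1, 4, 2, 5).reshape(b, c, h, w)\n","assert torch.equal(_unpacked, dummy_latent), \"packing is not invertible\"\n",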
"\n","# Small fixed projection 64 โ†’ 128 (randomly initialized โ€“ this cell only checks shapes)\n","proj_64_to_128 = nn.Linear(64, 128, bias=False, dtype=torch.bfloat16, device=\"cuda\")\n","hidden_states = proj_64_to_128(hidden_states)\n","\n","print(f\"hidden_states after 64โ†’128: {hidden_states.shape}\")\n","\n","timestep = torch.tensor([1000.0], device=\"cuda\", dtype=torch.bfloat16)\n","\n","# 5. Forward pass\n","print(\"\\n=== Running forward pass ===\")\n","try:\n"," output = transformer(\n"," hidden_states=hidden_states,\n"," timestep=timestep,\n"," encoder_hidden_states=encoder_hidden,\n"," pooled_projections=pooled_projections,\n"," txt_ids=txt_ids,\n"," img_ids=img_ids,\n"," return_dict=False\n"," )[0]\n"," print(\"๐ŸŽ‰ SUCCESS! Forward pass works.\")\n"," print(f\"Output shape: {output.shape}\")\n","except Exception as e:\n"," print(\"โŒ Failed:\")\n"," print(str(e))"],"metadata":{"cellView":"form","id":"9c0V4D7sCP2B"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":["Cell 3\n"],"metadata":{"id":"dz6FDD1aBSCt"}},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":73},"executionInfo":{"elapsed":4651,"status":"ok","timestamp":1775007678343,"user":{"displayName":"Nnekos4Lyfe","userId":"11521490251288716219"},"user_tz":-120},"id":"FQF71-mvmlc1","outputId":"ab97d8ee-a278-4d0b-9a6b-9f99a47946ab"},"outputs":[{"name":"stdout","output_type":"stream","text":["๐Ÿ”Œ Disconnecting Colab session in 15 seconds...\n","Session disconnected.\n"]}],"source":["# ================================================\n","# CELL 3: Auto Disconnect Colab Session\n","# ================================================\n","\n","print(\"๐Ÿ”Œ Disconnecting Colab session in 15 seconds...\")\n","import time\n","time.sleep(15)\n","\n","from google.colab import runtime\n","runtime.unassign()\n","\n","print(\"Session disconnected.\")"]},{"cell_type":"markdown","metadata":{"id":"cfshTDIFM5ND"},"source":["You can disconnect the Colab runtime past this point.
 All data from Cells 1 and 2 is saved to Drive."]},{"cell_type":"code","execution_count":null,"metadata":{"id":"sWEzuqsmvKua","cellView":"form"},"outputs":[],"source":["# ============================= CELL 3.a: Install Dependencies + Setup Parameters + Load Qwen Text Encoder =============================\n","# @title 3.a Setup + Load Text Encoder (one-time for precompute)\n","\n","!pip install -q diffusers transformers peft accelerate datasets tqdm\n","\n","import os\n","import torch\n","import gc\n","from google.colab import drive\n","from diffusers import Flux2KleinPipeline\n","\n","drive.mount('/content/drive', force_remount=True)\n","\n","# ====================== Parameters ======================\n","LATENT_DIR = \"/content/drive/MyDrive/flux_klein_latents\"\n","FINAL_LORA_DIR = \"/content/drive/MyDrive/flux_klein_lora_final\"\n","\n","BATCH_SIZE = 1 # keep at 1 for safety with variable sequence lengths\n","NUM_EPOCHS = 8\n","LEARNING_RATE = 1e-4\n","LORA_RANK = 32\n","LORA_ALPHA = 32\n","\n","print(\"โœ… Dependencies installed and parameters set.\")\n","print(f\" Batch size: {BATCH_SIZE} | Epochs: {NUM_EPOCHS}\")\n","print(f\" Latents from: {LATENT_DIR}\")\n","print(f\" Final LoRA will be saved to: {FINAL_LORA_DIR}\")\n","\n","# ====================== Load Pipeline + Text Encoder ======================\n","print(\"\\n๐Ÿ”„ Loading FLUX.2-klein-base-4B pipeline (Qwen3-4B text encoder)...\")\n","\n","pipe = Flux2KleinPipeline.from_pretrained(\n"," \"black-forest-labs/FLUX.2-klein-base-4B\",\n"," torch_dtype=torch.bfloat16,\n"," device_map=\"balanced\",\n"," low_cpu_mem_usage=True\n",")\n","\n","text_encoder = pipe.text_encoder\n","tokenizer = pipe.tokenizer\n","\n","# Force to CUDA and enable hidden-states output\n","text_encoder = text_encoder.to(\"cuda\")\n","text_encoder.config.output_hidden_states = True\n","text_encoder.eval()\n","\n","print(\"โœ… Text encoder loaded and moved to CUDA\")\n","print(f\"Current VRAM used: {torch.cuda.memory_allocated()/1024**3:.2f} GB\")\n","\n","# Quick VRAM check\n","print(\"\\n๐Ÿ” Quick VRAM check:\")\n","!nvidia-smi --query-gpu=memory.used,memory.total --format=csv"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"8hDfeHlNvPWE","cellView":"form"},"outputs":[],"source":["# ============================= CELL 3.b: Precompute Exact Qwen3-4B Embeddings =============================\n","# @title 3.b Precompute Embeddings (using text encoder from 3.a)\n","\n","import torch\n","from tqdm import tqdm\n","\n","# Load your 250 texts\n","data = torch.load(\"/content/drive/MyDrive/qwen_embeddings_768.pt\", weights_only=False)\n","texts = data[\"texts\"]\n","\n","precomputed = {\n"," \"encoder_hidden_states\": [], # list of (seq_len, hidden_dim)\n"," \"pooled_projections\": [] # list of (hidden_dim,)\n","}\n",
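"\n","# Editor's note (back-of-envelope): 250 captions x 512 tokens x 2560 dims in fp32\n","# is about 250*512*2560*4 bytes ~= 1.3 GB on disk; store fp16 or trim the padding\n","# if Drive space is tight.\n",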
"\n","with torch.no_grad():\n"," for i, raw_text in enumerate(tqdm(texts, desc=\"Precomputing embeddings\")):\n"," text = raw_text.strip()\n"," if not text:\n"," text = \"a photo of a scene\"\n","\n"," # With padding=\"max_length\" the sequence is always 512 tokens, so no\n"," # zero-length guard is needed after the empty-text fallback above.\n"," inputs = tokenizer(\n"," text,\n"," padding=\"max_length\",\n"," truncation=True,\n"," max_length=512,\n"," return_tensors=\"pt\",\n"," return_attention_mask=True\n"," ).to(\"cuda\")\n","\n"," outputs = text_encoder(**inputs)\n","\n"," # Handle CausalLMOutputWithPast correctly\n"," if hasattr(outputs, \"hidden_states\") and outputs.hidden_states is not None:\n"," hidden = outputs.hidden_states[-1].squeeze(0).cpu() # final layer: (seq_len, hidden_dim)\n"," elif hasattr(outputs, \"last_hidden_state\"):\n"," hidden = outputs.last_hidden_state.squeeze(0).cpu()\n"," else:\n"," print(f\"Warning: unexpected output for text {i}, using logits as fallback\")\n"," hidden = outputs.logits.squeeze(0).cpu()\n","\n"," # Pool only over real tokens: padding=\"max_length\" pads to 512, and averaging\n"," # the pad positions would dilute the embedding.\n"," mask = inputs[\"attention_mask\"].squeeze(0).unsqueeze(-1).to(hidden.dtype).cpu() # (seq_len, 1)\n"," pooled = (hidden * mask).sum(dim=0) / mask.sum().clamp(min=1) # (hidden_dim,)\n","\n"," precomputed[\"encoder_hidden_states\"].append(hidden)\n"," precomputed[\"pooled_projections\"].append(pooled)\n","\n","print(f\"โœ… Successfully precomputed embeddings for {len(texts)} texts\")\n","torch.save(precomputed, \"/content/drive/MyDrive/klein_exact_embeddings.pt\")\n","print(\"Saved to /content/drive/MyDrive/klein_exact_embeddings.pt\")"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"px5wQJZ2vTUf","cellView":"form"},"outputs":[],"source":["# ============================= CELL 3.c: Unload Text Encoder + Prepare Workspace =============================\n","# @title 3.c Cleanup โ€“ Unload Qwen Encoder\n","\n","import gc\n","\n","# Unload pipeline and text encoder\n","if 'pipe' in globals():\n"," del pipe\n","if 'text_encoder' in globals():\n"," del text_encoder\n","if 'tokenizer' in globals():\n"," del tokenizer\n","\n","torch.cuda.empty_cache()\n","gc.collect()\n","\n","print(\"โœ… Text encoder and pipeline fully unloaded from VRAM\")\n","print(f\"Current VRAM used: {torch.cuda.memory_allocated()/1024**3:.2f} GB\")\n","\n","# Quick check for latents\n","latent_files = sorted([f for f in os.listdir(LATENT_DIR) if f.endswith(\".pt\")])\n","print(f\"Found {len(latent_files)} latents ready for training\")"]},{"cell_type":"markdown","metadata":{"id":"GDNO0bonrYAo"},"source":["LoRA training"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"llxOztXpvYMO","cellView":"form"},"outputs":[],"source":["# ============================= CELL 4.a: Load Transformer + Apply LoRA =============================\n","# @title 4.a Load Transformer + LoRA (Fixed meta tensor loading)\n","\n","import torch\n","import gc\n","from torch.utils.data import Dataset\n","from peft import LoraConfig, get_peft_model\n","from diffusers import FluxTransformer2DModel\n","from transformers import set_seed\n","\n","set_seed(42)\n","\n","print(\"=== CELL 4.a โ€“ Loading Transformer + LoRA ===\")\n","print(f\"Current VRAM before loading: {torch.cuda.memory_allocated()/1024**3:.2f} GB\")\n","\n","# Load precomputed embeddings\n","precomputed = torch.load(\"/content/drive/MyDrive/klein_exact_embeddings.pt\", weights_only=True)\n","\n","# ====================== Load Transformer (Fixed) ======================\n","print(\"Loading FLUX.2-klein-base-4B transformer...\")\n","\n","transformer = FluxTransformer2DModel.from_pretrained(\n"," \"black-forest-labs/FLUX.2-klein-base-4B\",\n"," subfolder=\"transformer\",\n"," torch_dtype=torch.bfloat16,\n"," low_cpu_mem_usage=False # Critical: prevents meta tensors\n",").to(\"cuda\")\n","\n","print(f\"โœ… Transformer loaded successfully. VRAM: {torch.cuda.memory_allocated()/1024**3:.2f} GB\")\n","\n","# ====================== Apply LoRA ======================\n","lora_config = LoraConfig(\n"," r=LORA_RANK,\n"," lora_alpha=LORA_ALPHA,\n"," target_modules=[\n"," \"attn.to_q\", \"attn.to_k\", \"attn.to_v\", \"attn.to_out.0\",\n"," \"attn.to_qkv_mlp_proj\", \"attn.add_q_proj\", \"attn.add_k_proj\",\n"," \"attn.add_v_proj\", \"attn.to_add_out\",\n"," \"ff.linear_in\", \"ff.linear_out\", \"ff_context.linear_in\", \"ff_context.linear_out\"\n"," ],\n"," lora_dropout=0.05,\n"," bias=\"none\"\n",")\n","\n","transformer = get_peft_model(transformer, lora_config)\n","transformer.train()\n",
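"\n","# Editor's sketch: PEFT exposes a trainable-vs-total parameter count; useful to\n","# confirm that only the LoRA adapters will receive gradients.\n","transformer.print_trainable_parameters()\n",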
"\n","print(\"โœ… LoRA applied successfully\")\n","print(f\"VRAM after LoRA: {torch.cuda.memory_allocated()/1024**3:.2f} GB\")"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"rx5ngImGxoTT","cellView":"form"},"outputs":[],"source":["# ============================= CELL 4.b: LoRA Training (Robust version for klein) =============================\n","# @title 4.b Training with Precomputed + Careful Reshape\n","\n","from transformers import Trainer, TrainingArguments\n","import torch.nn.functional as F\n","import torch.nn as nn\n","from tqdm import tqdm\n","\n","# Load latents (keep on CPU until needed)\n","latents_list = []\n","for lf in tqdm(sorted([f for f in os.listdir(LATENT_DIR) if f.endswith(\".pt\")]), desc=\"Loading latents\"):\n"," latent = torch.load(os.path.join(LATENT_DIR, lf), weights_only=True)\n"," if latent.dim() == 4 and latent.shape[0] == 1:\n"," latent = latent.squeeze(0)\n"," latents_list.append(latent)\n","latents = torch.stack(latents_list)\n","print(f\"Latents shape: {latents.shape}\")\n","\n","precomputed = torch.load(\"/content/drive/MyDrive/klein_exact_embeddings.pt\", weights_only=True)\n","\n","# Pooled: 2560 โ†’ 768 (the pooled_projections width of FluxTransformer2DModel);\n","# initialized to copy the first 768 of the 2560 dims, then trained jointly with\n","# the LoRA (see the optimizer passed to the trainer below).\n","pooled_projection = nn.Linear(2560, 768, bias=True, dtype=torch.bfloat16).to(\"cuda\")\n","with torch.no_grad():\n"," min_d = min(2560, 768)\n"," pooled_projection.weight.data[:, :min_d] = torch.eye(min_d, dtype=torch.bfloat16)\n"," pooled_projection.bias.data.zero_()\n","pooled_projection.train()\n","\n","def pack_latents(x):\n"," # (B, 16, H, W) โ†’ (B, H/2*W/2, 64): the same 2x2 pixel shuffle as in Cell 2.c\n"," b, c, h, w = x.shape\n"," x = x.view(b, c, h//2, 2, w//2, 2).permute(0, 2, 4, 1, 3, 5).contiguous()\n"," return x.view(b, (h//2)*(w//2), c * 4)\n","\n","class FluxLoRADataset(Dataset):\n"," def __init__(self, latents, precomputed):\n"," self.latents = latents\n"," self.encoder_hs = precomputed[\"encoder_hidden_states\"]\n"," self.pooled = precomputed[\"pooled_projections\"]\n","\n"," def __len__(self): return len(self.latents)\n","\n"," def __getitem__(self, idx):\n"," return {\n"," \"latent\": self.latents[idx],\n"," \"encoder_hidden_states\": self.encoder_hs[idx], # (seq_len, 2560)\n"," \"pooled_raw\": self.pooled[idx]\n"," }\n","\n","def collate_fn(batch):\n"," return {\n"," \"latent\": torch.stack([item[\"latent\"] for item in batch]),\n"," \"encoder_hidden_states\": [item[\"encoder_hidden_states\"] for item in batch],\n"," \"pooled_raw\": torch.stack([item[\"pooled_raw\"] for item in batch])\n"," }\n","\n","class FluxLoRATrainer(Trainer):\n"," def compute_loss(self, model, inputs, return_outputs=False, num_items_in_batch=None):\n"," latents = inputs[\"latent\"].to(dtype=torch.bfloat16, device=model.device)\n"," encoder_hs_list = inputs[\"encoder_hidden_states\"]\n"," pooled_raw = inputs[\"pooled_raw\"].to(dtype=torch.bfloat16, device=model.device)\n","\n"," pooled_projections = pooled_projection(pooled_raw)\n","\n"," batch_size = latents.shape[0]\n"," timesteps = torch.rand(batch_size, device=latents.device)\n"," noise = torch.randn_like(latents)\n",
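"\n"," # Editor's note: this is the rectified-flow / flow-matching objective: the\n"," # interpolation x_t = (1 - t) * x0 + t * noise has velocity dx_t/dt = noise - x0,\n"," # which is exactly the regression target used for the MSE below.\n",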
" noisy_latents = (1 - timesteps.view(-1, 1, 1, 1)) * latents + timesteps.view(-1, 1, 1, 1) * noise\n","\n"," # === Reshape encoder_hidden_states ===\n"," # Take the single item (B=1), move it to the device, tile 2560 โ†’ 7680 to match\n"," # the width used in Cell 2.c, and restore the batch dimension the model expects.\n"," enc = encoder_hs_list[0].to(dtype=torch.bfloat16, device=model.device) # (seq_len, 2560)\n"," encoder_hidden_states = torch.cat([enc, enc, enc], dim=-1).unsqueeze(0) # (1, seq_len, 7680)\n","\n"," seq_len = encoder_hidden_states.shape[1]\n"," txt_ids = torch.zeros((seq_len, 3), device=model.device, dtype=torch.bfloat16)\n","\n"," # Pack latents into tokens as in Cell 2.c: (B, 16, 128, 128) โ†’ (B, 4096, 64).\n"," # The transformer expects token-packed hidden_states, not a 4D latent; if the\n"," # klein checkpoint wants 128-ch tokens (cf. the 64โ†’128 projection in Cell 2.c),\n"," # a matching trainable projection would be needed here as well.\n"," packed_noisy = pack_latents(noisy_latents)\n"," img_ids = torch.zeros((packed_noisy.shape[1], 3), device=model.device, dtype=torch.bfloat16)\n","\n"," model_output = model(\n"," hidden_states=packed_noisy,\n"," timestep=timesteps * 1000,\n"," encoder_hidden_states=encoder_hidden_states,\n"," pooled_projections=pooled_projections,\n"," txt_ids=txt_ids,\n"," img_ids=img_ids,\n"," return_dict=False\n"," )[0]\n","\n"," # Flow-matching target, packed into the same token space as the model output\n"," target = pack_latents(noise - latents)\n"," loss = F.mse_loss(model_output, target)\n","\n"," return (loss, model_output) if return_outputs else loss\n","\n","training_args = TrainingArguments(\n"," output_dir=\"/content/flux_klein_lora\",\n"," per_device_train_batch_size=BATCH_SIZE,\n"," num_train_epochs=NUM_EPOCHS,\n"," learning_rate=LEARNING_RATE,\n"," lr_scheduler_type=\"cosine\",\n"," warmup_steps=50,\n"," bf16=True,\n"," logging_steps=10,\n"," save_strategy=\"epoch\",\n"," save_total_limit=2,\n"," report_to=\"none\",\n"," remove_unused_columns=False,\n"," dataloader_pin_memory=False,\n",")\n","\n","# Build the training dataset and a joint optimizer so pooled_projection is\n","# updated alongside the LoRA weights (the Trainer would otherwise ignore it).\n","dataset = FluxLoRADataset(latents, precomputed)\n","\n","optimizer = torch.optim.AdamW(\n"," [p for p in transformer.parameters() if p.requires_grad] + list(pooled_projection.parameters()),\n"," lr=LEARNING_RATE,\n",")\n","\n","trainer = FluxLoRATrainer(\n"," model=transformer,\n"," args=training_args,\n"," train_dataset=dataset,\n"," data_collator=collate_fn,\n"," optimizers=(optimizer, None), # LR scheduler is still built from training_args\n",")\n","\n","print(\"\\n๐Ÿš€ Starting training...\")\n","trainer.train()"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"DRoYrgC-rbkt","cellView":"form"},"outputs":[],"source":["# ============================= CELL 4.c: Save LoRA =============================\n","# @title 4.c Save Final LoRA\n","\n","final_lora_dir = FINAL_LORA_DIR\n","os.makedirs(final_lora_dir, exist_ok=True)\n","transformer.save_pretrained(final_lora_dir)\n","# The pooled_projection layer was trained too; save it next to the adapter\n","torch.save(pooled_projection.state_dict(), f\"{final_lora_dir}/pooled_projection.pth\")\n","\n","print(f\"\\nโœ… Training completed! LoRA saved to: {final_lora_dir}\")\n","torch.cuda.empty_cache()\n","gc.collect()"]}],"metadata":{"accelerator":"GPU","colab":{"gpuType":"T4","provenance":[]},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"name":"python"}},"nbformat":4,"nbformat_minor":0}
\ No newline at end of file