codeShare committed on
Commit
e15bb81
·
verified ·
1 Parent(s): e9c0255

Upload Qwen destill.ipynb

Browse files
Files changed (1) hide show
  1. Qwen destill.ipynb +1 -1
Qwen destill.ipynb CHANGED
@@ -1 +1 @@
1
- {"cells":[{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":16012,"status":"ok","timestamp":1775044094232,"user":{"displayName":"Nnekos4Lyfe","userId":"11521490251288716219"},"user_tz":-120},"id":"9QVc2_k_bL3P","outputId":"595bc17c-d374-4222-e293-4fd31d520bbc"},"outputs":[{"name":"stdout","output_type":"stream","text":["Mounted at /content/drive\n"]}],"source":["from google.colab import drive\n","drive.mount('/content/drive')"]},{"cell_type":"code","source":["# ============================= CELL 1.a: Extract Data + Encode Latents (FLUX.2-klein VAE) =============================\n","# @title 1.a – Prepare Images and Encode with Correct FLUX.2 VAE\n","\n","import os\n","import zipfile\n","import torch\n","import numpy as np\n","from google.colab import drive\n","from PIL import Image\n","from tqdm import tqdm\n","from diffusers import AutoencoderKL\n","from transformers import set_seed\n","\n","set_seed(42) # if you have it imported, otherwise add: from transformers import set_seed\n","drive.mount('/content/drive', force_remount=True)\n","\n","zip_path = '/content/drive/MyDrive/my_set.zip' # @param {type:'string'}\n","\n","# ====================== 1. 
Extract Data ======================\n","print(\"πŸ“¦ Extracting zip...\")\n","extract_dir = \"/content/data\"\n","os.makedirs(extract_dir, exist_ok=True)\n","\n","with zipfile.ZipFile(zip_path, 'r') as zip_ref:\n"," zip_ref.extractall(extract_dir)\n","\n","image_files = [f for f in os.listdir(extract_dir) if f.lower().endswith(('.png', '.jpg', '.jpeg', '.webp'))]\n","print(f\"βœ… Found {len(image_files)} images\")\n","\n","text_files = sorted([f for f in os.listdir(extract_dir) if f.endswith('.txt') and f[0].isdigit()])\n","texts = []\n","for tf in text_files:\n"," with open(os.path.join(extract_dir, tf), \"r\", encoding=\"utf-8\") as f:\n"," content = f.read().strip()\n"," if content:\n"," texts.append(content)\n","\n","print(f\"βœ… Loaded {len(texts)} captions\")\n","\n","# ====================== 2. Encode Images β†’ FLUX.2 VAE Latents (Correct for klein) ======================\n","latent_dir = \"/content/drive/MyDrive/flux_klein_latents\"\n","if os.path.exists(latent_dir) and len([f for f in os.listdir(latent_dir) if f.endswith(\".pt\")]) == len(image_files):\n"," print(f\"βœ… Using existing latents from {latent_dir}\")\n","else:\n"," print(\"\\nπŸŒ€ Encoding images to FLUX.2 VAE latents (recommended for FLUX.2-klein)...\")\n","\n"," # Use FLUX.2 VAE (not FLUX.1-dev)\n"," vae = AutoencoderKL.from_pretrained(\n"," \"black-forest-labs/FLUX.2-dev\", # or \"black-forest-labs/FLUX.2-klein-4B\" if it has vae subfolder\n"," subfolder=\"vae\",\n"," torch_dtype=torch.float32,\n"," device_map=\"auto\"\n"," )\n"," vae.eval()\n","\n"," os.makedirs(latent_dir, exist_ok=True)\n","\n"," with torch.no_grad():\n"," for img_file in tqdm(image_files, desc=\"Encoding latents\"):\n"," img_path = os.path.join(extract_dir, img_file)\n"," image = Image.open(img_path).convert(\"RGB\").resize((1024, 1024), Image.LANCZOS)\n","\n"," pixel_values = (torch.from_numpy(np.array(image)).permute(2, 0, 1).unsqueeze(0).float() / 255.0)\n"," pixel_values = pixel_values.to(vae.device, 
dtype=vae.dtype) * 2.0 - 1.0\n","\n"," latents = vae.encode(pixel_values).latent_dist.sample() * vae.config.scaling_factor\n"," latent_name = os.path.splitext(img_file)[0] + \".pt\"\n"," torch.save(latents.cpu(), os.path.join(latent_dir, latent_name))\n","\n"," del vae\n"," torch.cuda.empty_cache()\n"," print(f\"βœ… Latents saved to {latent_dir} (using FLUX.2 VAE)\")"],"metadata":{"id":"mBSTd5Cb7nmF","cellView":"form"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# ============================= CELL 1.b: Teacher Embeddings + Distill 768-dim Encoder =============================\n","# @title 1.b – Distill Qwen2.5-0.5B to 768-dim (using your 250 texts)\n","\n","import torch\n","import torch.nn as nn\n","import torch.nn.functional as F\n","from transformers import AutoTokenizer, AutoModel\n","from datasets import Dataset as HFDataset\n","from torch.utils.data import Dataset\n","from peft import LoraConfig, get_peft_model\n","from transformers import Trainer, TrainingArguments, set_seed\n","\n","set_seed(42)\n","\n","# ====================== 3. 
Compute Teacher Embeddings (768-dim) ======================\n","print(\"\\nπŸ“ Computing teacher embeddings (Qwen3-Embedding-0.6B) ...\")\n","\n","teacher_model_name = \"Qwen/Qwen3-Embedding-0.6B\"\n","tokenizer = AutoTokenizer.from_pretrained(teacher_model_name)\n","teacher_model = AutoModel.from_pretrained(\n"," teacher_model_name,\n"," torch_dtype=torch.float16,\n"," device_map=\"auto\",\n"," trust_remote_code=True\n",")\n","teacher_model.eval()\n","\n","teacher_embeddings = []\n","with torch.no_grad():\n"," for text in tqdm(texts, desc=\"Teacher encoding\"):\n"," inputs = tokenizer(text, padding=True, truncation=True, max_length=512, return_tensors=\"pt\").to(teacher_model.device)\n"," outputs = teacher_model(**inputs)\n"," emb = outputs.last_hidden_state.mean(dim=1).squeeze(0).cpu() # (768,)\n"," teacher_embeddings.append(emb)\n","\n","teacher_embeddings_768 = torch.stack(teacher_embeddings)\n","print(f\"βœ… Teacher embeddings shape: {teacher_embeddings_768.shape}\")\n","\n","# Save teacher embeddings\n","torch.save({\n"," \"embeddings\": teacher_embeddings_768,\n"," \"texts\": texts,\n"," \"dim\": 768\n","}, \"/content/drive/MyDrive/qwen_embeddings_768.pt\")\n","\n","del teacher_model\n","torch.cuda.empty_cache()\n","\n","# ====================== 4. 
Distill Student (Qwen2.5-0.5B β†’ 768-dim) ======================\n","print(\"\\nπŸ‘¨β€πŸŽ“ Distilling student Qwen2.5-0.5B to 768-dim...\")\n","\n","student_model_name = \"Qwen/Qwen2.5-0.5B\"\n","base_student = AutoModel.from_pretrained(\n"," student_model_name,\n"," torch_dtype=torch.float32,\n"," device_map=\"auto\",\n"," trust_remote_code=True\n",")\n","\n","lora_config = LoraConfig(\n"," r=16,\n"," lora_alpha=32,\n"," target_modules=[\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\"],\n"," lora_dropout=0.05,\n"," bias=\"none\",\n"," task_type=\"FEATURE_EXTRACTION\"\n",")\n","student_model = get_peft_model(base_student, lora_config)\n","\n","projection = nn.Linear(base_student.config.hidden_size, 768, dtype=torch.float32).to(\"cuda\")\n","projection.train()\n","\n","hf_dataset = HFDataset.from_dict({\"text\": texts})\n","\n","class DistillationDataset(Dataset):\n"," def __init__(self, hf_dataset, tokenizer, teacher_embs, max_length=512):\n"," self.dataset = hf_dataset\n"," self.tokenizer = tokenizer\n"," self.teacher_embs = teacher_embs\n"," self.max_length = max_length\n","\n"," def __len__(self): return len(self.dataset)\n","\n"," def __getitem__(self, idx):\n"," text = self.dataset[idx][\"text\"]\n"," inputs = self.tokenizer(text, padding=\"max_length\", truncation=True, max_length=self.max_length, return_tensors=\"pt\")\n"," return {\n"," \"input_ids\": inputs[\"input_ids\"].squeeze(0),\n"," \"attention_mask\": inputs[\"attention_mask\"].squeeze(0),\n"," \"labels\": self.teacher_embs[idx],\n"," }\n","\n","distill_dataset = DistillationDataset(hf_dataset, tokenizer, teacher_embeddings_768)\n","\n","def collate_fn(batch):\n"," return {\n"," \"input_ids\": torch.stack([item[\"input_ids\"] for item in batch]),\n"," \"attention_mask\": torch.stack([item[\"attention_mask\"] for item in batch]),\n"," \"labels\": torch.stack([item[\"labels\"] for item in batch])\n"," }\n","\n","class DistillTrainer(Trainer):\n"," def compute_loss(self, model, inputs, 
return_outputs=False, num_items_in_batch=None):\n"," labels = inputs.pop(\"labels\").to(\"cuda\") # (B, 768)\n","\n"," outputs = model(input_ids=inputs[\"input_ids\"], attention_mask=inputs[\"attention_mask\"])\n"," hidden = outputs.last_hidden_state.mean(dim=1) # (B, student_hidden)\n"," student_emb = projection(hidden) # (B, 768)\n","\n"," student_norm = F.normalize(student_emb, p=2, dim=1)\n"," teacher_norm = F.normalize(labels, p=2, dim=1)\n","\n"," mse_loss = F.mse_loss(student_norm, teacher_norm)\n"," cos_loss = (1 - F.cosine_similarity(student_norm, teacher_norm, dim=1)).mean()\n"," loss = 0.25 * mse_loss + 0.75 * cos_loss\n","\n"," return (loss, outputs) if return_outputs else loss\n","\n","training_args = TrainingArguments(\n"," output_dir=\"./distilled_qwen_768\",\n"," per_device_train_batch_size=4,\n"," num_train_epochs=50,\n"," learning_rate=2e-4,\n"," fp16=True,\n"," logging_steps=50,\n"," save_strategy=\"no\",\n"," report_to=\"none\",\n"," remove_unused_columns=False,\n",")\n","\n","trainer = DistillTrainer(\n"," model=student_model,\n"," args=training_args,\n"," train_dataset=distill_dataset,\n"," data_collator=collate_fn,\n",")\n","\n","print(\"πŸš€ Starting distillation to 768-dim...\")\n","trainer.train()\n","\n","# ====================== Save ======================\n","distilled_save_dir = \"/content/drive/MyDrive/distilled_qwen_768_for_flux\"\n","os.makedirs(distilled_save_dir, exist_ok=True)\n","student_model.save_pretrained(distilled_save_dir)\n","tokenizer.save_pretrained(distilled_save_dir)\n","torch.save(projection.state_dict(), f\"{distilled_save_dir}/projection.pth\")\n","\n","print(f\"\\nβœ… SUCCESS! 
768-dim distilled encoder saved to {distilled_save_dir}\")\n","print(f\" Latents are ready in {latent_dir}\")\n","print(\" You can now run Cell 2.a (load models) + updated Cell 2.b (test forward pass).\")\n","\n","torch.cuda.empty_cache()"],"metadata":{"id":"AV4tp4G87zWH","cellView":"form"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# ============================= CELL 1.c: Pre-compute Teacher & Student Embeddings =============================\n","# @title 1.c – Pre-compute and save embeddings from both teacher and distilled student\n","\n","import torch\n","import torch.nn.functional as F\n","from tqdm import tqdm\n","from google.colab import drive\n","from transformers import AutoTokenizer, AutoModel # Added this import\n","from peft import PeftModel # Added this import\n","import torch.nn as nn # Added this import for nn.Linear\n","\n","drive.mount('/content/drive', force_remount=True)\n","\n","# Load your saved teacher data (from Cell 1.b)\n","data = torch.load(\"/content/drive/MyDrive/qwen_embeddings_768.pt\", weights_only=False)\n","texts = data[\"texts\"]\n","teacher_embs = data[\"embeddings\"].to(\"cuda\") # (250, 768)\n","\n","print(f\"Loaded {len(texts)} texts and teacher embeddings.\")\n","\n","# Load distilled student (same as in Cell 2.a)\n","DISTILLED_DIR = \"/content/drive/MyDrive/distilled_qwen_768_for_flux\"\n","\n","tokenizer = AutoTokenizer.from_pretrained(DISTILLED_DIR)\n","\n","base_student = AutoModel.from_pretrained(\n"," \"Qwen/Qwen2.5-0.5B\",\n"," torch_dtype=torch.float32,\n"," trust_remote_code=True\n",")\n","\n","student = PeftModel.from_pretrained(base_student, DISTILLED_DIR)\n","student = student.to(\"cuda\")\n","student.eval()\n","\n","projection = nn.Linear(base_student.config.hidden_size, 768, dtype=torch.float32).to(\"cuda\")\n","projection.load_state_dict(torch.load(f\"{DISTILLED_DIR}/projection.pth\", map_location=\"cuda\"))\n","projection.eval()\n","\n","print(\"βœ… Distilled student + projection 
loaded.\")\n","\n","# Pre-compute student embeddings\n","student_embs_list = []\n","with torch.no_grad():\n"," for text in tqdm(texts, desc=\"Computing student embeddings\"):\n"," inputs = tokenizer(text, padding=True, truncation=True, max_length=512, return_tensors=\"pt\").to(\"cuda\")\n"," outputs = student(**inputs)\n"," hidden = outputs.last_hidden_state.mean(dim=1) # (1, hidden_size)\n"," emb_768 = projection(hidden).to(torch.float32) # (1, 768)\n"," student_embs_list.append(emb_768.squeeze(0))\n","\n","student_embs = torch.stack(student_embs_list) # (250, 768)\n","\n","# Compute alignment stats\n","cos_sims = []\n","for i in range(len(texts)):\n"," cos = F.cosine_similarity(student_embs[i], teacher_embs[i], dim=0)\n"," cos_sims.append(cos.item())\n","\n","cos_sims = torch.tensor(cos_sims)\n","print(\"\\nπŸ“Š Alignment Summary (Teacher vs Student):\")\n","print(f\" Average cosine similarity: {cos_sims.mean().item():.4f}\")\n","print(f\" Min: {cos_sims.min().item():.4f} | Max: {cos_sims.max().item():.4f} | Std: {cos_sims.std().item():.4f}\")\n","\n","# Save everything\n","save_path = \"/content/drive/MyDrive/qwen_embeddings_768_full.pt\"\n","torch.save({\n"," \"texts\": texts,\n"," \"teacher_embeddings\": teacher_embs.cpu(), # (250, 768)\n"," \"student_embeddings\": student_embs.cpu(), # (250, 768)\n"," \"cosine_similarities\": cos_sims,\n"," \"dim\": 768\n","}, save_path)\n","\n","print(f\"\\nβœ… All embeddings saved to {save_path}\")\n","print(\" You can now use this file in Cell 2.x for evaluation or LoRA training.\")\n","torch.cuda.empty_cache()"],"metadata":{"id":"qF8kMiOk85EY","cellView":"form"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":["Test stuff in cell 2"],"metadata":{"id":"dz4oMmJ79bxQ"}},{"cell_type":"code","source":["# ============================= CELL 2.a: Load Models & Data for Evaluation =============================\n","# @title 2.a – Load distilled student, transformer, and pre-computed embeddings\n","\n","import 
torch\n","import torch.nn as nn\n","from transformers import AutoTokenizer, AutoModel\n","from peft import PeftModel\n","from diffusers import FluxTransformer2DModel\n","from google.colab import drive\n","\n","drive.mount('/content/drive', force_remount=True)\n","\n","# ====================== 1. Load Distilled Student (768-dim) ======================\n","print(\"πŸ”„ Loading distilled Qwen2.5-0.5B (768-dim)...\")\n","DISTILLED_DIR = \"/content/drive/MyDrive/distilled_qwen_768_for_flux\"\n","\n","tokenizer = AutoTokenizer.from_pretrained(DISTILLED_DIR)\n","\n","base_student = AutoModel.from_pretrained(\n"," \"Qwen/Qwen2.5-0.5B\",\n"," torch_dtype=torch.float32,\n"," trust_remote_code=True\n",")\n","\n","student = PeftModel.from_pretrained(base_student, DISTILLED_DIR)\n","student = student.to(\"cuda\")\n","student.eval()\n","\n","projection = nn.Linear(base_student.config.hidden_size, 768, dtype=torch.float32).to(\"cuda\")\n","projection.load_state_dict(torch.load(f\"{DISTILLED_DIR}/projection.pth\", map_location=\"cuda\"))\n","projection.eval()\n","\n","print(\"βœ… Distilled student + projection loaded.\")\n","print(f\" Student hidden size: {base_student.config.hidden_size} β†’ 768-dim output\")\n","\n","# ====================== 2. Load FLUX.2-klein Transformer ======================\n","print(\"\\nπŸ”„ Loading FLUX.2-klein transformer...\")\n","transformer = FluxTransformer2DModel.from_pretrained(\n"," \"black-forest-labs/FLUX.2-klein-base-4B\",\n"," subfolder=\"transformer\",\n"," torch_dtype=torch.bfloat16,\n"," low_cpu_mem_usage=False\n",").to(\"cuda\")\n","\n","print(\"βœ… FLUX.2-klein transformer loaded on CUDA\")\n","\n","# ====================== 3. 
Load Pre-computed Embeddings ======================\n","data = torch.load(\"/content/drive/MyDrive/qwen_embeddings_768_full.pt\", weights_only=False)\n","texts = data[\"texts\"]\n","teacher_embs = data[\"teacher_embeddings\"].to(\"cuda\") # (250, 768)\n","student_embs = data[\"student_embeddings\"].to(\"cuda\") # (250, 768)\n","\n","print(f\"βœ… Loaded pre-computed embeddings for {len(texts)} texts\")\n","print(f\" Teacher shape: {teacher_embs.shape} | Student shape: {student_embs.shape}\")\n","\n","torch.cuda.empty_cache()"],"metadata":{"id":"8B4f-gxe9kpC","cellView":"form"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# ============================= CELL 2.b: Alignment Test (Teacher vs Student) =============================\n","# @title 2.b – Cosine similarity across all 250 embeddings\n","\n","import torch\n","import torch.nn.functional as F\n","from tqdm import tqdm\n","\n","print(\"πŸ”¬ Computing alignment between teacher and student on all 250 texts...\")\n","\n","cos_sims = []\n","with torch.no_grad():\n"," for i in tqdm(range(len(texts)), desc=\"Alignment check\"):\n"," cos = F.cosine_similarity(student_embs[i], teacher_embs[i], dim=0)\n"," cos_sims.append(cos.item())\n","\n","cos_sims = torch.tensor(cos_sims)\n","\n","print(\"\\nβœ… ALIGNMENT RESULTS (Teacher vs Distilled Student):\")\n","print(f\" Average cosine similarity: {cos_sims.mean().item():.4f}\")\n","print(f\" Minimum: {cos_sims.min().item():.4f}\")\n","print(f\" Maximum: {cos_sims.max().item():.4f}\")\n","print(f\" Standard deviation: {cos_sims.std().item():.4f}\")\n","\n","if cos_sims.mean().item() > 0.94:\n"," print(\"πŸŽ‰ Excellent alignment! Your distillation is high quality.\")\n","elif cos_sims.mean().item() > 0.90:\n"," print(\"βœ… Good alignment. Safe for LoRA training.\")\n","else:\n"," print(\"⚠️ Alignment is moderate. 
Consider more distillation epochs or higher LoRA rank.\")"],"metadata":{"id":"kMiMZmie9n1V","cellView":"form"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# ============================= CELL 2.c: Fixed with 64β†’128 Projection for Klein =============================\n","# @title 2.c – Test with standard packing + small 64β†’128 projection\n","\n","import torch\n","import torch.nn as nn\n","import torch.nn.functional as F\n","\n","text = texts[0]\n","print(f\"πŸ§ͺ Test text: {text[:100]}...\")\n","\n","# 1. Student embedding\n","inputs = tokenizer(text, padding=True, truncation=True, max_length=512, return_tensors=\"pt\").to(\"cuda\")\n","with torch.no_grad():\n"," outputs = student(**inputs)\n"," hidden = outputs.last_hidden_state.mean(dim=1)\n"," emb_768 = projection(hidden).to(torch.bfloat16)\n","\n","print(f\"βœ… emb_768 shape: {emb_768.shape}\")\n","\n","# 2. Text side (correct)\n","encoder_hidden = emb_768.unsqueeze(1).repeat(1, 1, 10) # (1, 1, 7680)\n","pooled_projections = emb_768\n","\n","# 3. Rotary IDs\n","txt_ids = torch.zeros((1, 3), device=\"cuda\", dtype=torch.bfloat16)\n","img_ids = torch.zeros((4096, 3), device=\"cuda\", dtype=torch.bfloat16)\n","\n","# 4. Latent packing (64 ch) + projection to 128\n","dummy_latent = torch.randn(1, 16, 128, 128, device=\"cuda\", dtype=torch.bfloat16)\n","b, c, h, w = dummy_latent.shape\n","latent_packed = dummy_latent.view(b, c, h//2, 2, w//2, 2)\n","latent_packed = latent_packed.permute(0, 2, 4, 1, 3, 5).contiguous()\n","hidden_states = latent_packed.view(b, (h//2)*(w//2), c * 4) # (1, 4096, 64)\n","\n","# Small fixed projection 64 β†’ 128\n","proj_64_to_128 = nn.Linear(64, 128, bias=False, dtype=torch.bfloat16, device=\"cuda\")\n","hidden_states = proj_64_to_128(hidden_states)\n","\n","print(f\"hidden_states after 64β†’128: {hidden_states.shape}\")\n","\n","timestep = torch.tensor([1000.0], device=\"cuda\", dtype=torch.bfloat16)\n","\n","# 5. 
Forward pass\n","print(\"\\n=== Running forward pass ===\")\n","try:\n"," output = transformer(\n"," hidden_states=hidden_states,\n"," timestep=timestep,\n"," encoder_hidden_states=encoder_hidden,\n"," pooled_projections=pooled_projections,\n"," txt_ids=txt_ids,\n"," img_ids=img_ids,\n"," return_dict=False\n"," )[0]\n"," print(\"πŸŽ‰ SUCCESS! Forward pass works.\")\n"," print(f\"Output shape: {output.shape}\")\n","except Exception as e:\n"," print(\"❌ Failed:\")\n"," print(str(e))"],"metadata":{"cellView":"form","id":"9c0V4D7sCP2B"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","source":["Cell 3\n"],"metadata":{"id":"dz6FDD1aBSCt"}},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":73},"executionInfo":{"elapsed":4651,"status":"ok","timestamp":1775007678343,"user":{"displayName":"Nnekos4Lyfe","userId":"11521490251288716219"},"user_tz":-120},"id":"FQF71-mvmlc1","outputId":"ab97d8ee-a278-4d0b-9a6b-9f99a47946ab"},"outputs":[{"name":"stdout","output_type":"stream","text":["πŸ”Œ Disconnecting Colab session in 15 seconds...\n","Session disconnected.\n"]}],"source":["# ================================================\n","# CELL 3: Auto Disconnect Colab Session\n","# ================================================\n","\n","print(\"πŸ”Œ Disconnecting Colab session in 15 seconds...\")\n","import time\n","time.sleep(3)\n","\n","from google.colab import runtime\n","runtime.unassign()\n","\n","print(\"Session disconnected.\")"]},{"cell_type":"markdown","metadata":{"id":"cfshTDIFM5ND"},"source":["You can disconnect the colab past thispoint. 
All data from cells 1 and 2 are saved to drive."]},{"cell_type":"code","execution_count":null,"metadata":{"id":"sWEzuqsmvKua","cellView":"form"},"outputs":[],"source":["# ============================= CELL 3.a: Install Dependencies + Setup Parameters + Load Qwen Text Encoder =============================\n","# @title 3.a Setup + Load Text Encoder (one-time for precompute)\n","\n","!pip install -q diffusers transformers peft accelerate datasets tqdm\n","\n","import os\n","import torch\n","import gc\n","from google.colab import drive\n","from diffusers import Flux2KleinPipeline\n","\n","drive.mount('/content/drive', force_remount=True)\n","\n","# ====================== Parameters ======================\n","LATENT_DIR = \"/content/drive/MyDrive/flux_klein_latents\"\n","FINAL_LORA_DIR = \"/content/drive/MyDrive/flux_klein_lora_final\"\n","\n","BATCH_SIZE = 1 # Keep at 1 for safety with variable sequence lengths\n","NUM_EPOCHS = 8\n","LEARNING_RATE = 1e-4\n","LORA_RANK = 32\n","LORA_ALPHA = 32\n","\n","print(\"βœ… Dependencies installed and parameters set.\")\n","print(f\" Batch size: {BATCH_SIZE} | Epochs: {NUM_EPOCHS}\")\n","print(f\" Latents from: {LATENT_DIR}\")\n","print(f\" Final LoRA will be saved to: {FINAL_LORA_DIR}\")\n","\n","# ====================== Load Pipeline + Text Encoder ======================\n","print(\"\\nπŸ”„ Loading FLUX.2-klein-base-4B pipeline (Qwen3-4B text encoder)...\")\n","\n","pipe = Flux2KleinPipeline.from_pretrained(\n"," \"black-forest-labs/FLUX.2-klein-base-4B\",\n"," torch_dtype=torch.bfloat16,\n"," device_map=\"balanced\",\n"," low_cpu_mem_usage=True\n",")\n","\n","text_encoder = pipe.text_encoder\n","tokenizer = pipe.tokenizer\n","\n","# Force to CUDA and enable hidden states output\n","text_encoder = text_encoder.to(\"cuda\")\n","text_encoder.config.output_hidden_states = True\n","text_encoder.eval()\n","\n","print(\"βœ… Text encoder loaded and moved to CUDA\")\n","print(f\"Current VRAM used: 
{torch.cuda.memory_allocated()/1024**3:.2f} GB\")\n","\n","# Quick VRAM check\n","print(\"\\nπŸ” Quick VRAM check:\")\n","!nvidia-smi --query-gpu=memory.used,memory.total --format=csv"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"8hDfeHlNvPWE","cellView":"form"},"outputs":[],"source":["# ============================= CELL 3.b: Precompute Exact Qwen3-4B Embeddings =============================\n","# @title 3.b Precompute Embeddings (using text encoder from 3.a)\n","\n","import torch\n","from tqdm import tqdm\n","\n","# Load your 250 texts\n","data = torch.load(\"/content/drive/MyDrive/qwen_embeddings_768.pt\", weights_only=False)\n","texts = data[\"texts\"]\n","\n","precomputed = {\n"," \"encoder_hidden_states\": [], # list of (seq_len, hidden_dim)\n"," \"pooled_projections\": [] # list of (hidden_dim,)\n","}\n","\n","with torch.no_grad():\n"," for i, raw_text in enumerate(tqdm(texts, desc=\"Precomputing embeddings\")):\n"," text = raw_text.strip()\n"," if not text:\n"," text = \"a photo of a scene\"\n","\n"," inputs = tokenizer(\n"," text,\n"," padding=\"max_length\",\n"," truncation=True,\n"," max_length=512,\n"," return_tensors=\"pt\",\n"," return_attention_mask=True\n"," ).to(\"cuda\")\n","\n"," if inputs[\"input_ids\"].shape[1] == 0:\n"," print(f\"Warning: zero-length sequence for index {i}, using fallback\")\n"," inputs = tokenizer(\n"," \"a photo of a scene\",\n"," padding=\"max_length\",\n"," truncation=True,\n"," max_length=512,\n"," return_tensors=\"pt\",\n"," return_attention_mask=True\n"," ).to(\"cuda\")\n","\n"," outputs = text_encoder(**inputs)\n","\n"," # Handle CausalLMOutputWithPast correctly\n"," if hasattr(outputs, \"hidden_states\") and outputs.hidden_states is not None:\n"," hidden = outputs.hidden_states[-1].squeeze(0).cpu() # final layer: (seq_len, hidden_dim)\n"," elif hasattr(outputs, \"last_hidden_state\"):\n"," hidden = outputs.last_hidden_state.squeeze(0).cpu()\n"," else:\n"," print(f\"Warning: unexpected output for 
text {i}, using logits as fallback\")\n"," hidden = outputs.logits.squeeze(0).cpu()\n","\n"," pooled = hidden.mean(dim=0).cpu() # (hidden_dim,)\n","\n"," precomputed[\"encoder_hidden_states\"].append(hidden)\n"," precomputed[\"pooled_projections\"].append(pooled)\n","\n","print(f\"βœ… Successfully precomputed embeddings for {len(texts)} texts\")\n","torch.save(precomputed, \"/content/drive/MyDrive/klein_exact_embeddings.pt\")\n","print(\"Saved to /content/drive/MyDrive/klein_exact_embeddings.pt\")"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"px5wQJZ2vTUf","cellView":"form"},"outputs":[],"source":["# ============================= CELL 3.c: Unload Text Encoder + Prepare Workspace =============================\n","# @title 3.c Cleanup – Unload Qwen Encoder\n","\n","import gc\n","\n","# Unload pipeline and text encoder\n","if 'pipe' in globals():\n"," del pipe\n","if 'text_encoder' in globals():\n"," del text_encoder\n","if 'tokenizer' in globals():\n"," del tokenizer\n","\n","torch.cuda.empty_cache()\n","gc.collect()\n","\n","print(\"βœ… Text encoder and pipeline fully unloaded from VRAM\")\n","print(f\"Current VRAM used: {torch.cuda.memory_allocated()/1024**3:.2f} GB\")\n","\n","# Quick check for latents\n","latent_files = sorted([f for f in os.listdir(LATENT_DIR) if f.endswith(\".pt\")])\n","print(f\"Found {len(latent_files)} latents ready for training\")"]},{"cell_type":"markdown","metadata":{"id":"GDNO0bonrYAo"},"source":["lora training"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"llxOztXpvYMO","cellView":"form"},"outputs":[],"source":["# ============================= CELL 4.a: Load Transformer + Apply LoRA =============================\n","# @title 4.a Load Transformer + LoRA (Fixed meta tensor loading)\n","\n","import torch\n","import gc\n","from torch.utils.data import Dataset\n","from peft import LoraConfig, get_peft_model\n","from diffusers import FluxTransformer2DModel\n","from transformers import 
set_seed\n","\n","set_seed(42)\n","\n","print(\"=== CELL 4.a – Loading Transformer + LoRA ===\")\n","print(f\"Current VRAM before loading: {torch.cuda.memory_allocated()/1024**3:.2f} GB\")\n","\n","# Load precomputed embeddings\n","precomputed = torch.load(\"/content/drive/MyDrive/klein_exact_embeddings.pt\", weights_only=True)\n","\n","# ====================== Load Transformer (Fixed) ======================\n","print(\"Loading FLUX.2-klein-base-4B transformer...\")\n","\n","transformer = FluxTransformer2DModel.from_pretrained(\n"," \"black-forest-labs/FLUX.2-klein-base-4B\",\n"," subfolder=\"transformer\",\n"," torch_dtype=torch.bfloat16,\n"," low_cpu_mem_usage=False # Critical: prevents meta tensors\n",").to(\"cuda\")\n","\n","print(f\"βœ… Transformer loaded successfully. VRAM: {torch.cuda.memory_allocated()/1024**3:.2f} GB\")\n","\n","# ====================== Apply LoRA ======================\n","lora_config = LoraConfig(\n"," r=LORA_RANK,\n"," lora_alpha=LORA_ALPHA,\n"," target_modules=[\n"," \"attn.to_q\", \"attn.to_k\", \"attn.to_v\", \"attn.to_out.0\",\n"," \"attn.to_qkv_mlp_proj\", \"attn.add_q_proj\", \"attn.add_k_proj\",\n"," \"attn.add_v_proj\", \"attn.to_add_out\",\n"," \"ff.linear_in\", \"ff.linear_out\", \"ff_context.linear_in\", \"ff_context.linear_out\"\n"," ],\n"," lora_dropout=0.05,\n"," bias=\"none\"\n",")\n","\n","transformer = get_peft_model(transformer, lora_config)\n","transformer.train()\n","\n","print(\"βœ… LoRA applied successfully\")\n","print(f\"VRAM after LoRA: {torch.cuda.memory_allocated()/1024**3:.2f} GB\")"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"rx5ngImGxoTT","cellView":"form"},"outputs":[],"source":["# ============================= CELL 4.b: LoRA Training (Robust version for klein) =============================\n","# @title 4.b Training with Precomputed + Careful Reshape\n","\n","from transformers import Trainer, TrainingArguments\n","import torch.nn.functional as F\n","import torch.nn as nn\n","from tqdm 
import tqdm\n","\n","# Load latents (keep on CPU until needed)\n","latents_list = []\n","for lf in tqdm(sorted([f for f in os.listdir(LATENT_DIR) if f.endswith(\".pt\")]), desc=\"Loading latents\"):\n"," latent = torch.load(os.path.join(LATENT_DIR, lf), weights_only=True)\n"," if latent.dim() == 4 and latent.shape[0] == 1:\n"," latent = latent.squeeze(0)\n"," latents_list.append(latent)\n","latents = torch.stack(latents_list)\n","print(f\"Latents shape: {latents.shape}\")\n","\n","precomputed = torch.load(\"/content/drive/MyDrive/klein_exact_embeddings.pt\", weights_only=True)\n","\n","# Pooled: 2560 β†’ 768 (standard for pooled_projections in FluxTransformer2DModel)\n","pooled_projection = nn.Linear(2560, 768, bias=True, dtype=torch.bfloat16).to(\"cuda\")\n","with torch.no_grad():\n"," min_d = min(2560, 768)\n"," pooled_projection.weight.data[:, :min_d] = torch.eye(min_d, dtype=torch.bfloat16)\n"," pooled_projection.bias.data.zero_()\n","pooled_projection.train()\n","\n","class FluxLoRADataset(Dataset):\n"," def __init__(self, latents, precomputed):\n"," self.latents = latents\n"," self.encoder_hs = precomputed[\"encoder_hidden_states\"]\n"," self.pooled = precomputed[\"pooled_projections\"]\n","\n"," def __len__(self): return len(self.latents)\n","\n"," def __getitem__(self, idx):\n"," return {\n"," \"latent\": self.latents[idx],\n"," \"encoder_hidden_states\": self.encoder_hs[idx], # (seq_len, ~2560)\n"," \"pooled_raw\": self.pooled[idx]\n"," }\n","\n","def collate_fn(batch):\n"," return {\n"," \"latent\": torch.stack([item[\"latent\"] for item in batch]),\n"," \"encoder_hidden_states\": [item[\"encoder_hidden_states\"] for item in batch],\n"," \"pooled_raw\": torch.stack([item[\"pooled_raw\"] for item in batch])\n"," }\n","\n","class FluxLoRATrainer(Trainer):\n"," def compute_loss(self, model, inputs, return_outputs=False, num_items_in_batch=None):\n"," latents = inputs[\"latent\"].to(dtype=torch.bfloat16, device=model.device)\n"," encoder_hs_list = 
inputs[\"encoder_hidden_states\"]\n"," pooled_raw = inputs[\"pooled_raw\"].to(dtype=torch.bfloat16, device=model.device)\n","\n"," pooled_projections = pooled_projection(pooled_raw)\n","\n"," batch_size = latents.shape[0]\n"," timesteps = torch.rand(batch_size, device=latents.device)\n"," noise = torch.randn_like(latents)\n"," noisy_latents = (1 - timesteps.view(-1, 1, 1, 1)) * latents + timesteps.view(-1, 1, 1, 1) * noise\n","\n"," # === Careful reshape for encoder_hidden_states ===\n"," # Take first item (B=1), move to device, expand to expected width (usually 7680 = 3*2560)\n"," enc = encoder_hs_list[0].to(dtype=torch.bfloat16, device=model.device) # (seq_len, 2560)\n"," encoder_hidden_states = torch.cat([enc, enc, enc], dim=-1) # (seq_len, 7680)\n","\n"," # Placeholders - this is the part that often causes the 4D vs 5D cat error if wrong\n"," seq_len = encoder_hidden_states.shape[0]\n"," txt_ids = torch.zeros((seq_len, 3), device=model.device, dtype=torch.bfloat16)\n","\n"," # Image tokens: for 1024x1024 latents (16x128x128) the number is usually (128*128) // 4 or similar\n"," img_tokens = latents.shape[2] * latents.shape[3] // 4\n"," img_ids = torch.zeros((img_tokens, 3), device=model.device, dtype=torch.bfloat16)\n","\n"," model_output = model(\n"," hidden_states=noisy_latents,\n"," timestep=timesteps * 1000,\n"," encoder_hidden_states=encoder_hidden_states,\n"," pooled_projections=pooled_projections,\n"," txt_ids=txt_ids,\n"," img_ids=img_ids,\n"," return_dict=False\n"," )[0]\n","\n"," target = noise - latents\n"," loss = F.mse_loss(model_output, target)\n","\n"," return (loss, model_output) if return_outputs else loss\n","\n","training_args = TrainingArguments(\n"," output_dir=\"/content/flux_klein_lora\",\n"," per_device_train_batch_size=BATCH_SIZE,\n"," num_train_epochs=NUM_EPOCHS,\n"," learning_rate=LEARNING_RATE,\n"," lr_scheduler_type=\"cosine\",\n"," warmup_steps=50,\n"," bf16=True,\n"," logging_steps=10,\n"," save_strategy=\"epoch\",\n"," 
save_total_limit=2,\n"," report_to=\"none\",\n"," remove_unused_columns=False,\n"," dataloader_pin_memory=False,\n",")\n","\n","trainer = FluxLoRATrainer(\n"," model=transformer,\n"," args=training_args,\n"," train_dataset=dataset,\n"," data_collator=collate_fn,\n",")\n","\n","print(\"\\nπŸš€ Starting training...\")\n","trainer.train()"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"DRoYrgC-rbkt","cellView":"form"},"outputs":[],"source":["# ============================= CELL 4.c: Save LoRA =============================\n","# @title 4.c Save Final LoRA\n","\n","final_lora_dir = FINAL_LORA_DIR\n","os.makedirs(final_lora_dir, exist_ok=True)\n","transformer.save_pretrained(final_lora_dir)\n","\n","print(f\"\\nβœ… Training completed! LoRA saved to: {final_lora_dir}\")\n","torch.cuda.empty_cache()\n","gc.collect()"]}],"metadata":{"accelerator":"GPU","colab":{"gpuType":"T4","provenance":[]},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"name":"python"}},"nbformat":4,"nbformat_minor":0}
 
1
+ {"cells":[{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":15039,"status":"ok","timestamp":1775134521101,"user":{"displayName":"No Name","userId":"10578412414437288386"},"user_tz":-120},"id":"Ma3eWy8RQnUM","outputId":"de0b88c6-5bf4-4af7-8f12-e870293bd3da"},"outputs":[{"name":"stdout","output_type":"stream","text":["Mounted at /content/drive\n"]}],"source":["from google.colab import drive\n","drive.mount('/content/drive')"]},{"cell_type":"code","source":["# ============================= MERGED CELL 1: Smaller Distillation Pipeline =============================\n","# @title 1 – Smaller Distillation: Qwen2.5-0.5B Student from Larger Teacher\n","\n","import os\n","import zipfile\n","import torch\n","import torch.nn as nn\n","import torch.nn.functional as F\n","import numpy as np\n","from google.colab import drive\n","from PIL import Image\n","from tqdm import tqdm\n","from diffusers import AutoencoderKL\n","from transformers import AutoTokenizer, AutoModel, set_seed\n","from datasets import Dataset as HFDataset\n","from torch.utils.data import Dataset\n","from peft import LoraConfig, get_peft_model, PeftModel\n","from transformers import Trainer, TrainingArguments\n","\n","set_seed(42)\n","drive.mount('/content/drive', force_remount=True)\n","\n","# ========================== PARAMETERS ==========================\n","zip_path = '/content/drive/MyDrive/my_set.zip' # @param {type:'string'}\n","\n","update_latents = True # @param {type:\"boolean\"}\n","update_teacher_embeddings = True # @param {type:\"boolean\"}\n","update_distillation = True # @param {type:\"boolean\"}\n","update_student_embeddings = True # @param {type:\"boolean\"}\n","\n","epochs = 15 # @param {type:\"slider\", min:5, max:30, step:1}\n","batch_size = 8 # @param {type:\"slider\", min:2, max:16, step:1}\n","learning_rate = 3e-4 # @param {type:\"number\"}\n","\n","print(\"πŸ”§ Update settings:\")\n","print(f\" β€’ Latents: 
{'Update' if update_latents else 'Skip'}\")\n","print(f\" β€’ Teacher Embeddings: {'Update' if update_teacher_embeddings else 'Skip'}\")\n","print(f\" β€’ Distillation: {'Update' if update_distillation else 'Skip'}\")\n","print(f\" β€’ Student Embeddings: {'Update' if update_student_embeddings else 'Skip'}\")\n","print(f\" β€’ Epochs: {epochs}\")\n","print(f\" β€’ Batch size: {batch_size}\")\n","\n","# ====================== 1.A – VAE Latents ======================\n","print(\"\\n\" + \"=\"*70)\n","print(\"1.A – Extracting dataset and encoding FLUX.2 VAE latents\")\n","print(\"=\"*70)\n","\n","extract_dir = \"/content/data\"\n","os.makedirs(extract_dir, exist_ok=True)\n","\n","with zipfile.ZipFile(zip_path, 'r') as zip_ref:\n"," zip_ref.extractall(extract_dir)\n","\n","image_files = [f for f in os.listdir(extract_dir) if f.lower().endswith(('.png', '.jpg', '.jpeg', '.webp'))]\n","text_files = sorted([f for f in os.listdir(extract_dir) if f.endswith('.txt') and f[0].isdigit()])\n","\n","texts = []\n","for tf in text_files:\n"," with open(os.path.join(extract_dir, tf), \"r\", encoding=\"utf-8\") as f:\n"," content = f.read().strip()\n"," if content:\n"," texts.append(content)\n","\n","print(f\"βœ… Found {len(image_files)} images and {len(texts)} captions\")\n","\n","latent_dir = \"/content/drive/MyDrive/flux_klein_latents\"\n","latents_exist = os.path.exists(latent_dir) and len([f for f in os.listdir(latent_dir) if f.endswith(\".pt\")]) == len(image_files)\n","\n","if latents_exist and not update_latents:\n"," print(f\"βœ… Using existing latents from {latent_dir}\")\n","else:\n"," print(\"πŸŒ€ Encoding images to FLUX.2 VAE latents...\")\n"," vae = AutoencoderKL.from_pretrained(\n"," \"black-forest-labs/FLUX.2-dev\", subfolder=\"vae\",\n"," torch_dtype=torch.float32, device_map=\"auto\"\n"," )\n"," vae.eval()\n","\n"," os.makedirs(latent_dir, exist_ok=True)\n","\n"," with torch.no_grad():\n"," for img_file in tqdm(image_files, desc=\"Encoding latents\"):\n"," img_path 
= os.path.join(extract_dir, img_file)\n"," image = Image.open(img_path).convert(\"RGB\").resize((1024, 1024), Image.LANCZOS)\n","\n"," pixel_values = (torch.from_numpy(np.array(image)).permute(2, 0, 1).unsqueeze(0).float() / 255.0)\n"," pixel_values = pixel_values.to(vae.device, dtype=vae.dtype) * 2.0 - 1.0\n","\n"," latents = vae.encode(pixel_values).latent_dist.sample() * vae.config.scaling_factor\n"," latent_name = os.path.splitext(img_file)[0] + \".pt\"\n"," torch.save(latents.cpu(), os.path.join(latent_dir, latent_name))\n","\n"," del vae\n"," torch.cuda.empty_cache()\n"," print(f\"βœ… Latents saved to {latent_dir}\")\n","\n","# ====================== 1.B – Teacher + Smaller Student Distillation ======================\n","print(\"\\n\" + \"=\"*70)\n","print(\"1.B – Teacher Embeddings + Distillation to 7680-dim (Small Student)\")\n","print(\"=\"*70)\n","\n","teacher_embeddings_path = \"/content/drive/MyDrive/qwen_embeddings_7680.pt\"\n","distilled_dir = \"/content/drive/MyDrive/distilled_qwen_small_7680_for_flux\"\n","\n","# --- Teacher Embeddings ---\n","if os.path.exists(teacher_embeddings_path) and not update_teacher_embeddings:\n"," print(\"βœ… Using existing teacher embeddings\")\n"," data = torch.load(teacher_embeddings_path, map_location=\"cpu\")\n"," texts = data[\"texts\"]\n"," teacher_embeddings_7680 = data[\"embeddings\"]\n","else:\n"," print(\"πŸ“ Computing teacher embeddings with Qwen2.5-7B...\")\n"," teacher_model_name = \"Qwen/Qwen2.5-7B\"\n"," tokenizer = AutoTokenizer.from_pretrained(teacher_model_name)\n"," teacher_model = AutoModel.from_pretrained(\n"," teacher_model_name, torch_dtype=torch.float16, device_map=\"auto\", trust_remote_code=True\n"," ).eval()\n","\n"," teacher_embeddings = []\n"," with torch.no_grad():\n"," for text in tqdm(texts, desc=\"Teacher encoding\"):\n"," inputs = tokenizer(text, padding=True, truncation=True, max_length=512, return_tensors=\"pt\").to(teacher_model.device)\n"," outputs = teacher_model(**inputs)\n"," emb 
= outputs.last_hidden_state.mean(dim=1).squeeze(0).cpu()\n"," emb_7680 = torch.cat([emb, emb, emb], dim=0)\n"," teacher_embeddings.append(emb_7680)\n","\n"," teacher_embeddings_7680 = torch.stack(teacher_embeddings)\n"," torch.save({\"embeddings\": teacher_embeddings_7680, \"texts\": texts, \"dim\": 7680}, teacher_embeddings_path)\n"," del teacher_model\n"," torch.cuda.empty_cache()\n"," print(\"βœ… Teacher embeddings saved\")\n","\n","# --- Distillation ---\n","if os.path.exists(distilled_dir) and os.path.exists(f\"{distilled_dir}/projection.pth\") and not update_distillation:\n"," print(f\"βœ… Using existing distilled model from {distilled_dir}\")\n","else:\n"," print(\"πŸ‘¨β€πŸŽ“ Starting distillation with tiny student (Qwen2.5-0.5B)...\")\n","\n"," student_model_name = \"Qwen/Qwen2.5-0.5B\"\n"," base_student = AutoModel.from_pretrained(\n"," student_model_name, torch_dtype=torch.float32, device_map=\"auto\", trust_remote_code=True\n"," )\n","\n"," lora_config = LoraConfig(\n"," r=16, lora_alpha=32,\n"," target_modules=[\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\"],\n"," lora_dropout=0.05, bias=\"none\", task_type=\"FEATURE_EXTRACTION\"\n"," )\n"," student_model = get_peft_model(base_student, lora_config)\n","\n"," for param in student_model.parameters():\n"," if param.requires_grad:\n"," param.data = param.data.to(torch.float32)\n","\n"," projection = nn.Linear(base_student.config.hidden_size, 7680, dtype=torch.float32).to(\"cuda\")\n"," projection.train()\n","\n"," hf_dataset = HFDataset.from_dict({\"text\": texts})\n","\n"," class DistillationDataset(Dataset):\n"," def __init__(self, hf_dataset, tokenizer, teacher_embs, max_length=512):\n"," self.dataset = hf_dataset\n"," self.tokenizer = tokenizer\n"," self.teacher_embs = teacher_embs\n"," self.max_length = max_length\n","\n"," def __len__(self): return len(self.dataset)\n","\n"," def __getitem__(self, idx):\n"," text = self.dataset[idx][\"text\"]\n"," inputs = self.tokenizer(text, padding=\"max_length\", 
truncation=True,\n"," max_length=self.max_length, return_tensors=\"pt\")\n"," return {\n"," \"input_ids\": inputs[\"input_ids\"].squeeze(0),\n"," \"attention_mask\": inputs[\"attention_mask\"].squeeze(0),\n"," \"labels\": self.teacher_embs[idx],\n"," }\n","\n"," distill_dataset = DistillationDataset(hf_dataset, tokenizer, teacher_embeddings_7680)\n","\n"," def collate_fn(batch):\n"," return {\n"," \"input_ids\": torch.stack([item[\"input_ids\"] for item in batch]),\n"," \"attention_mask\": torch.stack([item[\"attention_mask\"] for item in batch]),\n"," \"labels\": torch.stack([item[\"labels\"] for item in batch])\n"," }\n","\n"," class DistillTrainer(Trainer):\n"," def compute_loss(self, model, inputs, return_outputs=False):\n"," labels = inputs.pop(\"labels\").to(\"cuda\").float()\n"," outputs = model(input_ids=inputs[\"input_ids\"], attention_mask=inputs[\"attention_mask\"])\n"," hidden = outputs.last_hidden_state.mean(dim=1)\n"," student_emb = projection(hidden)\n","\n"," student_norm = F.normalize(student_emb, p=2, dim=1)\n"," teacher_norm = F.normalize(labels, p=2, dim=1)\n","\n"," mse_loss = F.mse_loss(student_norm, teacher_norm)\n"," cos_loss = (1 - F.cosine_similarity(student_norm, teacher_norm, dim=1)).mean()\n"," loss = 0.2 * mse_loss + 0.8 * cos_loss\n"," return (loss, outputs) if return_outputs else loss\n","\n"," training_args = TrainingArguments(\n"," output_dir=\"./distilled_qwen_small_7680\",\n"," per_device_train_batch_size=batch_size,\n"," num_train_epochs=epochs,\n"," learning_rate=learning_rate,\n"," fp16=True,\n"," logging_steps=20,\n"," save_strategy=\"no\",\n"," report_to=\"none\",\n"," remove_unused_columns=False,\n"," gradient_clip_val=1.0,\n"," )\n","\n"," trainer = DistillTrainer(\n"," model=student_model,\n"," args=training_args,\n"," train_dataset=distill_dataset,\n"," data_collator=collate_fn,\n"," )\n","\n"," print(\"πŸš€ Starting distillation training...\")\n"," trainer.train()\n","\n"," # Save to CPU\n"," print(\"\\nπŸ’Ύ Saving 
distilled model to CPU...\")\n"," os.makedirs(distilled_dir, exist_ok=True)\n"," student_model = student_model.to(\"cpu\")\n"," student_model.save_pretrained(distilled_dir)\n"," tokenizer.save_pretrained(distilled_dir)\n"," torch.save(projection.to(\"cpu\").state_dict(), f\"{distilled_dir}/projection.pth\")\n","\n"," print(f\"βœ… Tiny distilled model + projection saved to {distilled_dir}\")\n"," torch.cuda.empty_cache()\n","\n","# ====================== 1.C – Pre-compute Student Embeddings ======================\n","print(\"\\n\" + \"=\"*70)\n","print(\"1.C – Pre-computing Student Embeddings + Alignment\")\n","print(\"=\"*70)\n","\n","full_embeddings_path = \"/content/drive/MyDrive/qwen_embeddings_7680_full.pt\"\n","\n","if os.path.exists(full_embeddings_path) and not update_student_embeddings:\n"," print(f\"βœ… Using existing full embeddings from {full_embeddings_path}\")\n","else:\n"," print(\"πŸ”„ Loading distilled student and computing embeddings...\")\n","\n"," tokenizer = AutoTokenizer.from_pretrained(distilled_dir)\n"," base_student = AutoModel.from_pretrained(\n"," \"Qwen/Qwen2.5-0.5B\", torch_dtype=torch.float32, trust_remote_code=True\n"," )\n"," student = PeftModel.from_pretrained(base_student, distilled_dir).to(\"cuda\").eval()\n","\n"," projection = nn.Linear(base_student.config.hidden_size, 7680, dtype=torch.float32).to(\"cuda\")\n"," projection.load_state_dict(torch.load(f\"{distilled_dir}/projection.pth\", map_location=\"cuda\"))\n"," projection.eval()\n","\n"," student_embs_list = []\n"," with torch.no_grad():\n"," for text in tqdm(texts, desc=\"Computing student embeddings\"):\n"," inputs = tokenizer(text, padding=True, truncation=True, max_length=512, return_tensors=\"pt\").to(\"cuda\")\n"," outputs = student(**inputs)\n"," hidden = outputs.last_hidden_state.mean(dim=1)\n"," emb = projection(hidden).to(torch.float32).squeeze(0)\n"," student_embs_list.append(emb)\n","\n"," student_embs = torch.stack(student_embs_list)\n","\n"," teacher_embs = 
teacher_embeddings_7680.to(\"cuda\") if 'teacher_embeddings_7680' in locals() else \\\n"," torch.load(teacher_embeddings_path, map_location=\"cuda\")[\"embeddings\"].to(\"cuda\")\n","\n"," cos_sims = F.cosine_similarity(student_embs, teacher_embs, dim=1)\n","\n"," print(\"\\nπŸ“Š Alignment Summary:\")\n"," print(f\" Average cosine similarity: {cos_sims.mean().item():.4f}\")\n"," print(f\" Min: {cos_sims.min().item():.4f} Max: {cos_sims.max().item():.4f} Std: {cos_sims.std().item():.4f}\")\n","\n"," torch.save({\n"," \"texts\": texts,\n"," \"teacher_embeddings\": teacher_embs.cpu(),\n"," \"student_embeddings\": student_embs.cpu(),\n"," \"cosine_similarities\": cos_sims.cpu(),\n"," \"dim\": 7680\n"," }, full_embeddings_path)\n","\n"," print(f\"βœ… Full embeddings saved to {full_embeddings_path}\")\n","\n"," del student, projection, base_student\n"," torch.cuda.empty_cache()\n","\n","print(\"\\nπŸŽ‰ All tasks completed!\")"],"metadata":{"id":"V77SSU2tf5Uc"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"dz4oMmJ79bxQ"},"source":["Test stuff in cell 2. 
Its safe to disconnect."]},{"cell_type":"code","source":["# ============================= MERGED CELL 2: Load + Alignment + Forward + RoPE Test =============================\n","# @title 2 – Test Distilled Small Text Encoder + RoPE Verification\n","\n","import torch\n","import torch.nn as nn\n","import torch.nn.functional as F\n","from transformers import AutoTokenizer, AutoModel\n","from peft import PeftModel\n","from diffusers import FluxTransformer2DModel\n","from google.colab import drive\n","import gc\n","import os\n","\n","drive.mount('/content/drive', force_remount=True)\n","\n","# ========================== PARAMETERS ==========================\n","run_alignment_test = True # @param {type:\"boolean\"}\n","run_forward_pass_test = True # @param {type:\"boolean\"}\n","run_rope_test = True # @param {type:\"boolean\"}\n","force_reload_models = False # @param {type:\"boolean\"}\n","seq_len = 16 # @param {type:\"slider\", min:8, max:32, step:2}\n","\n","DISTILLED_DIR = \"/content/drive/MyDrive/distilled_qwen_small_7680_for_flux\"\n","embed_path = \"/content/drive/MyDrive/qwen_embeddings_7680_full.pt\"\n","\n","print(\"πŸ”§ Cell 2 Settings:\")\n","print(f\" β€’ Alignment Test: {'Enabled' if run_alignment_test else 'Disabled'}\")\n","print(f\" β€’ Forward Pass Test: {'Enabled' if run_forward_pass_test else 'Disabled'}\")\n","print(f\" β€’ RoPE Test: {'Enabled' if run_rope_test else 'Disabled'}\")\n","print(f\" β€’ Sequence Length: {seq_len}\")\n","\n","torch.cuda.empty_cache()\n","gc.collect()\n","\n","# ====================== Load Distilled Student ======================\n","print(\"\\n\" + \"=\"*60)\n","print(\"Loading Distilled Small Student...\")\n","tokenizer = AutoTokenizer.from_pretrained(DISTILLED_DIR)\n","\n","base_student = AutoModel.from_pretrained(\n"," \"Qwen/Qwen2.5-0.5B\", torch_dtype=torch.float32, trust_remote_code=True\n",")\n","student = PeftModel.from_pretrained(base_student, DISTILLED_DIR).to(\"cuda\").eval()\n","\n","projection = 
nn.Linear(base_student.config.hidden_size, 7680, dtype=torch.float32).to(\"cuda\")\n","projection.load_state_dict(torch.load(f\"{DISTILLED_DIR}/projection.pth\", map_location=\"cuda\"))\n","projection.eval()\n","\n","print(\"βœ… Distilled student + projection loaded to GPU\")\n","\n","# ====================== Load FLUX Transformer ======================\n","print(\"\\nLoading FLUX.2-klein Transformer...\")\n","transformer = FluxTransformer2DModel.from_pretrained(\n"," \"black-forest-labs/FLUX.2-klein-base-4B\",\n"," subfolder=\"transformer\",\n"," torch_dtype=torch.bfloat16,\n"," low_cpu_mem_usage=True\n",").to(\"cuda\").eval()\n","print(\"βœ… FLUX.2-klein transformer loaded\")\n","\n","# ====================== Load Embeddings ======================\n","print(\"\\nLoading pre-computed embeddings...\")\n","if os.path.exists(embed_path):\n"," data = torch.load(embed_path, map_location=\"cpu\")\n"," texts = data[\"texts\"]\n"," teacher_embs = data.get(\"teacher_embeddings\", data.get(\"embeddings\")).to(\"cuda\")\n"," student_embs = data.get(\"student_embeddings\", None)\n"," if student_embs is not None:\n"," student_embs = student_embs.to(\"cuda\")\n"," print(f\"βœ… Loaded {len(texts)} texts\")\n","else:\n"," print(\"❌ Embeddings file not found. 
Run Cell 1 first!\")\n"," texts = []\n"," teacher_embs = None\n"," student_embs = None\n","\n","# ====================== Alignment Test ======================\n","if run_alignment_test and texts and teacher_embs is not None and student_embs is not None:\n"," print(\"\\n\" + \"=\"*70)\n"," print(\"πŸ“Š ALIGNMENT TEST\")\n"," print(\"=\"*70)\n"," cos_sims = F.cosine_similarity(student_embs, teacher_embs, dim=1)\n"," print(f\" Average cosine similarity: {cos_sims.mean().item():.4f}\")\n"," print(f\" Min: {cos_sims.min().item():.4f} | Max: {cos_sims.max().item():.4f}\")\n","\n","# ====================== Forward Pass + RoPE Test ======================\n","if run_forward_pass_test and texts:\n"," print(\"\\n\" + \"=\"*70)\n"," print(\"πŸ§ͺ FORWARD PASS + RoPE TEST\")\n"," print(\"=\"*70)\n","\n"," test_idx = 0\n"," text = texts[test_idx]\n"," print(f\"Test prompt: {text[:180]}{'...' if len(text) > 180 else ''}\")\n","\n"," inputs = tokenizer(text, padding=True, truncation=True, max_length=512, return_tensors=\"pt\").to(\"cuda\")\n","\n"," with torch.no_grad():\n"," outputs = student(**inputs)\n"," hidden = outputs.last_hidden_state.mean(dim=1)\n"," emb_7680 = projection(hidden).to(torch.bfloat16)\n","\n"," print(f\"βœ… Distilled embedding shape: {emb_7680.shape}\")\n","\n"," # RoPE Test\n"," if run_rope_test:\n"," print(\"\\nTesting RoPE (positional embeddings)...\")\n"," pos_ids_normal = torch.arange(inputs.input_ids.shape[1], device=\"cuda\").unsqueeze(0)\n"," pos_ids_zero = torch.zeros_like(pos_ids_normal)\n","\n"," out1 = student(input_ids=inputs.input_ids, attention_mask=inputs.attention_mask, position_ids=pos_ids_normal)\n"," out2 = student(input_ids=inputs.input_ids, attention_mask=inputs.attention_mask, position_ids=pos_ids_zero)\n","\n"," hidden1 = out1.last_hidden_state.mean(dim=1)\n"," hidden2 = out2.last_hidden_state.mean(dim=1)\n"," rope_diff = F.cosine_similarity(hidden1, hidden2, dim=1).item()\n","\n"," print(f\" Cosine similarity between normal vs zero 
position_ids: {rope_diff:.4f}\")\n"," print(f\" β†’ RoPE is working correctly if value is noticeably < 1.0\")\n","\n"," # Simple FLUX forward test (dummy inputs)\n"," print(\"\\nRunning dummy FLUX forward pass...\")\n"," encoder_hidden_states = emb_7680.unsqueeze(1).repeat(1, seq_len, 1)\n","\n"," try:\n"," # Very minimal dummy latent (you may need to adjust packing for full compatibility)\n"," hidden_states = torch.randn(1, 4096, 128, device=\"cuda\", dtype=torch.bfloat16) # placeholder\n"," timestep = torch.tensor([1000.0], device=\"cuda\", dtype=torch.bfloat16)\n"," txt_ids = torch.zeros((1, seq_len, 3), device=\"cuda\", dtype=torch.bfloat16)\n"," img_ids = torch.zeros((hidden_states.shape[1], 3), device=\"cuda\", dtype=torch.bfloat16)\n","\n"," output = transformer(\n"," hidden_states=hidden_states,\n"," timestep=timestep,\n"," encoder_hidden_states=encoder_hidden_states,\n"," txt_ids=txt_ids,\n"," img_ids=img_ids,\n"," return_dict=False\n"," )[0]\n","\n"," print(\"πŸŽ‰ SUCCESS: Forward pass with distilled 7680-dim encoder completed!\")\n"," print(f\" Output shape: {output.shape}\")\n"," except Exception as e:\n"," print(f\"❌ Forward pass failed: {str(e)}\")\n"," print(\" (This is expected if latent packing / dimensions don't perfectly match)\")\n","\n","torch.cuda.empty_cache()\n","gc.collect()\n","print(\"\\nβœ… Cell 2 completed! 
Distilled text encoder is ready and RoPE is verified.\")"],"metadata":{"id":"Yh_-8CMIgDK3"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"dz6FDD1aBSCt"},"source":["Cell 3\n"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":73},"executionInfo":{"elapsed":4651,"status":"ok","timestamp":1775007678343,"user":{"displayName":"Nnekos4Lyfe","userId":"11521490251288716219"},"user_tz":-120},"id":"FQF71-mvmlc1","outputId":"ab97d8ee-a278-4d0b-9a6b-9f99a47946ab"},"outputs":[{"name":"stdout","output_type":"stream","text":["πŸ”Œ Disconnecting Colab session in 15 seconds...\n","Session disconnected.\n"]}],"source":["# ================================================\n","# Auto Disconnect Colab Session\n","# ================================================\n","\n","print(\"πŸ”Œ Disconnecting Colab session in 15 seconds...\")\n","import time\n","time.sleep(3)\n","\n","from google.colab import runtime\n","runtime.unassign()\n","\n","print(\"Session disconnected.\")"]},{"cell_type":"markdown","metadata":{"id":"cfshTDIFM5ND"},"source":["You can disconnect the colab past this point. 
All data from cells 1 and 2 are saved to drive."]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"sWEzuqsmvKua"},"outputs":[],"source":["# ============================= CELL 3.a: Install Dependencies + Setup Parameters + Load Qwen Text Encoder =============================\n","# @title 3.a Setup + Load Text Encoder (one-time for precompute)\n","\n","!pip install -q diffusers transformers peft accelerate datasets tqdm\n","\n","import os\n","import torch\n","import gc\n","from google.colab import drive\n","from diffusers import Flux2KleinPipeline\n","\n","drive.mount('/content/drive', force_remount=True)\n","\n","# ====================== Parameters ======================\n","LATENT_DIR = \"/content/drive/MyDrive/flux_klein_latents\"\n","FINAL_LORA_DIR = \"/content/drive/MyDrive/flux_klein_lora_final\"\n","\n","BATCH_SIZE = 1 # Keep at 1 for safety with variable sequence lengths\n","NUM_EPOCHS = 8\n","LEARNING_RATE = 1e-4\n","LORA_RANK = 32\n","LORA_ALPHA = 32\n","\n","print(\"βœ… Dependencies installed and parameters set.\")\n","print(f\" Batch size: {BATCH_SIZE} | Epochs: {NUM_EPOCHS}\")\n","print(f\" Latents from: {LATENT_DIR}\")\n","print(f\" Final LoRA will be saved to: {FINAL_LORA_DIR}\")\n","\n","# ====================== Load Pipeline + Text Encoder ======================\n","print(\"\\nπŸ”„ Loading FLUX.2-klein-base-4B pipeline (Qwen3-4B text encoder)...\")\n","\n","pipe = Flux2KleinPipeline.from_pretrained(\n"," \"black-forest-labs/FLUX.2-klein-base-4B\",\n"," torch_dtype=torch.bfloat16,\n"," device_map=\"balanced\",\n"," low_cpu_mem_usage=True\n",")\n","\n","text_encoder = pipe.text_encoder\n","tokenizer = pipe.tokenizer\n","\n","# Force to CUDA and enable hidden states output\n","text_encoder = text_encoder.to(\"cuda\")\n","text_encoder.config.output_hidden_states = True\n","text_encoder.eval()\n","\n","print(\"βœ… Text encoder loaded and moved to CUDA\")\n","print(f\"Current VRAM used: 
{torch.cuda.memory_allocated()/1024**3:.2f} GB\")\n","\n","# Quick VRAM check\n","print(\"\\nπŸ” Quick VRAM check:\")\n","!nvidia-smi --query-gpu=memory.used,memory.total --format=csv"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"8hDfeHlNvPWE"},"outputs":[],"source":["# ============================= CELL 3.b: Precompute Exact Qwen3-4B Embeddings =============================\n","# @title 3.b Precompute Embeddings (using text encoder from 3.a)\n","\n","import torch\n","from tqdm import tqdm\n","\n","# Load your 250 texts\n","data = torch.load(\"/content/drive/MyDrive/qwen_embeddings_768.pt\", weights_only=False)\n","texts = data[\"texts\"]\n","\n","precomputed = {\n"," \"encoder_hidden_states\": [], # list of (seq_len, hidden_dim)\n"," \"pooled_projections\": [] # list of (hidden_dim,)\n","}\n","\n","with torch.no_grad():\n"," for i, raw_text in enumerate(tqdm(texts, desc=\"Precomputing embeddings\")):\n"," text = raw_text.strip()\n"," if not text:\n"," text = \"a photo of a scene\"\n","\n"," inputs = tokenizer(\n"," text,\n"," padding=\"max_length\",\n"," truncation=True,\n"," max_length=512,\n"," return_tensors=\"pt\",\n"," return_attention_mask=True\n"," ).to(\"cuda\")\n","\n"," if inputs[\"input_ids\"].shape[1] == 0:\n"," print(f\"Warning: zero-length sequence for index {i}, using fallback\")\n"," inputs = tokenizer(\n"," \"a photo of a scene\",\n"," padding=\"max_length\",\n"," truncation=True,\n"," max_length=512,\n"," return_tensors=\"pt\",\n"," return_attention_mask=True\n"," ).to(\"cuda\")\n","\n"," outputs = text_encoder(**inputs)\n","\n"," # Handle CausalLMOutputWithPast correctly\n"," if hasattr(outputs, \"hidden_states\") and outputs.hidden_states is not None:\n"," hidden = outputs.hidden_states[-1].squeeze(0).cpu() # final layer: (seq_len, hidden_dim)\n"," elif hasattr(outputs, \"last_hidden_state\"):\n"," hidden = outputs.last_hidden_state.squeeze(0).cpu()\n"," else:\n"," print(f\"Warning: unexpected output for 
text {i}, using logits as fallback\")\n"," hidden = outputs.logits.squeeze(0).cpu()\n","\n"," pooled = hidden.mean(dim=0).cpu() # (hidden_dim,)\n","\n"," precomputed[\"encoder_hidden_states\"].append(hidden)\n"," precomputed[\"pooled_projections\"].append(pooled)\n","\n","print(f\"βœ… Successfully precomputed embeddings for {len(texts)} texts\")\n","torch.save(precomputed, \"/content/drive/MyDrive/klein_exact_embeddings.pt\")\n","print(\"Saved to /content/drive/MyDrive/klein_exact_embeddings.pt\")"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"px5wQJZ2vTUf"},"outputs":[],"source":["# ============================= CELL 3.c: Unload Text Encoder + Prepare Workspace =============================\n","# @title 3.c Cleanup – Unload Qwen Encoder\n","\n","import gc\n","\n","# Unload pipeline and text encoder\n","if 'pipe' in globals():\n"," del pipe\n","if 'text_encoder' in globals():\n"," del text_encoder\n","if 'tokenizer' in globals():\n"," del tokenizer\n","\n","torch.cuda.empty_cache()\n","gc.collect()\n","\n","print(\"βœ… Text encoder and pipeline fully unloaded from VRAM\")\n","print(f\"Current VRAM used: {torch.cuda.memory_allocated()/1024**3:.2f} GB\")\n","\n","# Quick check for latents\n","latent_files = sorted([f for f in os.listdir(LATENT_DIR) if f.endswith(\".pt\")])\n","print(f\"Found {len(latent_files)} latents ready for training\")"]},{"cell_type":"markdown","metadata":{"id":"GDNO0bonrYAo"},"source":["lora training"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"llxOztXpvYMO"},"outputs":[],"source":["# ============================= CELL 4.a: Load Transformer + Apply LoRA =============================\n","# @title 4.a Load Transformer + LoRA (Fixed meta tensor loading)\n","\n","import torch\n","import gc\n","from torch.utils.data import Dataset\n","from peft import LoraConfig, get_peft_model\n","from diffusers import FluxTransformer2DModel\n","from transformers import 
set_seed\n","\n","set_seed(42)\n","\n","print(\"=== CELL 4.a – Loading Transformer + LoRA ===\")\n","print(f\"Current VRAM before loading: {torch.cuda.memory_allocated()/1024**3:.2f} GB\")\n","\n","# Load precomputed embeddings\n","precomputed = torch.load(\"/content/drive/MyDrive/klein_exact_embeddings.pt\", weights_only=True)\n","\n","# ====================== Load Transformer (Fixed) ======================\n","print(\"Loading FLUX.2-klein-base-4B transformer...\")\n","\n","transformer = FluxTransformer2DModel.from_pretrained(\n"," \"black-forest-labs/FLUX.2-klein-base-4B\",\n"," subfolder=\"transformer\",\n"," torch_dtype=torch.bfloat16,\n"," low_cpu_mem_usage=False # Critical: prevents meta tensors\n",").to(\"cuda\")\n","\n","print(f\"βœ… Transformer loaded successfully. VRAM: {torch.cuda.memory_allocated()/1024**3:.2f} GB\")\n","\n","# ====================== Apply LoRA ======================\n","lora_config = LoraConfig(\n"," r=LORA_RANK,\n"," lora_alpha=LORA_ALPHA,\n"," target_modules=[\n"," \"attn.to_q\", \"attn.to_k\", \"attn.to_v\", \"attn.to_out.0\",\n"," \"attn.to_qkv_mlp_proj\", \"attn.add_q_proj\", \"attn.add_k_proj\",\n"," \"attn.add_v_proj\", \"attn.to_add_out\",\n"," \"ff.linear_in\", \"ff.linear_out\", \"ff_context.linear_in\", \"ff_context.linear_out\"\n"," ],\n"," lora_dropout=0.05,\n"," bias=\"none\"\n",")\n","\n","transformer = get_peft_model(transformer, lora_config)\n","transformer.train()\n","\n","print(\"βœ… LoRA applied successfully\")\n","print(f\"VRAM after LoRA: {torch.cuda.memory_allocated()/1024**3:.2f} GB\")"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"rx5ngImGxoTT"},"outputs":[],"source":["# ============================= CELL 4.b: LoRA Training (Robust version for klein) =============================\n","# @title 4.b Training with Precomputed + Careful Reshape\n","\n","from transformers import Trainer, TrainingArguments\n","import torch.nn.functional as F\n","import torch.nn as nn\n","from tqdm 
import tqdm\n","\n","# Load latents (keep on CPU until needed)\n","latents_list = []\n","for lf in tqdm(sorted([f for f in os.listdir(LATENT_DIR) if f.endswith(\".pt\")]), desc=\"Loading latents\"):\n"," latent = torch.load(os.path.join(LATENT_DIR, lf), weights_only=True)\n"," if latent.dim() == 4 and latent.shape[0] == 1:\n"," latent = latent.squeeze(0)\n"," latents_list.append(latent)\n","latents = torch.stack(latents_list)\n","print(f\"Latents shape: {latents.shape}\")\n","\n","precomputed = torch.load(\"/content/drive/MyDrive/klein_exact_embeddings.pt\", weights_only=True)\n","\n","# Pooled: 2560 β†’ 768 (standard for pooled_projections in FluxTransformer2DModel)\n","pooled_projection = nn.Linear(2560, 768, bias=True, dtype=torch.bfloat16).to(\"cuda\")\n","with torch.no_grad():\n"," min_d = min(2560, 768)\n"," pooled_projection.weight.data[:, :min_d] = torch.eye(min_d, dtype=torch.bfloat16)\n"," pooled_projection.bias.data.zero_()\n","pooled_projection.train()\n","\n","class FluxLoRADataset(Dataset):\n"," def __init__(self, latents, precomputed):\n"," self.latents = latents\n"," self.encoder_hs = precomputed[\"encoder_hidden_states\"]\n"," self.pooled = precomputed[\"pooled_projections\"]\n","\n"," def __len__(self): return len(self.latents)\n","\n"," def __getitem__(self, idx):\n"," return {\n"," \"latent\": self.latents[idx],\n"," \"encoder_hidden_states\": self.encoder_hs[idx], # (seq_len, ~2560)\n"," \"pooled_raw\": self.pooled[idx]\n"," }\n","\n","def collate_fn(batch):\n"," return {\n"," \"latent\": torch.stack([item[\"latent\"] for item in batch]),\n"," \"encoder_hidden_states\": [item[\"encoder_hidden_states\"] for item in batch],\n"," \"pooled_raw\": torch.stack([item[\"pooled_raw\"] for item in batch])\n"," }\n","\n","class FluxLoRATrainer(Trainer):\n"," def compute_loss(self, model, inputs, return_outputs=False, num_items_in_batch=None):\n"," latents = inputs[\"latent\"].to(dtype=torch.bfloat16, device=model.device)\n"," encoder_hs_list = 
inputs[\"encoder_hidden_states\"]\n"," pooled_raw = inputs[\"pooled_raw\"].to(dtype=torch.bfloat16, device=model.device)\n","\n"," pooled_projections = pooled_projection(pooled_raw)\n","\n"," batch_size = latents.shape[0]\n"," timesteps = torch.rand(batch_size, device=latents.device)\n"," noise = torch.randn_like(latents)\n"," noisy_latents = (1 - timesteps.view(-1, 1, 1, 1)) * latents + timesteps.view(-1, 1, 1, 1) * noise\n","\n"," # === Careful reshape for encoder_hidden_states ===\n"," # Take first item (B=1), move to device, expand to expected width (usually 7680 = 3*2560)\n"," enc = encoder_hs_list[0].to(dtype=torch.bfloat16, device=model.device) # (seq_len, 2560)\n"," encoder_hidden_states = torch.cat([enc, enc, enc], dim=-1) # (seq_len, 7680)\n","\n"," # Placeholders - this is the part that often causes the 4D vs 5D cat error if wrong\n"," seq_len = encoder_hidden_states.shape[0]\n"," txt_ids = torch.zeros((seq_len, 3), device=model.device, dtype=torch.bfloat16)\n","\n"," # Image tokens: for 1024x1024 latents (16x128x128) the number is usually (128*128) // 4 or similar\n"," img_tokens = latents.shape[2] * latents.shape[3] // 4\n"," img_ids = torch.zeros((img_tokens, 3), device=model.device, dtype=torch.bfloat16)\n","\n"," model_output = model(\n"," hidden_states=noisy_latents,\n"," timestep=timesteps * 1000,\n"," encoder_hidden_states=encoder_hidden_states,\n"," pooled_projections=pooled_projections,\n"," txt_ids=txt_ids,\n"," img_ids=img_ids,\n"," return_dict=False\n"," )[0]\n","\n"," target = noise - latents\n"," loss = F.mse_loss(model_output, target)\n","\n"," return (loss, model_output) if return_outputs else loss\n","\n","training_args = TrainingArguments(\n"," output_dir=\"/content/flux_klein_lora\",\n"," per_device_train_batch_size=BATCH_SIZE,\n"," num_train_epochs=NUM_EPOCHS,\n"," learning_rate=LEARNING_RATE,\n"," lr_scheduler_type=\"cosine\",\n"," warmup_steps=50,\n"," bf16=True,\n"," logging_steps=10,\n"," save_strategy=\"epoch\",\n"," 
save_total_limit=2,\n"," report_to=\"none\",\n"," remove_unused_columns=False,\n"," dataloader_pin_memory=False,\n",")\n","\n","trainer = FluxLoRATrainer(\n"," model=transformer,\n"," args=training_args,\n"," train_dataset=dataset,\n"," data_collator=collate_fn,\n",")\n","\n","print(\"\\nπŸš€ Starting training...\")\n","trainer.train()"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"DRoYrgC-rbkt"},"outputs":[],"source":["# ============================= CELL 4.c: Save LoRA =============================\n","# @title 4.c Save Final LoRA\n","\n","final_lora_dir = FINAL_LORA_DIR\n","os.makedirs(final_lora_dir, exist_ok=True)\n","transformer.save_pretrained(final_lora_dir)\n","\n","print(f\"\\nβœ… Training completed! LoRA saved to: {final_lora_dir}\")\n","torch.cuda.empty_cache()\n","gc.collect()"]}],"metadata":{"accelerator":"GPU","colab":{"gpuType":"T4","provenance":[{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Qwen destill.ipynb","timestamp":1775130462813}]},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"name":"python"}},"nbformat":4,"nbformat_minor":0}