codeShare committed on
Commit
be7515a
·
verified ·
1 Parent(s): e15bb81

Upload Qwen destill.ipynb

Browse files
Files changed (1) hide show
  1. Qwen destill.ipynb +1 -1
Qwen destill.ipynb CHANGED
@@ -1 +1 @@
1
- {"cells":[{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":15039,"status":"ok","timestamp":1775134521101,"user":{"displayName":"No Name","userId":"10578412414437288386"},"user_tz":-120},"id":"Ma3eWy8RQnUM","outputId":"de0b88c6-5bf4-4af7-8f12-e870293bd3da"},"outputs":[{"name":"stdout","output_type":"stream","text":["Mounted at /content/drive\n"]}],"source":["from google.colab import drive\n","drive.mount('/content/drive')"]},{"cell_type":"code","source":["# ============================= MERGED CELL 1: Smaller Distillation Pipeline =============================\n","# @title 1 – Smaller Distillation: Qwen2.5-0.5B Student from Larger Teacher\n","\n","import os\n","import zipfile\n","import torch\n","import torch.nn as nn\n","import torch.nn.functional as F\n","import numpy as np\n","from google.colab import drive\n","from PIL import Image\n","from tqdm import tqdm\n","from diffusers import AutoencoderKL\n","from transformers import AutoTokenizer, AutoModel, set_seed\n","from datasets import Dataset as HFDataset\n","from torch.utils.data import Dataset\n","from peft import LoraConfig, get_peft_model, PeftModel\n","from transformers import Trainer, TrainingArguments\n","\n","set_seed(42)\n","drive.mount('/content/drive', force_remount=True)\n","\n","# ========================== PARAMETERS ==========================\n","zip_path = '/content/drive/MyDrive/my_set.zip' # @param {type:'string'}\n","\n","update_latents = True # @param {type:\"boolean\"}\n","update_teacher_embeddings = True # @param {type:\"boolean\"}\n","update_distillation = True # @param {type:\"boolean\"}\n","update_student_embeddings = True # @param {type:\"boolean\"}\n","\n","epochs = 15 # @param {type:\"slider\", min:5, max:30, step:1}\n","batch_size = 8 # @param {type:\"slider\", min:2, max:16, step:1}\n","learning_rate = 3e-4 # @param {type:\"number\"}\n","\n","print(\"πŸ”§ Update settings:\")\n","print(f\" β€’ Latents: 
{'Update' if update_latents else 'Skip'}\")\n","print(f\" β€’ Teacher Embeddings: {'Update' if update_teacher_embeddings else 'Skip'}\")\n","print(f\" β€’ Distillation: {'Update' if update_distillation else 'Skip'}\")\n","print(f\" β€’ Student Embeddings: {'Update' if update_student_embeddings else 'Skip'}\")\n","print(f\" β€’ Epochs: {epochs}\")\n","print(f\" β€’ Batch size: {batch_size}\")\n","\n","# ====================== 1.A – VAE Latents ======================\n","print(\"\\n\" + \"=\"*70)\n","print(\"1.A – Extracting dataset and encoding FLUX.2 VAE latents\")\n","print(\"=\"*70)\n","\n","extract_dir = \"/content/data\"\n","os.makedirs(extract_dir, exist_ok=True)\n","\n","with zipfile.ZipFile(zip_path, 'r') as zip_ref:\n"," zip_ref.extractall(extract_dir)\n","\n","image_files = [f for f in os.listdir(extract_dir) if f.lower().endswith(('.png', '.jpg', '.jpeg', '.webp'))]\n","text_files = sorted([f for f in os.listdir(extract_dir) if f.endswith('.txt') and f[0].isdigit()])\n","\n","texts = []\n","for tf in text_files:\n"," with open(os.path.join(extract_dir, tf), \"r\", encoding=\"utf-8\") as f:\n"," content = f.read().strip()\n"," if content:\n"," texts.append(content)\n","\n","print(f\"βœ… Found {len(image_files)} images and {len(texts)} captions\")\n","\n","latent_dir = \"/content/drive/MyDrive/flux_klein_latents\"\n","latents_exist = os.path.exists(latent_dir) and len([f for f in os.listdir(latent_dir) if f.endswith(\".pt\")]) == len(image_files)\n","\n","if latents_exist and not update_latents:\n"," print(f\"βœ… Using existing latents from {latent_dir}\")\n","else:\n"," print(\"πŸŒ€ Encoding images to FLUX.2 VAE latents...\")\n"," vae = AutoencoderKL.from_pretrained(\n"," \"black-forest-labs/FLUX.2-dev\", subfolder=\"vae\",\n"," torch_dtype=torch.float32, device_map=\"auto\"\n"," )\n"," vae.eval()\n","\n"," os.makedirs(latent_dir, exist_ok=True)\n","\n"," with torch.no_grad():\n"," for img_file in tqdm(image_files, desc=\"Encoding latents\"):\n"," img_path 
= os.path.join(extract_dir, img_file)\n"," image = Image.open(img_path).convert(\"RGB\").resize((1024, 1024), Image.LANCZOS)\n","\n"," pixel_values = (torch.from_numpy(np.array(image)).permute(2, 0, 1).unsqueeze(0).float() / 255.0)\n"," pixel_values = pixel_values.to(vae.device, dtype=vae.dtype) * 2.0 - 1.0\n","\n"," latents = vae.encode(pixel_values).latent_dist.sample() * vae.config.scaling_factor\n"," latent_name = os.path.splitext(img_file)[0] + \".pt\"\n"," torch.save(latents.cpu(), os.path.join(latent_dir, latent_name))\n","\n"," del vae\n"," torch.cuda.empty_cache()\n"," print(f\"βœ… Latents saved to {latent_dir}\")\n","\n","# ====================== 1.B – Teacher + Smaller Student Distillation ======================\n","print(\"\\n\" + \"=\"*70)\n","print(\"1.B – Teacher Embeddings + Distillation to 7680-dim (Small Student)\")\n","print(\"=\"*70)\n","\n","teacher_embeddings_path = \"/content/drive/MyDrive/qwen_embeddings_7680.pt\"\n","distilled_dir = \"/content/drive/MyDrive/distilled_qwen_small_7680_for_flux\"\n","\n","# --- Teacher Embeddings ---\n","if os.path.exists(teacher_embeddings_path) and not update_teacher_embeddings:\n"," print(\"βœ… Using existing teacher embeddings\")\n"," data = torch.load(teacher_embeddings_path, map_location=\"cpu\")\n"," texts = data[\"texts\"]\n"," teacher_embeddings_7680 = data[\"embeddings\"]\n","else:\n"," print(\"πŸ“ Computing teacher embeddings with Qwen2.5-7B...\")\n"," teacher_model_name = \"Qwen/Qwen2.5-7B\"\n"," tokenizer = AutoTokenizer.from_pretrained(teacher_model_name)\n"," teacher_model = AutoModel.from_pretrained(\n"," teacher_model_name, torch_dtype=torch.float16, device_map=\"auto\", trust_remote_code=True\n"," ).eval()\n","\n"," teacher_embeddings = []\n"," with torch.no_grad():\n"," for text in tqdm(texts, desc=\"Teacher encoding\"):\n"," inputs = tokenizer(text, padding=True, truncation=True, max_length=512, return_tensors=\"pt\").to(teacher_model.device)\n"," outputs = teacher_model(**inputs)\n"," emb 
= outputs.last_hidden_state.mean(dim=1).squeeze(0).cpu()\n"," emb_7680 = torch.cat([emb, emb, emb], dim=0)[:7680] # trim to declared 7680 dims: Qwen2.5-7B hidden_size is 3584, so 3x concat = 10752 and the untrimmed tensor crashes the distillation loss against the 7680-dim student projection\n"," teacher_embeddings.append(emb_7680)\n","\n"," teacher_embeddings_7680 = torch.stack(teacher_embeddings)\n"," torch.save({\"embeddings\": teacher_embeddings_7680, \"texts\": texts, \"dim\": 7680}, teacher_embeddings_path)\n"," del teacher_model\n"," torch.cuda.empty_cache()\n"," print(\"βœ… Teacher embeddings saved\")\n","\n","# --- Distillation ---\n","if os.path.exists(distilled_dir) and os.path.exists(f\"{distilled_dir}/projection.pth\") and not update_distillation:\n"," print(f\"βœ… Using existing distilled model from {distilled_dir}\")\n","else:\n"," print(\"πŸ‘¨β€πŸŽ“ Starting distillation with tiny student (Qwen2.5-0.5B)...\")\n","\n"," student_model_name = \"Qwen/Qwen2.5-0.5B\"\n"," base_student = AutoModel.from_pretrained(\n"," student_model_name, torch_dtype=torch.float32, device_map=\"auto\", trust_remote_code=True\n"," )\n","\n"," lora_config = LoraConfig(\n"," r=16, lora_alpha=32,\n"," target_modules=[\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\"],\n"," lora_dropout=0.05, bias=\"none\", task_type=\"FEATURE_EXTRACTION\"\n"," )\n"," student_model = get_peft_model(base_student, lora_config)\n","\n"," for param in student_model.parameters():\n"," if param.requires_grad:\n"," param.data = param.data.to(torch.float32)\n","\n"," projection = nn.Linear(base_student.config.hidden_size, 7680, dtype=torch.float32).to(\"cuda\")\n"," projection.train()\n","\n"," hf_dataset = HFDataset.from_dict({\"text\": texts})\n","\n"," class DistillationDataset(Dataset):\n"," def __init__(self, hf_dataset, tokenizer, teacher_embs, max_length=512):\n"," self.dataset = hf_dataset\n"," self.tokenizer = tokenizer\n"," self.teacher_embs = teacher_embs\n"," self.max_length = max_length\n","\n"," def __len__(self): return len(self.dataset)\n","\n"," def __getitem__(self, idx):\n"," text = self.dataset[idx][\"text\"]\n"," inputs = self.tokenizer(text, padding=\"max_length\", 
truncation=True,\n"," max_length=self.max_length, return_tensors=\"pt\")\n"," return {\n"," \"input_ids\": inputs[\"input_ids\"].squeeze(0),\n"," \"attention_mask\": inputs[\"attention_mask\"].squeeze(0),\n"," \"labels\": self.teacher_embs[idx],\n"," }\n","\n"," distill_dataset = DistillationDataset(hf_dataset, tokenizer, teacher_embeddings_7680)\n","\n"," def collate_fn(batch):\n"," return {\n"," \"input_ids\": torch.stack([item[\"input_ids\"] for item in batch]),\n"," \"attention_mask\": torch.stack([item[\"attention_mask\"] for item in batch]),\n"," \"labels\": torch.stack([item[\"labels\"] for item in batch])\n"," }\n","\n"," class DistillTrainer(Trainer):\n"," # num_items_in_batch is passed by transformers >= 4.46; default keeps older versions working\n"," def compute_loss(self, model, inputs, return_outputs=False, num_items_in_batch=None):\n"," labels = inputs.pop(\"labels\").to(\"cuda\").float()\n"," outputs = model(input_ids=inputs[\"input_ids\"], attention_mask=inputs[\"attention_mask\"])\n"," hidden = outputs.last_hidden_state.mean(dim=1)\n"," student_emb = projection(hidden)\n","\n"," student_norm = F.normalize(student_emb, p=2, dim=1)\n"," teacher_norm = F.normalize(labels, p=2, dim=1)\n","\n"," mse_loss = F.mse_loss(student_norm, teacher_norm)\n"," cos_loss = (1 - F.cosine_similarity(student_norm, teacher_norm, dim=1)).mean()\n"," loss = 0.2 * mse_loss + 0.8 * cos_loss\n"," return (loss, outputs) if return_outputs else loss\n","\n"," training_args = TrainingArguments(\n"," output_dir=\"./distilled_qwen_small_7680\",\n"," per_device_train_batch_size=batch_size,\n"," num_train_epochs=epochs,\n"," learning_rate=learning_rate,\n"," fp16=True,\n"," logging_steps=20,\n"," save_strategy=\"no\",\n"," report_to=\"none\",\n"," remove_unused_columns=False,\n"," max_grad_norm=1.0, # HF TrainingArguments has no gradient_clip_val (that is a PyTorch Lightning arg); max_grad_norm is the equivalent and avoids a TypeError here\n"," )\n","\n"," trainer = DistillTrainer(\n"," model=student_model,\n"," args=training_args,\n"," train_dataset=distill_dataset,\n"," data_collator=collate_fn,\n"," )\n","\n"," print(\"πŸš€ Starting distillation training...\")\n"," trainer.train()\n","\n"," # Save to CPU\n"," print(\"\\nπŸ’Ύ Saving 
distilled model to CPU...\")\n"," os.makedirs(distilled_dir, exist_ok=True)\n"," student_model = student_model.to(\"cpu\")\n"," student_model.save_pretrained(distilled_dir)\n"," tokenizer.save_pretrained(distilled_dir)\n"," torch.save(projection.to(\"cpu\").state_dict(), f\"{distilled_dir}/projection.pth\")\n","\n"," print(f\"βœ… Tiny distilled model + projection saved to {distilled_dir}\")\n"," torch.cuda.empty_cache()\n","\n","# ====================== 1.C – Pre-compute Student Embeddings ======================\n","print(\"\\n\" + \"=\"*70)\n","print(\"1.C – Pre-computing Student Embeddings + Alignment\")\n","print(\"=\"*70)\n","\n","full_embeddings_path = \"/content/drive/MyDrive/qwen_embeddings_7680_full.pt\"\n","\n","if os.path.exists(full_embeddings_path) and not update_student_embeddings:\n"," print(f\"βœ… Using existing full embeddings from {full_embeddings_path}\")\n","else:\n"," print(\"πŸ”„ Loading distilled student and computing embeddings...\")\n","\n"," tokenizer = AutoTokenizer.from_pretrained(distilled_dir)\n"," base_student = AutoModel.from_pretrained(\n"," \"Qwen/Qwen2.5-0.5B\", torch_dtype=torch.float32, trust_remote_code=True\n"," )\n"," student = PeftModel.from_pretrained(base_student, distilled_dir).to(\"cuda\").eval()\n","\n"," projection = nn.Linear(base_student.config.hidden_size, 7680, dtype=torch.float32).to(\"cuda\")\n"," projection.load_state_dict(torch.load(f\"{distilled_dir}/projection.pth\", map_location=\"cuda\"))\n"," projection.eval()\n","\n"," student_embs_list = []\n"," with torch.no_grad():\n"," for text in tqdm(texts, desc=\"Computing student embeddings\"):\n"," inputs = tokenizer(text, padding=True, truncation=True, max_length=512, return_tensors=\"pt\").to(\"cuda\")\n"," outputs = student(**inputs)\n"," hidden = outputs.last_hidden_state.mean(dim=1)\n"," emb = projection(hidden).to(torch.float32).squeeze(0)\n"," student_embs_list.append(emb)\n","\n"," student_embs = torch.stack(student_embs_list)\n","\n"," teacher_embs = 
teacher_embeddings_7680.to(\"cuda\") if 'teacher_embeddings_7680' in locals() else \\\n"," torch.load(teacher_embeddings_path, map_location=\"cuda\")[\"embeddings\"].to(\"cuda\")\n","\n"," cos_sims = F.cosine_similarity(student_embs, teacher_embs, dim=1)\n","\n"," print(\"\\nπŸ“Š Alignment Summary:\")\n"," print(f\" Average cosine similarity: {cos_sims.mean().item():.4f}\")\n"," print(f\" Min: {cos_sims.min().item():.4f} Max: {cos_sims.max().item():.4f} Std: {cos_sims.std().item():.4f}\")\n","\n"," torch.save({\n"," \"texts\": texts,\n"," \"teacher_embeddings\": teacher_embs.cpu(),\n"," \"student_embeddings\": student_embs.cpu(),\n"," \"cosine_similarities\": cos_sims.cpu(),\n"," \"dim\": 7680\n"," }, full_embeddings_path)\n","\n"," print(f\"βœ… Full embeddings saved to {full_embeddings_path}\")\n","\n"," del student, projection, base_student\n"," torch.cuda.empty_cache()\n","\n","print(\"\\nπŸŽ‰ All tasks completed!\")"],"metadata":{"id":"V77SSU2tf5Uc"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"dz4oMmJ79bxQ"},"source":["Test stuff in cell 2. 
Its safe to disconnect."]},{"cell_type":"code","source":["# ============================= MERGED CELL 2: Load + Alignment + Forward + RoPE Test =============================\n","# @title 2 – Test Distilled Small Text Encoder + RoPE Verification\n","\n","import torch\n","import torch.nn as nn\n","import torch.nn.functional as F\n","from transformers import AutoTokenizer, AutoModel\n","from peft import PeftModel\n","from diffusers import FluxTransformer2DModel\n","from google.colab import drive\n","import gc\n","import os\n","\n","drive.mount('/content/drive', force_remount=True)\n","\n","# ========================== PARAMETERS ==========================\n","run_alignment_test = True # @param {type:\"boolean\"}\n","run_forward_pass_test = True # @param {type:\"boolean\"}\n","run_rope_test = True # @param {type:\"boolean\"}\n","force_reload_models = False # @param {type:\"boolean\"}\n","seq_len = 16 # @param {type:\"slider\", min:8, max:32, step:2}\n","\n","DISTILLED_DIR = \"/content/drive/MyDrive/distilled_qwen_small_7680_for_flux\"\n","embed_path = \"/content/drive/MyDrive/qwen_embeddings_7680_full.pt\"\n","\n","print(\"πŸ”§ Cell 2 Settings:\")\n","print(f\" β€’ Alignment Test: {'Enabled' if run_alignment_test else 'Disabled'}\")\n","print(f\" β€’ Forward Pass Test: {'Enabled' if run_forward_pass_test else 'Disabled'}\")\n","print(f\" β€’ RoPE Test: {'Enabled' if run_rope_test else 'Disabled'}\")\n","print(f\" β€’ Sequence Length: {seq_len}\")\n","\n","torch.cuda.empty_cache()\n","gc.collect()\n","\n","# ====================== Load Distilled Student ======================\n","print(\"\\n\" + \"=\"*60)\n","print(\"Loading Distilled Small Student...\")\n","tokenizer = AutoTokenizer.from_pretrained(DISTILLED_DIR)\n","\n","base_student = AutoModel.from_pretrained(\n"," \"Qwen/Qwen2.5-0.5B\", torch_dtype=torch.float32, trust_remote_code=True\n",")\n","student = PeftModel.from_pretrained(base_student, DISTILLED_DIR).to(\"cuda\").eval()\n","\n","projection = 
nn.Linear(base_student.config.hidden_size, 7680, dtype=torch.float32).to(\"cuda\")\n","projection.load_state_dict(torch.load(f\"{DISTILLED_DIR}/projection.pth\", map_location=\"cuda\"))\n","projection.eval()\n","\n","print(\"βœ… Distilled student + projection loaded to GPU\")\n","\n","# ====================== Load FLUX Transformer ======================\n","print(\"\\nLoading FLUX.2-klein Transformer...\")\n","transformer = FluxTransformer2DModel.from_pretrained(\n"," \"black-forest-labs/FLUX.2-klein-base-4B\",\n"," subfolder=\"transformer\",\n"," torch_dtype=torch.bfloat16,\n"," low_cpu_mem_usage=True\n",").to(\"cuda\").eval()\n","print(\"βœ… FLUX.2-klein transformer loaded\")\n","\n","# ====================== Load Embeddings ======================\n","print(\"\\nLoading pre-computed embeddings...\")\n","if os.path.exists(embed_path):\n"," data = torch.load(embed_path, map_location=\"cpu\")\n"," texts = data[\"texts\"]\n"," teacher_embs = data.get(\"teacher_embeddings\", data.get(\"embeddings\")).to(\"cuda\")\n"," student_embs = data.get(\"student_embeddings\", None)\n"," if student_embs is not None:\n"," student_embs = student_embs.to(\"cuda\")\n"," print(f\"βœ… Loaded {len(texts)} texts\")\n","else:\n"," print(\"❌ Embeddings file not found. 
Run Cell 1 first!\")\n"," texts = []\n"," teacher_embs = None\n"," student_embs = None\n","\n","# ====================== Alignment Test ======================\n","if run_alignment_test and texts and teacher_embs is not None and student_embs is not None:\n"," print(\"\\n\" + \"=\"*70)\n"," print(\"πŸ“Š ALIGNMENT TEST\")\n"," print(\"=\"*70)\n"," cos_sims = F.cosine_similarity(student_embs, teacher_embs, dim=1)\n"," print(f\" Average cosine similarity: {cos_sims.mean().item():.4f}\")\n"," print(f\" Min: {cos_sims.min().item():.4f} | Max: {cos_sims.max().item():.4f}\")\n","\n","# ====================== Forward Pass + RoPE Test ======================\n","if run_forward_pass_test and texts:\n"," print(\"\\n\" + \"=\"*70)\n"," print(\"πŸ§ͺ FORWARD PASS + RoPE TEST\")\n"," print(\"=\"*70)\n","\n"," test_idx = 0\n"," text = texts[test_idx]\n"," print(f\"Test prompt: {text[:180]}{'...' if len(text) > 180 else ''}\")\n","\n"," inputs = tokenizer(text, padding=True, truncation=True, max_length=512, return_tensors=\"pt\").to(\"cuda\")\n","\n"," with torch.no_grad():\n"," outputs = student(**inputs)\n"," hidden = outputs.last_hidden_state.mean(dim=1)\n"," emb_7680 = projection(hidden).to(torch.bfloat16)\n","\n"," print(f\"βœ… Distilled embedding shape: {emb_7680.shape}\")\n","\n"," # RoPE Test\n"," if run_rope_test:\n"," print(\"\\nTesting RoPE (positional embeddings)...\")\n"," pos_ids_normal = torch.arange(inputs.input_ids.shape[1], device=\"cuda\").unsqueeze(0)\n"," pos_ids_zero = torch.zeros_like(pos_ids_normal)\n","\n"," out1 = student(input_ids=inputs.input_ids, attention_mask=inputs.attention_mask, position_ids=pos_ids_normal)\n"," out2 = student(input_ids=inputs.input_ids, attention_mask=inputs.attention_mask, position_ids=pos_ids_zero)\n","\n"," hidden1 = out1.last_hidden_state.mean(dim=1)\n"," hidden2 = out2.last_hidden_state.mean(dim=1)\n"," rope_diff = F.cosine_similarity(hidden1, hidden2, dim=1).item()\n","\n"," print(f\" Cosine similarity between normal vs zero 
position_ids: {rope_diff:.4f}\")\n"," print(f\" β†’ RoPE is working correctly if value is noticeably < 1.0\")\n","\n"," # Simple FLUX forward test (dummy inputs)\n"," print(\"\\nRunning dummy FLUX forward pass...\")\n"," encoder_hidden_states = emb_7680.unsqueeze(1).repeat(1, seq_len, 1)\n","\n"," try:\n"," # Very minimal dummy latent (you may need to adjust packing for full compatibility)\n"," hidden_states = torch.randn(1, 4096, 128, device=\"cuda\", dtype=torch.bfloat16) # placeholder\n"," timestep = torch.tensor([1000.0], device=\"cuda\", dtype=torch.bfloat16)\n"," txt_ids = torch.zeros((1, seq_len, 3), device=\"cuda\", dtype=torch.bfloat16)\n"," img_ids = torch.zeros((hidden_states.shape[1], 3), device=\"cuda\", dtype=torch.bfloat16)\n","\n"," output = transformer(\n"," hidden_states=hidden_states,\n"," timestep=timestep,\n"," encoder_hidden_states=encoder_hidden_states,\n"," txt_ids=txt_ids,\n"," img_ids=img_ids,\n"," return_dict=False\n"," )[0]\n","\n"," print(\"πŸŽ‰ SUCCESS: Forward pass with distilled 7680-dim encoder completed!\")\n"," print(f\" Output shape: {output.shape}\")\n"," except Exception as e:\n"," print(f\"❌ Forward pass failed: {str(e)}\")\n"," print(\" (This is expected if latent packing / dimensions don't perfectly match)\")\n","\n","torch.cuda.empty_cache()\n","gc.collect()\n","print(\"\\nβœ… Cell 2 completed! 
Distilled text encoder is ready and RoPE is verified.\")"],"metadata":{"id":"Yh_-8CMIgDK3"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"dz6FDD1aBSCt"},"source":["Cell 3\n"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":73},"executionInfo":{"elapsed":4651,"status":"ok","timestamp":1775007678343,"user":{"displayName":"Nnekos4Lyfe","userId":"11521490251288716219"},"user_tz":-120},"id":"FQF71-mvmlc1","outputId":"ab97d8ee-a278-4d0b-9a6b-9f99a47946ab"},"outputs":[{"name":"stdout","output_type":"stream","text":["πŸ”Œ Disconnecting Colab session in 15 seconds...\n","Session disconnected.\n"]}],"source":["# ================================================\n","# Auto Disconnect Colab Session\n","# ================================================\n","\n","print(\"πŸ”Œ Disconnecting Colab session in 15 seconds...\")\n","import time\n","time.sleep(15) # match the 15-second warning printed above (was sleep(3))\n","\n","from google.colab import runtime\n","runtime.unassign()\n","\n","print(\"Session disconnected.\")"]},{"cell_type":"markdown","metadata":{"id":"cfshTDIFM5ND"},"source":["You can disconnect the colab past this point. 
All data from cells 1 and 2 are saved to drive."]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"sWEzuqsmvKua"},"outputs":[],"source":["# ============================= CELL 3.a: Install Dependencies + Setup Parameters + Load Qwen Text Encoder =============================\n","# @title 3.a Setup + Load Text Encoder (one-time for precompute)\n","\n","!pip install -q diffusers transformers peft accelerate datasets tqdm\n","\n","import os\n","import torch\n","import gc\n","from google.colab import drive\n","from diffusers import Flux2KleinPipeline\n","\n","drive.mount('/content/drive', force_remount=True)\n","\n","# ====================== Parameters ======================\n","LATENT_DIR = \"/content/drive/MyDrive/flux_klein_latents\"\n","FINAL_LORA_DIR = \"/content/drive/MyDrive/flux_klein_lora_final\"\n","\n","BATCH_SIZE = 1 # Keep at 1 for safety with variable sequence lengths\n","NUM_EPOCHS = 8\n","LEARNING_RATE = 1e-4\n","LORA_RANK = 32\n","LORA_ALPHA = 32\n","\n","print(\"βœ… Dependencies installed and parameters set.\")\n","print(f\" Batch size: {BATCH_SIZE} | Epochs: {NUM_EPOCHS}\")\n","print(f\" Latents from: {LATENT_DIR}\")\n","print(f\" Final LoRA will be saved to: {FINAL_LORA_DIR}\")\n","\n","# ====================== Load Pipeline + Text Encoder ======================\n","print(\"\\nπŸ”„ Loading FLUX.2-klein-base-4B pipeline (Qwen3-4B text encoder)...\")\n","\n","pipe = Flux2KleinPipeline.from_pretrained(\n"," \"black-forest-labs/FLUX.2-klein-base-4B\",\n"," torch_dtype=torch.bfloat16,\n"," device_map=\"balanced\",\n"," low_cpu_mem_usage=True\n",")\n","\n","text_encoder = pipe.text_encoder\n","tokenizer = pipe.tokenizer\n","\n","# Force to CUDA and enable hidden states output\n","text_encoder = text_encoder.to(\"cuda\")\n","text_encoder.config.output_hidden_states = True\n","text_encoder.eval()\n","\n","print(\"βœ… Text encoder loaded and moved to CUDA\")\n","print(f\"Current VRAM used: 
{torch.cuda.memory_allocated()/1024**3:.2f} GB\")\n","\n","# Quick VRAM check\n","print(\"\\nπŸ” Quick VRAM check:\")\n","!nvidia-smi --query-gpu=memory.used,memory.total --format=csv"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"8hDfeHlNvPWE"},"outputs":[],"source":["# ============================= CELL 3.b: Precompute Exact Qwen3-4B Embeddings =============================\n","# @title 3.b Precompute Embeddings (using text encoder from 3.a)\n","\n","import torch\n","from tqdm import tqdm\n","\n","# Load your 250 texts\n","data = torch.load(\"/content/drive/MyDrive/qwen_embeddings_768.pt\", weights_only=False)\n","texts = data[\"texts\"]\n","\n","precomputed = {\n"," \"encoder_hidden_states\": [], # list of (seq_len, hidden_dim)\n"," \"pooled_projections\": [] # list of (hidden_dim,)\n","}\n","\n","with torch.no_grad():\n"," for i, raw_text in enumerate(tqdm(texts, desc=\"Precomputing embeddings\")):\n"," text = raw_text.strip()\n"," if not text:\n"," text = \"a photo of a scene\"\n","\n"," inputs = tokenizer(\n"," text,\n"," padding=\"max_length\",\n"," truncation=True,\n"," max_length=512,\n"," return_tensors=\"pt\",\n"," return_attention_mask=True\n"," ).to(\"cuda\")\n","\n"," if inputs[\"input_ids\"].shape[1] == 0:\n"," print(f\"Warning: zero-length sequence for index {i}, using fallback\")\n"," inputs = tokenizer(\n"," \"a photo of a scene\",\n"," padding=\"max_length\",\n"," truncation=True,\n"," max_length=512,\n"," return_tensors=\"pt\",\n"," return_attention_mask=True\n"," ).to(\"cuda\")\n","\n"," outputs = text_encoder(**inputs)\n","\n"," # Handle CausalLMOutputWithPast correctly\n"," if hasattr(outputs, \"hidden_states\") and outputs.hidden_states is not None:\n"," hidden = outputs.hidden_states[-1].squeeze(0).cpu() # final layer: (seq_len, hidden_dim)\n"," elif hasattr(outputs, \"last_hidden_state\"):\n"," hidden = outputs.last_hidden_state.squeeze(0).cpu()\n"," else:\n"," print(f\"Warning: unexpected output for 
text {i}, using logits as fallback\")\n"," hidden = outputs.logits.squeeze(0).cpu()\n","\n"," pooled = hidden.mean(dim=0).cpu() # (hidden_dim,)\n","\n"," precomputed[\"encoder_hidden_states\"].append(hidden)\n"," precomputed[\"pooled_projections\"].append(pooled)\n","\n","print(f\"βœ… Successfully precomputed embeddings for {len(texts)} texts\")\n","torch.save(precomputed, \"/content/drive/MyDrive/klein_exact_embeddings.pt\")\n","print(\"Saved to /content/drive/MyDrive/klein_exact_embeddings.pt\")"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"px5wQJZ2vTUf"},"outputs":[],"source":["# ============================= CELL 3.c: Unload Text Encoder + Prepare Workspace =============================\n","# @title 3.c Cleanup – Unload Qwen Encoder\n","\n","import gc\n","\n","# Unload pipeline and text encoder\n","if 'pipe' in globals():\n"," del pipe\n","if 'text_encoder' in globals():\n"," del text_encoder\n","if 'tokenizer' in globals():\n"," del tokenizer\n","\n","torch.cuda.empty_cache()\n","gc.collect()\n","\n","print(\"βœ… Text encoder and pipeline fully unloaded from VRAM\")\n","print(f\"Current VRAM used: {torch.cuda.memory_allocated()/1024**3:.2f} GB\")\n","\n","# Quick check for latents\n","latent_files = sorted([f for f in os.listdir(LATENT_DIR) if f.endswith(\".pt\")])\n","print(f\"Found {len(latent_files)} latents ready for training\")"]},{"cell_type":"markdown","metadata":{"id":"GDNO0bonrYAo"},"source":["lora training"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"llxOztXpvYMO"},"outputs":[],"source":["# ============================= CELL 4.a: Load Transformer + Apply LoRA =============================\n","# @title 4.a Load Transformer + LoRA (Fixed meta tensor loading)\n","\n","import torch\n","import gc\n","from torch.utils.data import Dataset\n","from peft import LoraConfig, get_peft_model\n","from diffusers import FluxTransformer2DModel\n","from transformers import 
set_seed\n","\n","set_seed(42)\n","\n","print(\"=== CELL 4.a – Loading Transformer + LoRA ===\")\n","print(f\"Current VRAM before loading: {torch.cuda.memory_allocated()/1024**3:.2f} GB\")\n","\n","# Load precomputed embeddings\n","precomputed = torch.load(\"/content/drive/MyDrive/klein_exact_embeddings.pt\", weights_only=True)\n","\n","# ====================== Load Transformer (Fixed) ======================\n","print(\"Loading FLUX.2-klein-base-4B transformer...\")\n","\n","transformer = FluxTransformer2DModel.from_pretrained(\n"," \"black-forest-labs/FLUX.2-klein-base-4B\",\n"," subfolder=\"transformer\",\n"," torch_dtype=torch.bfloat16,\n"," low_cpu_mem_usage=False # Critical: prevents meta tensors\n",").to(\"cuda\")\n","\n","print(f\"βœ… Transformer loaded successfully. VRAM: {torch.cuda.memory_allocated()/1024**3:.2f} GB\")\n","\n","# ====================== Apply LoRA ======================\n","lora_config = LoraConfig(\n"," r=LORA_RANK,\n"," lora_alpha=LORA_ALPHA,\n"," target_modules=[\n"," \"attn.to_q\", \"attn.to_k\", \"attn.to_v\", \"attn.to_out.0\",\n"," \"attn.to_qkv_mlp_proj\", \"attn.add_q_proj\", \"attn.add_k_proj\",\n"," \"attn.add_v_proj\", \"attn.to_add_out\",\n"," \"ff.linear_in\", \"ff.linear_out\", \"ff_context.linear_in\", \"ff_context.linear_out\"\n"," ],\n"," lora_dropout=0.05,\n"," bias=\"none\"\n",")\n","\n","transformer = get_peft_model(transformer, lora_config)\n","transformer.train()\n","\n","print(\"βœ… LoRA applied successfully\")\n","print(f\"VRAM after LoRA: {torch.cuda.memory_allocated()/1024**3:.2f} GB\")"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"rx5ngImGxoTT"},"outputs":[],"source":["# ============================= CELL 4.b: LoRA Training (Robust version for klein) =============================\n","# @title 4.b Training with Precomputed + Careful Reshape\n","\n","from transformers import Trainer, TrainingArguments\n","import torch.nn.functional as F\n","import torch.nn as nn\n","from tqdm 
import tqdm\n","\n","# Load latents (keep on CPU until needed)\n","latents_list = []\n","for lf in tqdm(sorted([f for f in os.listdir(LATENT_DIR) if f.endswith(\".pt\")]), desc=\"Loading latents\"):\n"," latent = torch.load(os.path.join(LATENT_DIR, lf), weights_only=True)\n"," if latent.dim() == 4 and latent.shape[0] == 1:\n"," latent = latent.squeeze(0)\n"," latents_list.append(latent)\n","latents = torch.stack(latents_list)\n","print(f\"Latents shape: {latents.shape}\")\n","\n","precomputed = torch.load(\"/content/drive/MyDrive/klein_exact_embeddings.pt\", weights_only=True)\n","\n","# Pooled: 2560 β†’ 768 (standard for pooled_projections in FluxTransformer2DModel)\n","pooled_projection = nn.Linear(2560, 768, bias=True, dtype=torch.bfloat16).to(\"cuda\")\n","with torch.no_grad():\n"," min_d = min(2560, 768)\n"," pooled_projection.weight.data[:, :min_d] = torch.eye(min_d, dtype=torch.bfloat16)\n"," pooled_projection.bias.data.zero_()\n","pooled_projection.train()\n","\n","class FluxLoRADataset(Dataset):\n"," def __init__(self, latents, precomputed):\n"," self.latents = latents\n"," self.encoder_hs = precomputed[\"encoder_hidden_states\"]\n"," self.pooled = precomputed[\"pooled_projections\"]\n","\n"," def __len__(self): return len(self.latents)\n","\n"," def __getitem__(self, idx):\n"," return {\n"," \"latent\": self.latents[idx],\n"," \"encoder_hidden_states\": self.encoder_hs[idx], # (seq_len, ~2560)\n"," \"pooled_raw\": self.pooled[idx]\n"," }\n","\n","def collate_fn(batch):\n"," return {\n"," \"latent\": torch.stack([item[\"latent\"] for item in batch]),\n"," \"encoder_hidden_states\": [item[\"encoder_hidden_states\"] for item in batch],\n"," \"pooled_raw\": torch.stack([item[\"pooled_raw\"] for item in batch])\n"," }\n","\n","class FluxLoRATrainer(Trainer):\n"," def compute_loss(self, model, inputs, return_outputs=False, num_items_in_batch=None):\n"," latents = inputs[\"latent\"].to(dtype=torch.bfloat16, device=model.device)\n"," encoder_hs_list = 
inputs[\"encoder_hidden_states\"]\n"," pooled_raw = inputs[\"pooled_raw\"].to(dtype=torch.bfloat16, device=model.device)\n","\n"," pooled_projections = pooled_projection(pooled_raw)\n","\n"," batch_size = latents.shape[0]\n"," timesteps = torch.rand(batch_size, device=latents.device)\n"," noise = torch.randn_like(latents)\n"," noisy_latents = (1 - timesteps.view(-1, 1, 1, 1)) * latents + timesteps.view(-1, 1, 1, 1) * noise\n","\n"," # === Careful reshape for encoder_hidden_states ===\n"," # Take first item (B=1), move to device, expand to expected width (usually 7680 = 3*2560)\n"," enc = encoder_hs_list[0].to(dtype=torch.bfloat16, device=model.device) # (seq_len, 2560)\n"," encoder_hidden_states = torch.cat([enc, enc, enc], dim=-1) # (seq_len, 7680)\n","\n"," # Placeholders - this is the part that often causes the 4D vs 5D cat error if wrong\n"," seq_len = encoder_hidden_states.shape[0]\n"," txt_ids = torch.zeros((seq_len, 3), device=model.device, dtype=torch.bfloat16)\n","\n"," # Image tokens: for 1024x1024 latents (16x128x128) the number is usually (128*128) // 4 or similar\n"," img_tokens = latents.shape[2] * latents.shape[3] // 4\n"," img_ids = torch.zeros((img_tokens, 3), device=model.device, dtype=torch.bfloat16)\n","\n"," model_output = model(\n"," hidden_states=noisy_latents,\n"," timestep=timesteps * 1000,\n"," encoder_hidden_states=encoder_hidden_states,\n"," pooled_projections=pooled_projections,\n"," txt_ids=txt_ids,\n"," img_ids=img_ids,\n"," return_dict=False\n"," )[0]\n","\n"," target = noise - latents\n"," loss = F.mse_loss(model_output, target)\n","\n"," return (loss, model_output) if return_outputs else loss\n","\n","training_args = TrainingArguments(\n"," output_dir=\"/content/flux_klein_lora\",\n"," per_device_train_batch_size=BATCH_SIZE,\n"," num_train_epochs=NUM_EPOCHS,\n"," learning_rate=LEARNING_RATE,\n"," lr_scheduler_type=\"cosine\",\n"," warmup_steps=50,\n"," bf16=True,\n"," logging_steps=10,\n"," save_strategy=\"epoch\",\n"," 
save_total_limit=2,\n"," report_to=\"none\",\n"," remove_unused_columns=False,\n"," dataloader_pin_memory=False,\n",")\n","\n","trainer = FluxLoRATrainer(\n"," model=transformer,\n"," args=training_args,\n"," train_dataset=dataset,\n"," data_collator=collate_fn,\n",")\n","\n","print(\"\\nπŸš€ Starting training...\")\n","trainer.train()"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"DRoYrgC-rbkt"},"outputs":[],"source":["# ============================= CELL 4.c: Save LoRA =============================\n","# @title 4.c Save Final LoRA\n","\n","final_lora_dir = FINAL_LORA_DIR\n","os.makedirs(final_lora_dir, exist_ok=True)\n","transformer.save_pretrained(final_lora_dir)\n","\n","print(f\"\\nβœ… Training completed! LoRA saved to: {final_lora_dir}\")\n","torch.cuda.empty_cache()\n","gc.collect()"]}],"metadata":{"accelerator":"GPU","colab":{"gpuType":"T4","provenance":[{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Qwen destill.ipynb","timestamp":1775130462813}]},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"name":"python"}},"nbformat":4,"nbformat_minor":0}
 
1
+ {"cells":[{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":13351,"status":"ok","timestamp":1775140807937,"user":{"displayName":"fukU Google","userId":"02763165356193834046"},"user_tz":-120},"id":"Ma3eWy8RQnUM","outputId":"13f90e61-cbed-4b26-d8f8-f002a827dafe"},"outputs":[{"name":"stdout","output_type":"stream","text":["Mounted at /content/drive\n"]}],"source":["from google.colab import drive\n","drive.mount('/content/drive')"]},{"cell_type":"code","source":["# ============================= CELL 1: FIXED Distillation Pipeline (RoPE-Preserving) =============================\n","# @markdown 1 – FIXED Distillation: Qwen2.5-0.5B Student from Qwen3-4B Teacher (RoPE-Preserving – NO reversal aug)\n","\n","import os\n","import zipfile\n","import torch\n","import torch.nn as nn\n","import torch.nn.functional as F\n","import numpy as np\n","import gc\n","from google.colab import drive\n","from PIL import Image\n","from tqdm import tqdm\n","from diffusers import AutoencoderKL, FluxTransformer2DModel\n","from transformers import AutoTokenizer, AutoModel, set_seed\n","from datasets import Dataset as HFDataset\n","from torch.utils.data import Dataset\n","from peft import LoraConfig, get_peft_model, PeftModel\n","from transformers import Trainer, TrainingArguments\n","\n","set_seed(42)\n","drive.mount('/content/drive', force_remount=True)\n","\n","# ========================== PARAMETERS ==========================\n","zip_path = '/content/drive/MyDrive/my_set.zip' # @param {type:'string'}\n","\n","update_latents = False # @param {type:\"boolean\"}\n","update_teacher_embeddings = True # @param {type:\"boolean\"}\n","update_distillation = True # @param {type:\"boolean\"}\n","update_student_embeddings = True # @param {type:\"boolean\"}\n","\n","epochs = 10 # @param {type:\"slider\", min:5, max:30, step:1}\n","batch_size = 4 # @param {type:\"slider\", min:2, max:8, step:1}\n","learning_rate = 2e-4 # 
@param {type:\"number\"}\n","\n","print(\"πŸ”§ Update settings:\")\n","print(f\" β€’ Latents: {'Update' if update_latents else 'Skip'}\")\n","print(f\" β€’ Teacher Embeddings: {'Update' if update_teacher_embeddings else 'Skip'}\")\n","print(f\" β€’ Distillation: {'Update' if update_distillation else 'Skip'}\")\n","print(f\" β€’ Student Embeddings: {'Update' if update_student_embeddings else 'Skip'}\")\n","\n","# ====================== 0. Smart Load FLUX.2-klein 4B on CPU ======================\n","print(\"\\n\" + \"=\"*70)\n","print(\"0. Smart Loading FLUX.2-klein 4B Transformer to CPU...\")\n","print(\"=\"*70)\n","\n","if 'transformer' not in globals() or update_latents:\n"," print(\"πŸ”„ Loading FLUX.2-klein 4B transformer to CPU...\")\n"," transformer = FluxTransformer2DModel.from_pretrained(\n"," \"black-forest-labs/FLUX.2-klein-base-4B\",\n"," subfolder=\"transformer\",\n"," torch_dtype=torch.bfloat16,\n"," low_cpu_mem_usage=False,\n"," ignore_mismatched_sizes=True\n"," ).to(\"cpu\")\n"," print(\"βœ… FLUX transformer loaded on CPU\")\n","else:\n"," print(\"βœ… Reusing FLUX transformer from memory (CPU)\")\n","\n","torch.cuda.empty_cache()\n","gc.collect()\n","\n","# ====================== 1.A – VAE Latents ======================\n","print(\"\\n\" + \"=\"*70)\n","print(\"1.A – Extracting dataset and encoding FLUX.2 VAE latents\")\n","print(\"=\"*70)\n","\n","extract_dir = \"/content/data\"\n","os.makedirs(extract_dir, exist_ok=True)\n","\n","with zipfile.ZipFile(zip_path, 'r') as zip_ref:\n"," zip_ref.extractall(extract_dir)\n","\n","image_files = [f for f in os.listdir(extract_dir) if f.lower().endswith(('.png', '.jpg', '.jpeg', '.webp'))]\n","text_files = sorted([f for f in os.listdir(extract_dir) if f.endswith('.txt') and f[0].isdigit()])\n","\n","texts = []\n","for tf in text_files:\n"," with open(os.path.join(extract_dir, tf), \"r\", encoding=\"utf-8\") as f:\n"," content = f.read().strip()\n"," if content:\n"," texts.append(content)\n","\n","print(f\"βœ… 
Found {len(image_files)} images and {len(texts)} captions\")\n","\n","latent_dir = \"/content/drive/MyDrive/flux_klein_latents\"\n","latents_exist = os.path.exists(latent_dir) and len([f for f in os.listdir(latent_dir) if f.endswith(\".pt\")]) == len(image_files)\n","\n","if latents_exist and not update_latents:\n"," print(f\"βœ… Using existing latents from {latent_dir}\")\n","else:\n"," print(\"πŸŒ€ Encoding images to FLUX.2 VAE latents...\")\n"," vae = AutoencoderKL.from_pretrained(\n"," \"black-forest-labs/FLUX.2-dev\", subfolder=\"vae\",\n"," torch_dtype=torch.float32, device_map=\"auto\"\n"," ).eval()\n","\n"," os.makedirs(latent_dir, exist_ok=True)\n","\n"," with torch.no_grad():\n"," for img_file in tqdm(image_files, desc=\"Encoding latents\"):\n"," img_path = os.path.join(extract_dir, img_file)\n"," image = Image.open(img_path).convert(\"RGB\").resize((1024, 1024), Image.LANCZOS)\n","\n"," pixel_values = (torch.from_numpy(np.array(image)).permute(2, 0, 1).unsqueeze(0).float() / 255.0)\n"," pixel_values = pixel_values.to(vae.device, dtype=vae.dtype) * 2.0 - 1.0\n","\n"," latents = vae.encode(pixel_values).latent_dist.sample() * vae.config.scaling_factor\n"," latent_name = os.path.splitext(img_file)[0] + \".pt\"\n"," torch.save(latents.cpu(), os.path.join(latent_dir, latent_name))\n","\n"," del vae\n"," torch.cuda.empty_cache()\n"," print(f\"βœ… Latents saved to {latent_dir}\")\n","\n","# ====================== 1.B – Teacher + FIXED Distillation (RoPE-preserving) ======================\n","print(\"\\n\" + \"=\"*70)\n","print(\"1.B – Teacher (Qwen3-4B) Embeddings + FIXED Distillation (RoPE-preserving)\")\n","print(\"=\"*70)\n","\n","teacher_embeddings_path = \"/content/drive/MyDrive/qwen_embeddings_7680.pt\"\n","distilled_dir = \"/content/drive/MyDrive/distilled_qwen_small_7680_for_flux\"\n","\n","# --- Teacher Embeddings ---\n","if os.path.exists(teacher_embeddings_path) and not update_teacher_embeddings:\n"," print(\"βœ… Using existing teacher 
embeddings\")\n"," data = torch.load(teacher_embeddings_path, map_location=\"cpu\")\n"," texts = data[\"texts\"]\n"," teacher_embeddings_7680 = data[\"embeddings\"]\n","else:\n"," print(\"πŸ“ Computing teacher embeddings with Qwen3-4B...\")\n"," teacher_model_name = \"Qwen/Qwen3-4B\"\n","\n"," if 'teacher_tokenizer' not in globals() or 'teacher_model' not in globals():\n"," teacher_tokenizer = AutoTokenizer.from_pretrained(teacher_model_name)\n"," teacher_model = AutoModel.from_pretrained(\n"," teacher_model_name, torch_dtype=torch.float16, device_map=\"auto\", trust_remote_code=True\n"," ).eval()\n"," else:\n"," print(\"βœ… Reusing teacher tokenizer and model from memory\")\n","\n"," teacher_embeddings = []\n"," with torch.no_grad():\n"," for text in tqdm(texts, desc=\"Teacher encoding\"):\n"," inputs = teacher_tokenizer(text, padding=True, truncation=True, max_length=512, return_tensors=\"pt\").to(teacher_model.device)\n"," outputs = teacher_model(**inputs)\n"," emb = outputs.last_hidden_state.mean(dim=1).squeeze(0).cpu()\n"," emb_7680 = torch.cat([emb, emb, emb], dim=0)\n"," teacher_embeddings.append(emb_7680)\n","\n"," teacher_embeddings_7680 = torch.stack(teacher_embeddings)\n"," torch.save({\"embeddings\": teacher_embeddings_7680, \"texts\": texts, \"dim\": 7680}, teacher_embeddings_path)\n"," del teacher_model\n"," torch.cuda.empty_cache()\n"," print(\"βœ… Teacher embeddings saved\")\n","\n","# --- FIXED Distillation (no reversal – this was the bug) ---\n","if os.path.exists(distilled_dir) and os.path.exists(f\"{distilled_dir}/projection.pth\") and not update_distillation:\n"," print(f\"βœ… Using existing distilled model from {distilled_dir}\")\n","else:\n"," print(\"πŸ‘¨β€πŸŽ“ Starting FIXED distillation (RoPE-preserving – no reversal augmentation)...\")\n","\n"," student_model_name = \"Qwen/Qwen2.5-0.5B\"\n","\n"," if 'student_tokenizer' not in globals():\n"," student_tokenizer = AutoTokenizer.from_pretrained(student_model_name)\n"," else:\n"," 
print(\"βœ… Reusing student tokenizer from memory\")\n"," student_tokenizer = student_tokenizer\n","\n"," base_student = AutoModel.from_pretrained(\n"," student_model_name,\n"," torch_dtype=torch.float32,\n"," trust_remote_code=True\n"," ).to(\"cpu\")\n","\n"," # Stronger LoRA for the tiny 0.5B model\n"," lora_config = LoraConfig(\n"," r=32, # ↑ was 16\n"," lora_alpha=64, # ↑ was 32\n"," target_modules=[\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\"],\n"," lora_dropout=0.05,\n"," bias=\"none\",\n"," task_type=\"FEATURE_EXTRACTION\"\n"," )\n"," student_model = get_peft_model(base_student, lora_config).to(\"cuda\")\n","\n"," for param in student_model.parameters():\n"," if param.requires_grad:\n"," param.data = param.data.to(torch.float32)\n","\n"," projection = nn.Linear(base_student.config.hidden_size, 7680, dtype=torch.float32).to(\"cuda\")\n"," projection.train()\n","\n"," hf_dataset = HFDataset.from_dict({\"text\": texts})\n","\n"," class DistillationDataset(Dataset):\n"," def __init__(self, hf_dataset, tokenizer, teacher_embs, max_length=512):\n"," self.dataset = hf_dataset\n"," self.tokenizer = tokenizer\n"," self.teacher_embs = teacher_embs\n"," self.max_length = max_length\n","\n"," def __len__(self):\n"," return len(self.dataset)\n","\n"," def __getitem__(self, idx):\n"," text = self.dataset[idx][\"text\"] # ← ALWAYS original text (RoPE must see real order)\n","\n"," # === REMOVED: reversal augmentation – this was destroying RoPE ===\n","\n"," inputs = self.tokenizer(\n"," text,\n"," padding=False,\n"," truncation=True,\n"," max_length=self.max_length,\n"," return_tensors=\"pt\"\n"," )\n","\n"," return {\n"," \"input_ids\": inputs[\"input_ids\"].squeeze(0),\n"," \"attention_mask\": inputs[\"attention_mask\"].squeeze(0),\n"," \"labels\": self.teacher_embs[idx],\n"," \"original_text\": text\n"," }\n","\n"," distill_dataset = DistillationDataset(hf_dataset, student_tokenizer, teacher_embeddings_7680)\n","\n"," def collate_fn(batch):\n"," texts = 
[item[\"original_text\"] for item in batch]\n"," inputs = student_tokenizer(texts, padding=True, truncation=True, max_length=512, return_tensors=\"pt\")\n"," labels = torch.stack([item[\"labels\"] for item in batch])\n","\n"," return {\n"," \"input_ids\": inputs[\"input_ids\"],\n"," \"attention_mask\": inputs[\"attention_mask\"],\n"," \"labels\": labels\n"," }\n","\n"," class DistillTrainer(Trainer):\n"," def compute_loss(self, model, inputs, return_outputs=False, **kwargs):\n"," labels = inputs.pop(\"labels\").to(\"cuda\").float()\n"," outputs = model(input_ids=inputs[\"input_ids\"], attention_mask=inputs[\"attention_mask\"])\n"," hidden = outputs.last_hidden_state.mean(dim=1)\n"," student_emb = projection(hidden)\n","\n"," student_norm = F.normalize(student_emb, p=2, dim=1)\n"," teacher_norm = F.normalize(labels, p=2, dim=1)\n","\n"," mse_loss = F.mse_loss(student_norm, teacher_norm)\n"," cos_loss = (1 - F.cosine_similarity(student_norm, teacher_norm, dim=1)).mean()\n"," raw_mse = F.mse_loss(student_emb, labels) # extra supervision on raw vectors\n"," loss = 0.4 * mse_loss + 0.5 * cos_loss + 0.1 * raw_mse # stricter loss\n"," return (loss, outputs) if return_outputs else loss\n","\n"," training_args = TrainingArguments(\n"," output_dir=\"./distilled_qwen_small_7680\",\n"," per_device_train_batch_size=batch_size,\n"," gradient_accumulation_steps=2,\n"," num_train_epochs=epochs,\n"," learning_rate=learning_rate,\n"," fp16=False,\n"," logging_steps=20,\n"," save_strategy=\"no\",\n"," report_to=\"none\",\n"," remove_unused_columns=False,\n"," max_grad_norm=1.0,\n"," dataloader_pin_memory=False,\n"," warmup_ratio=0.1, # added for stability\n"," )\n","\n"," trainer = DistillTrainer(\n"," model=student_model,\n"," args=training_args,\n"," train_dataset=distill_dataset,\n"," data_collator=collate_fn,\n"," )\n","\n"," print(\"πŸš€ Starting FIXED distillation training...\")\n"," trainer.train()\n","\n"," # Save\n"," print(\"\\nπŸ’Ύ Saving distilled model to CPU...\")\n"," 
os.makedirs(distilled_dir, exist_ok=True)\n"," student_model = student_model.to(\"cpu\")\n"," student_model.save_pretrained(distilled_dir)\n"," student_tokenizer.save_pretrained(distilled_dir)\n"," torch.save(projection.to(\"cpu\").state_dict(), f\"{distilled_dir}/projection.pth\")\n","\n"," print(f\"βœ… FIXED distilled model saved to {distilled_dir}\")\n"," torch.cuda.empty_cache()\n","\n","# ====================== 1.C – Pre-compute Student Embeddings ======================\n","print(\"\\n\" + \"=\"*70)\n","print(\"1.C – Pre-computing Student Embeddings + Alignment\")\n","print(\"=\"*70)\n","\n","full_embeddings_path = \"/content/drive/MyDrive/qwen_embeddings_7680_full.pt\"\n","\n","if os.path.exists(full_embeddings_path) and not update_student_embeddings:\n"," print(f\"βœ… Using existing full embeddings from {full_embeddings_path}\")\n","else:\n"," print(\"πŸ”„ Loading distilled student and computing embeddings...\")\n","\n"," tokenizer = AutoTokenizer.from_pretrained(distilled_dir)\n"," base_student = AutoModel.from_pretrained(\n"," \"Qwen/Qwen2.5-0.5B\", torch_dtype=torch.float32, trust_remote_code=True\n"," )\n"," student = PeftModel.from_pretrained(base_student, distilled_dir).to(\"cuda\").eval()\n","\n"," projection = nn.Linear(base_student.config.hidden_size, 7680, dtype=torch.float32).to(\"cuda\")\n"," projection.load_state_dict(torch.load(f\"{distilled_dir}/projection.pth\", map_location=\"cuda\"))\n"," projection.eval()\n","\n"," student_embs_list = []\n"," with torch.no_grad():\n"," for text in tqdm(texts, desc=\"Computing student embeddings\"):\n"," inputs = tokenizer(text, padding=True, truncation=True, max_length=512, return_tensors=\"pt\").to(\"cuda\")\n"," outputs = student(**inputs)\n"," hidden = outputs.last_hidden_state.mean(dim=1)\n"," emb = projection(hidden).to(torch.float32).squeeze(0)\n"," student_embs_list.append(emb)\n","\n"," student_embs = torch.stack(student_embs_list)\n","\n"," teacher_embs = teacher_embeddings_7680.to(\"cuda\") if 
'teacher_embeddings_7680' in locals() else \\\n"," torch.load(teacher_embeddings_path, map_location=\"cuda\")[\"embeddings\"].to(\"cuda\")\n","\n"," cos_sims = F.cosine_similarity(student_embs, teacher_embs, dim=1)\n","\n"," print(\"\\nπŸ“Š Alignment Summary:\")\n"," print(f\" Average cosine similarity: {cos_sims.mean().item():.4f}\")\n"," print(f\" Min: {cos_sims.min().item():.4f} Max: {cos_sims.max().item():.4f}\")\n","\n"," torch.save({\n"," \"texts\": texts,\n"," \"teacher_embeddings\": teacher_embs.cpu(),\n"," \"student_embeddings\": student_embs.cpu(),\n"," \"cosine_similarities\": cos_sims.cpu(),\n"," \"dim\": 7680\n"," }, full_embeddings_path)\n","\n"," print(f\"βœ… Full embeddings saved to {full_embeddings_path}\")\n","\n"," del student, projection, base_student\n"," torch.cuda.empty_cache()\n","\n","# Final cleanup\n","if 'transformer' in globals():\n"," transformer = transformer.to(\"cpu\")\n","torch.cuda.empty_cache()\n","gc.collect()\n","\n","print(\"\\nπŸŽ‰ Cell 1 completed successfully!\")\n","print(\" β€’ Reversal augmentation REMOVED β†’ RoPE is now preserved\")\n","print(\" β€’ Stronger LoRA + stricter loss β†’ better alignment expected\")\n","print(\" β€’ Ready for Cell 2 RoPE test\")"],"metadata":{"cellView":"form","id":"7FsJI6PE_f-l"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"-DFIbyTtlLdp"},"source":["Cell 2"]},{"cell_type":"code","source":["# ============================= CELL 2: Full RoPE + Alignment Verification =============================\n","# @markdown 2 – RoPE-Aware Test + Alignment Verification (for FLUX.2-klein 4B text encoder)\n","\n","import torch\n","import torch.nn.functional as F\n","import gc\n","from google.colab import drive\n","from transformers import AutoTokenizer\n","from peft import PeftModel, AutoModel\n","from tqdm import tqdm\n","\n","drive.mount('/content/drive', force_remount=True)\n","\n","print(\"\\n\" + \"=\"*70)\n","print(\"Smart Loading Models (reuse from memory / CPU where 
possible)\")\n","print(\"=\"*70)\n","\n","# Reuse FLUX transformer if already loaded\n","if 'transformer' not in globals():\n"," print(\"βœ… Reusing FLUX transformer from memory (CPU)\")\n","else:\n"," print(\"βœ… FLUX transformer already in memory\")\n","\n","# ====================== Load distilled student + projection ======================\n","print(\"\\nπŸ”„ Loading distilled small student...\")\n","distilled_dir = \"/content/drive/MyDrive/distilled_qwen_small_7680_for_flux\"\n","\n","tokenizer = AutoTokenizer.from_pretrained(distilled_dir)\n","base_student = AutoModel.from_pretrained(\n"," \"Qwen/Qwen2.5-0.5B\", torch_dtype=torch.float32, trust_remote_code=True\n",")\n","student = PeftModel.from_pretrained(base_student, distilled_dir).to(\"cuda\").eval()\n","\n","projection = nn.Linear(base_student.config.hidden_size, 7680, dtype=torch.float32).to(\"cuda\")\n","projection.load_state_dict(torch.load(f\"{distilled_dir}/projection.pth\", map_location=\"cuda\"))\n","projection.eval()\n","\n","print(\"βœ… Distilled student + projection loaded on CUDA\")\n","\n","# ====================== Load pre-computed embeddings ======================\n","print(\"\\nLoading pre-computed embeddings...\")\n","full_embeddings_path = \"/content/drive/MyDrive/qwen_embeddings_7680_full.pt\"\n","data = torch.load(full_embeddings_path, map_location=\"cpu\")\n","texts = data[\"texts\"]\n","teacher_embs = data[\"teacher_embeddings\"]\n","student_embs = data[\"student_embeddings\"]\n","\n","print(f\"βœ… Loaded embeddings for {len(texts)} texts\")\n","\n","# ====================== ALIGNMENT TEST ======================\n","print(\"\\n\" + \"=\"*60)\n","print(\"πŸ“Š ALIGNMENT TEST\")\n","print(\"=\"*60)\n","\n","cos_sims = F.cosine_similarity(student_embs, teacher_embs, dim=1)\n","print(f\"Average cosine similarity: {cos_sims.mean().item():.4f}\")\n","print(f\"Min: {cos_sims.min().item():.4f} Max: {cos_sims.max().item():.4f}\")\n","print(f\"Median: 
{cos_sims.median().item():.4f}\")\n","\n","if cos_sims.mean().item() > 0.90:\n"," print(\"βœ… Excellent alignment!\")\n","elif cos_sims.mean().item() > 0.85:\n"," print(\"βœ… Good alignment\")\n","else:\n"," print(\"⚠️ Alignment is still a bit low – consider more epochs or larger LoRA\")\n","\n","# ====================== FORWARD PASS + RoPE TEST ======================\n","print(\"\\n\" + \"=\"*60)\n","print(\"πŸ§ͺ FORWARD PASS + RoPE TEST (on distilled student only)\")\n","print(\"=\"*60)\n","\n","test_text = texts[0] # first caption\n","inputs = tokenizer(test_text, padding=True, truncation=True, max_length=512, return_tensors=\"pt\").to(\"cuda\")\n","\n","with torch.no_grad():\n"," outputs = student(**inputs)\n"," hidden = outputs.last_hidden_state.mean(dim=1)\n"," emb_normal = projection(hidden).cpu()\n","\n","print(f\"βœ… Distilled embedding shape: {emb_normal.shape}\")\n","print(f\" Embedding norm: {emb_normal.norm().item():.4f}\")\n","\n","# RoPE sensitivity tests\n","def get_embedding(text):\n"," inputs = tokenizer(text, padding=True, truncation=True, max_length=512, return_tensors=\"pt\").to(\"cuda\")\n"," with torch.no_grad():\n"," hidden = student(**inputs).last_hidden_state.mean(dim=1)\n"," return F.normalize(projection(hidden).cpu(), p=2, dim=1)\n","\n","# 1. Reversed order (should be very different now)\n","words = test_text.split()\n","reversed_text = \" \".join(reversed(words))\n","emb_rev = get_embedding(reversed_text)\n","\n","# 2. 
Padded version\n","padded_text = test_text + \" \" + \" \".join([\"pad\"] * 50)\n","emb_pad = get_embedding(padded_text)\n","\n","sim_rev = F.cosine_similarity(emb_normal, emb_rev, dim=1).item()\n","sim_pad = F.cosine_similarity(emb_normal, emb_pad, dim=1).item()\n","\n","print(f\"Cosine sim (normal vs reversed order): {sim_rev:.4f} ← should be << 0.90 if RoPE works well\")\n","print(f\"Cosine sim (normal vs padded): {sim_pad:.4f} ← should be very close to 1.0 but not exactly 1\")\n","\n","# ====================== EXTRA ORDER SENSITIVITY CHECKS ======================\n","print(\"\\n\" + \"=\"*60)\n","print(\"πŸ§ͺ EXTRA RoPE / ORDER SENSITIVITY CHECKS\")\n","print(\"=\"*60)\n","\n","test_prompts = [\n"," \"a cat sitting on a mat\",\n"," \"a mat sitting on a cat\",\n"," \"a red cat chases a blue dog\",\n"," \"a blue dog chases a red cat\",\n"," \"the quick brown fox jumps over the lazy dog\",\n"," \"the lazy dog jumps over the quick brown fox\",\n","]\n","\n","with torch.no_grad():\n"," embs = []\n"," for p in test_prompts:\n"," emb = get_embedding(p)\n"," embs.append(emb)\n"," embs = torch.cat(embs, dim=0)\n","\n","for i in range(0, len(test_prompts), 2):\n"," sim = F.cosine_similarity(embs[i], embs[i+1], dim=1).item()\n"," print(f\"Pair {i//2+1}: '{test_prompts[i]}' vs '{test_prompts[i+1]}' β†’ {sim:.4f}\")\n","\n","print(\"\\nπŸŽ‰ Cell 2 completed!\")\n","print(\" β€’ If reversal sim < 0.85 and alignment > 0.90 β†’ your text encoder is now RoPE-aware\")\n","print(\" β€’ Ready for LoRA training on FLUX.2-klein 4B\")"],"metadata":{"cellView":"form","id":"SKEigUqg_qpt"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"dz6FDD1aBSCt"},"source":["Cell 
3\n"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":73},"executionInfo":{"elapsed":4651,"status":"ok","timestamp":1775007678343,"user":{"displayName":"Nnekos4Lyfe","userId":"11521490251288716219"},"user_tz":-120},"id":"FQF71-mvmlc1","outputId":"ab97d8ee-a278-4d0b-9a6b-9f99a47946ab"},"outputs":[{"name":"stdout","output_type":"stream","text":["πŸ”Œ Disconnecting Colab session in 15 seconds...\n","Session disconnected.\n"]}],"source":["# ================================================\n","# Auto Disconnect Colab Session\n","# ================================================\n","\n","print(\"πŸ”Œ Disconnecting Colab session in 15 seconds...\")\n","import time\n","time.sleep(3)\n","\n","from google.colab import runtime\n","runtime.unassign()\n","\n","print(\"Session disconnected.\")"]},{"cell_type":"markdown","metadata":{"id":"cfshTDIFM5ND"},"source":["You can disconnect the colab past this point. All data from cells 1 and 2 are saved to drive."]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"sWEzuqsmvKua"},"outputs":[],"source":["# ============================= CELL 3.a: Install Dependencies + Setup Parameters + Load Qwen Text Encoder =============================\n","# @title 3.a Setup + Load Text Encoder (one-time for precompute)\n","\n","!pip install -q diffusers transformers peft accelerate datasets tqdm\n","\n","import os\n","import torch\n","import gc\n","from google.colab import drive\n","from diffusers import Flux2KleinPipeline\n","\n","drive.mount('/content/drive', force_remount=True)\n","\n","# ====================== Parameters ======================\n","LATENT_DIR = \"/content/drive/MyDrive/flux_klein_latents\"\n","FINAL_LORA_DIR = \"/content/drive/MyDrive/flux_klein_lora_final\"\n","\n","BATCH_SIZE = 1 # Keep at 1 for safety with variable sequence lengths\n","NUM_EPOCHS = 8\n","LEARNING_RATE = 1e-4\n","LORA_RANK = 32\n","LORA_ALPHA = 32\n","\n","print(\"βœ… 
Dependencies installed and parameters set.\")\n","print(f\" Batch size: {BATCH_SIZE} | Epochs: {NUM_EPOCHS}\")\n","print(f\" Latents from: {LATENT_DIR}\")\n","print(f\" Final LoRA will be saved to: {FINAL_LORA_DIR}\")\n","\n","# ====================== Load Pipeline + Text Encoder ======================\n","print(\"\\nπŸ”„ Loading FLUX.2-klein-base-4B pipeline (Qwen3-4B text encoder)...\")\n","\n","pipe = Flux2KleinPipeline.from_pretrained(\n"," \"black-forest-labs/FLUX.2-klein-base-4B\",\n"," torch_dtype=torch.bfloat16,\n"," device_map=\"balanced\",\n"," low_cpu_mem_usage=True\n",")\n","\n","text_encoder = pipe.text_encoder\n","tokenizer = pipe.tokenizer\n","\n","# Force to CUDA and enable hidden states output\n","text_encoder = text_encoder.to(\"cuda\")\n","text_encoder.config.output_hidden_states = True\n","text_encoder.eval()\n","\n","print(\"βœ… Text encoder loaded and moved to CUDA\")\n","print(f\"Current VRAM used: {torch.cuda.memory_allocated()/1024**3:.2f} GB\")\n","\n","# Quick VRAM check\n","print(\"\\nπŸ” Quick VRAM check:\")\n","!nvidia-smi --query-gpu=memory.used,memory.total --format=csv"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"8hDfeHlNvPWE"},"outputs":[],"source":["# ============================= CELL 3.b: Precompute Exact Qwen3-4B Embeddings =============================\n","# @title 3.b Precompute Embeddings (using text encoder from 3.a)\n","\n","import torch\n","from tqdm import tqdm\n","\n","# Load your 250 texts\n","data = torch.load(\"/content/drive/MyDrive/qwen_embeddings_768.pt\", weights_only=False)\n","texts = data[\"texts\"]\n","\n","precomputed = {\n"," \"encoder_hidden_states\": [], # list of (seq_len, hidden_dim)\n"," \"pooled_projections\": [] # list of (hidden_dim,)\n","}\n","\n","with torch.no_grad():\n"," for i, raw_text in enumerate(tqdm(texts, desc=\"Precomputing embeddings\")):\n"," text = raw_text.strip()\n"," if not text:\n"," text = \"a photo of a scene\"\n","\n"," inputs = 
tokenizer(\n"," text,\n"," padding=\"max_length\",\n"," truncation=True,\n"," max_length=512,\n"," return_tensors=\"pt\",\n"," return_attention_mask=True\n"," ).to(\"cuda\")\n","\n"," if inputs[\"input_ids\"].shape[1] == 0:\n"," print(f\"Warning: zero-length sequence for index {i}, using fallback\")\n"," inputs = tokenizer(\n"," \"a photo of a scene\",\n"," padding=\"max_length\",\n"," truncation=True,\n"," max_length=512,\n"," return_tensors=\"pt\",\n"," return_attention_mask=True\n"," ).to(\"cuda\")\n","\n"," outputs = text_encoder(**inputs)\n","\n"," # Handle CausalLMOutputWithPast correctly\n"," if hasattr(outputs, \"hidden_states\") and outputs.hidden_states is not None:\n"," hidden = outputs.hidden_states[-1].squeeze(0).cpu() # final layer: (seq_len, hidden_dim)\n"," elif hasattr(outputs, \"last_hidden_state\"):\n"," hidden = outputs.last_hidden_state.squeeze(0).cpu()\n"," else:\n"," print(f\"Warning: unexpected output for text {i}, using logits as fallback\")\n"," hidden = outputs.logits.squeeze(0).cpu()\n","\n"," pooled = hidden.mean(dim=0).cpu() # (hidden_dim,)\n","\n"," precomputed[\"encoder_hidden_states\"].append(hidden)\n"," precomputed[\"pooled_projections\"].append(pooled)\n","\n","print(f\"βœ… Successfully precomputed embeddings for {len(texts)} texts\")\n","torch.save(precomputed, \"/content/drive/MyDrive/klein_exact_embeddings.pt\")\n","print(\"Saved to /content/drive/MyDrive/klein_exact_embeddings.pt\")"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"px5wQJZ2vTUf"},"outputs":[],"source":["# ============================= CELL 3.c: Unload Text Encoder + Prepare Workspace =============================\n","# @title 3.c Cleanup – Unload Qwen Encoder\n","\n","import gc\n","\n","# Unload pipeline and text encoder\n","if 'pipe' in globals():\n"," del pipe\n","if 'text_encoder' in globals():\n"," del text_encoder\n","if 'tokenizer' in globals():\n"," del 
# ===================== CELL 3.c (tail): free VRAM and sanity-check latents =====================
torch.cuda.empty_cache()
gc.collect()

print("βœ… Text encoder and pipeline fully unloaded from VRAM")
print(f"Current VRAM used: {torch.cuda.memory_allocated()/1024**3:.2f} GB")

# Confirm the cached latents are present before moving on to training.
latent_files = sorted(f for f in os.listdir(LATENT_DIR) if f.endswith(".pt"))
print(f"Found {len(latent_files)} latents ready for training")

# ===================== [markdown] lora training =====================

# ===================== CELL 4.a: Load Transformer + Apply LoRA =====================
import torch
import gc
from torch.utils.data import Dataset
from peft import LoraConfig, get_peft_model
from diffusers import FluxTransformer2DModel
from transformers import set_seed

set_seed(42)

print("=== CELL 4.a – Loading Transformer + LoRA ===")
print(f"Current VRAM before loading: {torch.cuda.memory_allocated()/1024**3:.2f} GB")

# Precomputed text embeddings from cell 3.b (plain tensors, so
# weights_only=True is sufficient).
precomputed = torch.load("/content/drive/MyDrive/klein_exact_embeddings.pt", weights_only=True)

print("Loading FLUX.2-klein-base-4B transformer...")

# low_cpu_mem_usage=False avoids meta tensors so .to("cuda") materializes
# real weights.
transformer = FluxTransformer2DModel.from_pretrained(
    "black-forest-labs/FLUX.2-klein-base-4B",
    subfolder="transformer",
    torch_dtype=torch.bfloat16,
    low_cpu_mem_usage=False,
).to("cuda")

print(f"βœ… Transformer loaded successfully. VRAM: {torch.cuda.memory_allocated()/1024**3:.2f} GB")

# ====================== Apply LoRA ======================
lora_config = LoraConfig(
    r=LORA_RANK,
    lora_alpha=LORA_ALPHA,
    target_modules=[
        "attn.to_q", "attn.to_k", "attn.to_v", "attn.to_out.0",
        "attn.to_qkv_mlp_proj", "attn.add_q_proj", "attn.add_k_proj",
        "attn.add_v_proj", "attn.to_add_out",
        "ff.linear_in", "ff.linear_out", "ff_context.linear_in", "ff_context.linear_out",
    ],
    lora_dropout=0.05,
    bias="none",
)

transformer = get_peft_model(transformer, lora_config)
transformer.train()

print("βœ… LoRA applied successfully")
print(f"VRAM after LoRA: {torch.cuda.memory_allocated()/1024**3:.2f} GB")

# ===================== CELL 4.b (head): load latents + pooled projection =====================
from transformers import Trainer, TrainingArguments
import torch.nn.functional as F
import torch.nn as nn
from tqdm import tqdm

def _load_latent(path):
    """Load one cached latent from disk, dropping a leading batch dim of 1."""
    tensor = torch.load(path, weights_only=True)
    if tensor.dim() == 4 and tensor.shape[0] == 1:
        tensor = tensor.squeeze(0)
    return tensor

# Latents stay on CPU until the trainer moves each batch to the GPU.
latent_names = sorted(name for name in os.listdir(LATENT_DIR) if name.endswith(".pt"))
loaded = [_load_latent(os.path.join(LATENT_DIR, name))
          for name in tqdm(latent_names, desc="Loading latents")]
latents = torch.stack(loaded)
print(f"Latents shape: {latents.shape}")

# NOTE(review): this re-loads the same file that cell 4.a already loaded;
# redundant but harmless.
precomputed = torch.load("/content/drive/MyDrive/klein_exact_embeddings.pt", weights_only=True)

# Pooled: 2560 -> 768 (width expected for pooled_projections by
# FluxTransformer2DModel).
pooled_projection = nn.Linear(2560, 768, bias=True, dtype=torch.bfloat16).to("cuda")
torch.no_grad():\n"," min_d = min(2560, 768)\n"," pooled_projection.weight.data[:, :min_d] = torch.eye(min_d, dtype=torch.bfloat16)\n"," pooled_projection.bias.data.zero_()\n","pooled_projection.train()\n","\n","class FluxLoRADataset(Dataset):\n"," def __init__(self, latents, precomputed):\n"," self.latents = latents\n"," self.encoder_hs = precomputed[\"encoder_hidden_states\"]\n"," self.pooled = precomputed[\"pooled_projections\"]\n","\n"," def __len__(self): return len(self.latents)\n","\n"," def __getitem__(self, idx):\n"," return {\n"," \"latent\": self.latents[idx],\n"," \"encoder_hidden_states\": self.encoder_hs[idx], # (seq_len, ~2560)\n"," \"pooled_raw\": self.pooled[idx]\n"," }\n","\n","def collate_fn(batch):\n"," return {\n"," \"latent\": torch.stack([item[\"latent\"] for item in batch]),\n"," \"encoder_hidden_states\": [item[\"encoder_hidden_states\"] for item in batch],\n"," \"pooled_raw\": torch.stack([item[\"pooled_raw\"] for item in batch])\n"," }\n","\n","class FluxLoRATrainer(Trainer):\n"," def compute_loss(self, model, inputs, return_outputs=False, num_items_in_batch=None):\n"," latents = inputs[\"latent\"].to(dtype=torch.bfloat16, device=model.device)\n"," encoder_hs_list = inputs[\"encoder_hidden_states\"]\n"," pooled_raw = inputs[\"pooled_raw\"].to(dtype=torch.bfloat16, device=model.device)\n","\n"," pooled_projections = pooled_projection(pooled_raw)\n","\n"," batch_size = latents.shape[0]\n"," timesteps = torch.rand(batch_size, device=latents.device)\n"," noise = torch.randn_like(latents)\n"," noisy_latents = (1 - timesteps.view(-1, 1, 1, 1)) * latents + timesteps.view(-1, 1, 1, 1) * noise\n","\n"," # === Careful reshape for encoder_hidden_states ===\n"," # Take first item (B=1), move to device, expand to expected width (usually 7680 = 3*2560)\n"," enc = encoder_hs_list[0].to(dtype=torch.bfloat16, device=model.device) # (seq_len, 2560)\n"," encoder_hidden_states = torch.cat([enc, enc, enc], dim=-1) # (seq_len, 7680)\n","\n"," # Placeholders - 
this is the part that often causes the 4D vs 5D cat error if wrong\n"," seq_len = encoder_hidden_states.shape[0]\n"," txt_ids = torch.zeros((seq_len, 3), device=model.device, dtype=torch.bfloat16)\n","\n"," # Image tokens: for 1024x1024 latents (16x128x128) the number is usually (128*128) // 4 or similar\n"," img_tokens = latents.shape[2] * latents.shape[3] // 4\n"," img_ids = torch.zeros((img_tokens, 3), device=model.device, dtype=torch.bfloat16)\n","\n"," model_output = model(\n"," hidden_states=noisy_latents,\n"," timestep=timesteps * 1000,\n"," encoder_hidden_states=encoder_hidden_states,\n"," pooled_projections=pooled_projections,\n"," txt_ids=txt_ids,\n"," img_ids=img_ids,\n"," return_dict=False\n"," )[0]\n","\n"," target = noise - latents\n"," loss = F.mse_loss(model_output, target)\n","\n"," return (loss, model_output) if return_outputs else loss\n","\n","training_args = TrainingArguments(\n"," output_dir=\"/content/flux_klein_lora\",\n"," per_device_train_batch_size=BATCH_SIZE,\n"," num_train_epochs=NUM_EPOCHS,\n"," learning_rate=LEARNING_RATE,\n"," lr_scheduler_type=\"cosine\",\n"," warmup_steps=50,\n"," bf16=True,\n"," logging_steps=10,\n"," save_strategy=\"epoch\",\n"," save_total_limit=2,\n"," report_to=\"none\",\n"," remove_unused_columns=False,\n"," dataloader_pin_memory=False,\n",")\n","\n","trainer = FluxLoRATrainer(\n"," model=transformer,\n"," args=training_args,\n"," train_dataset=dataset,\n"," data_collator=collate_fn,\n",")\n","\n","print(\"\\nπŸš€ Starting training...\")\n","trainer.train()"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"DRoYrgC-rbkt"},"outputs":[],"source":["# ============================= CELL 4.c: Save LoRA =============================\n","# @title 4.c Save Final LoRA\n","\n","final_lora_dir = FINAL_LORA_DIR\n","os.makedirs(final_lora_dir, exist_ok=True)\n","transformer.save_pretrained(final_lora_dir)\n","\n","print(f\"\\nβœ… Training completed! 
LoRA saved to: {final_lora_dir}\")\n","torch.cuda.empty_cache()\n","gc.collect()"]}],"metadata":{"accelerator":"GPU","colab":{"gpuType":"T4","provenance":[{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/Qwen destill.ipynb","timestamp":1775130462813}]},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"name":"python"}},"nbformat":4,"nbformat_minor":0}