Datasets:
Tags:
Not-For-All-Audiences
Upload Qwen destill.ipynb
Browse files- Qwen destill.ipynb +1 -1
Qwen destill.ipynb
CHANGED
|
@@ -1 +1 @@
|
|
| 1 |
-
{"cells":[{"cell_type":"code","source":["# ============================= SESSION 1: ENCODE IMAGES TO FLUX VAE LATENTS + TEXT TO QWEN EMBEDDINGS (FIXED) =============================\n","# @title 1. Process Images β Flux VAE Latents + Texts β Qwen Embeddings (with VRAM cleanup)\n","\n","zip_path = '/content/drive/MyDrive/my_set.zip' # @param {type:'string'}\n","\n","import os\n","import zipfile\n","import torch\n","import numpy as np\n","from google.colab import drive\n","from PIL import Image\n","from tqdm import tqdm\n","from diffusers import AutoencoderKL\n","from transformers import AutoTokenizer, AutoModel\n","\n","drive.mount('/content/drive')\n","\n","print(\"π Checking GPU...\")\n","!nvidia-smi\n","\n","# ====================== Unzip ======================\n","print(\"π¦ Extracting zip file...\")\n","extract_dir = \"/content/data\"\n","os.makedirs(extract_dir, exist_ok=True)\n","\n","with zipfile.ZipFile(zip_path, 'r') as zip_ref:\n"," zip_ref.extractall(extract_dir)\n","\n","# Find images\n","image_files = [f for f in os.listdir(extract_dir) if f.lower().endswith(('.png', '.jpg', '.jpeg', '.webp'))]\n","print(f\"β
Found {len(image_files)} images\")\n","\n","# ====================== 1. Encode Images β Flux VAE Latents ======================\n","print(\"\\nπ Loading Flux VAE (float32 for compatibility) and encoding images...\")\n","\n","vae = AutoencoderKL.from_pretrained(\n"," \"black-forest-labs/FLUX.1-dev\",\n"," subfolder=\"vae\",\n"," torch_dtype=torch.float32, # Changed to float32 to avoid dtype mismatch\n"," device_map=\"auto\"\n",")\n","vae.eval()\n","\n","latent_dir = \"/content/drive/MyDrive/flux_klein_latents\"\n","os.makedirs(latent_dir, exist_ok=True)\n","\n","with torch.no_grad():\n"," for img_file in tqdm(image_files, desc=\"Encoding images to latents\"):\n"," img_path = os.path.join(extract_dir, img_file)\n"," image = Image.open(img_path).convert(\"RGB\")\n","\n"," # Resize to Flux-preferred resolution\n"," image = image.resize((1024, 1024), Image.LANCZOS)\n","\n"," # Create pixel_values and cast to VAE dtype\n"," pixel_values = (torch.from_numpy(np.array(image)).permute(2, 0, 1).unsqueeze(0).float() / 255.0)\n"," pixel_values = pixel_values.to(vae.device, dtype=vae.dtype) * 2.0 - 1.0\n","\n"," latents = vae.encode(pixel_values).latent_dist.sample() * vae.config.scaling_factor\n","\n"," # Save latent\n"," latent_name = os.path.splitext(img_file)[0] + \".pt\"\n"," torch.save(latents.cpu(), os.path.join(latent_dir, latent_name))\n","\n","print(f\"β
Image latents saved to: {latent_dir}\")\n","\n","# Unload VAE\n","del vae\n","torch.cuda.empty_cache()\n","print(\"ποΈ VAE unloaded. VRAM freed.\\n\")\n","\n","# ====================== 2. Encode Texts β Qwen Embeddings ======================\n","print(\"π Loading Qwen text encoder and computing embeddings...\")\n","\n","# Load texts (1.txt to 7.txt or any .txt starting with digit)\n","text_files = [f for f in os.listdir(extract_dir) if f.endswith('.txt') and f[0].isdigit()]\n","texts = []\n","for tf in sorted(text_files):\n"," with open(os.path.join(extract_dir, tf), \"r\", encoding=\"utf-8\") as f:\n"," content = f.read().strip()\n"," if content:\n"," texts.append(content)\n","\n","print(f\"β
Loaded {len(texts)} text files\")\n","\n","teacher_model_name = \"Qwen/Qwen3-Embedding-0.6B\"\n","tokenizer = AutoTokenizer.from_pretrained(teacher_model_name)\n","model = AutoModel.from_pretrained(\n"," teacher_model_name,\n"," torch_dtype=torch.float16,\n"," device_map=\"auto\",\n"," trust_remote_code=True\n",")\n","model.eval()\n","\n","def mean_pooling(model_output, attention_mask):\n"," token_embeddings = model_output[0]\n"," input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()\n"," return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)\n","\n","embeddings = []\n","with torch.no_grad():\n"," for text in tqdm(texts, desc=\"Encoding texts\"):\n"," inputs = tokenizer(text, padding=True, truncation=True, max_length=8192, return_tensors=\"pt\").to(model.device)\n"," outputs = model(**inputs)\n"," emb = mean_pooling(outputs, inputs['attention_mask']).squeeze(0).cpu()\n"," embeddings.append(emb)\n","\n","# Save embeddings\n","embed_save_path = \"/content/drive/MyDrive/qwen_embeddings.pt\"\n","torch.save({\n"," \"embeddings\": embeddings,\n"," \"texts\": texts,\n"," \"model_name\": teacher_model_name\n","}, embed_save_path)\n","\n","print(f\"β
Qwen embeddings saved to: {embed_save_path}\")\n","\n","# Unload Qwen encoder\n","del model, tokenizer\n","torch.cuda.empty_cache()\n","print(\"ποΈ Qwen encoder unloaded. VRAM freed.\")\n","\n","print(\"\\nπ Session 1 completed successfully!\")\n","print(\" β’ Flux VAE latents saved to /content/drive/MyDrive/flux_klein_latents\")\n","print(\" β’ Qwen embeddings saved to /content/drive/MyDrive/qwen_embeddings.pt\")\n","print(\"You can now restart the runtime if needed and run Cell 2 for distillation training.\")"],"metadata":{"id":"nLqTMpUSfbe3"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# ============================= FINAL FIXED COMBINED CELL 2: TRAIN + EVALUATE + SAVE TO DRIVE =============================\n","# @title Combined: Train Distilled Qwen Encoder + Evaluation + Save to Google Drive (Fully Fixed)\n","\n","import os\n","import torch\n","import torch.nn as nn\n","import torch.nn.functional as F\n","import matplotlib.pyplot as plt\n","import seaborn as sns\n","import numpy as np\n","from sklearn.decomposition import PCA\n","from sklearn.metrics.pairwise import cosine_similarity\n","from tqdm import tqdm\n","from google.colab import drive\n","from torch.utils.data import Dataset\n","from transformers import AutoTokenizer, AutoModel, Trainer, TrainingArguments, set_seed\n","from peft import LoraConfig, get_peft_model\n","from datasets import Dataset as HFDataset\n","\n","set_seed(42)\n","drive.mount('/content/drive', force_remount=True)\n","\n","print(\"π Checking GPU...\")\n","!nvidia-smi\n","if not torch.cuda.is_available():\n"," raise RuntimeError(\"No GPU detected!\")\n","\n","# ====================== 1. Load Teacher Embeddings ======================\n","embed_path = \"/content/drive/MyDrive/qwen_embeddings.pt\"\n","data = torch.load(embed_path, weights_only=False)\n","teacher_embeddings = torch.stack(data[\"embeddings\"])\n","texts = data.get(\"texts\", [f\"text_{i}\" for i in range(len(teacher_embeddings))])\n","\n","print(f\"β
Loaded {len(texts)} texts from Qwen teacher\")\n","\n","hf_dataset = HFDataset.from_dict({\"text\": texts})\n","\n","# ====================== 2. Student Model (Qwen2.5-0.5B + LoRA) ======================\n","student_model_name = \"Qwen/Qwen2.5-0.5B\"\n","student_tokenizer = AutoTokenizer.from_pretrained(student_model_name)\n","\n","base_model = AutoModel.from_pretrained(\n"," student_model_name,\n"," torch_dtype=torch.float32,\n"," device_map=\"auto\",\n"," trust_remote_code=True\n",")\n","\n","lora_config = LoraConfig(\n"," r=16,\n"," lora_alpha=32,\n"," target_modules=[\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\"],\n"," lora_dropout=0.05,\n"," bias=\"none\",\n"," task_type=\"FEATURE_EXTRACTION\"\n",")\n","student_model = get_peft_model(base_model, lora_config)\n","\n","hidden_size = student_model.config.hidden_size\n","projection = nn.Linear(hidden_size, 1024).to(student_model.device)\n","projection.train()\n","\n","print(f\"π¨βπ Student: {student_model_name} + LoRA + projection ({hidden_size}β1024)\")\n","\n","# ====================== 3. 
Dataset ======================\n","class DistillationDataset(Dataset):\n"," def __init__(self, hf_dataset, tokenizer, teacher_embs, max_length=512):\n"," self.dataset = hf_dataset\n"," self.tokenizer = tokenizer\n"," self.teacher_embs = teacher_embs\n"," self.max_length = max_length\n","\n"," def __len__(self): return len(self.dataset)\n","\n"," def __getitem__(self, idx):\n"," text = self.dataset[idx][\"text\"]\n"," inputs = self.tokenizer(text, padding=\"max_length\", truncation=True, max_length=self.max_length, return_tensors=\"pt\")\n"," return {\n"," \"input_ids\": inputs[\"input_ids\"].squeeze(0),\n"," \"attention_mask\": inputs[\"attention_mask\"].squeeze(0),\n"," \"labels\": self.teacher_embs[idx],\n"," }\n","\n","distill_dataset = DistillationDataset(hf_dataset, student_tokenizer, teacher_embeddings)\n","\n","def collate_fn(batch):\n"," return {\n"," \"input_ids\": torch.stack([item[\"input_ids\"] for item in batch]),\n"," \"attention_mask\": torch.stack([item[\"attention_mask\"] for item in batch]),\n"," \"labels\": torch.stack([item[\"labels\"] for item in batch])\n"," }\n","\n","# ====================== 4. 
Fixed Trainer ======================\n","class DistillationTrainer(Trainer):\n"," def compute_loss(self, model, inputs, return_outputs=False, num_items_in_batch=None):\n"," labels = inputs.pop(\"labels\")\n"," outputs = model(input_ids=inputs[\"input_ids\"], attention_mask=inputs[\"attention_mask\"])\n"," hidden = outputs.last_hidden_state.mean(dim=1)\n"," student_emb = projection(hidden)\n","\n"," student_norm = F.normalize(student_emb, p=2, dim=1)\n"," teacher_norm = F.normalize(labels.to(student_emb.device), p=2, dim=1)\n","\n"," mse_loss = F.mse_loss(student_norm, teacher_norm)\n"," cos_loss = (1 - F.cosine_similarity(student_norm, teacher_norm, dim=1)).mean()\n","\n"," total_loss = 0.25 * mse_loss + 0.75 * cos_loss\n"," return (total_loss, outputs) if return_outputs else total_loss\n","\n"," # Fully compatible log method for recent transformers versions\n"," def log(self, logs, start_time=None, **kwargs):\n"," # Call the parent log with only the logs dict\n"," super().log(logs)\n"," # Optional: store history for debugging\n"," if not hasattr(self, 'log_history'):\n"," self.log_history = []\n"," self.log_history.append(logs.copy())\n","\n","training_args = TrainingArguments(\n"," output_dir=\"./qwen_family_distilled\",\n"," per_device_train_batch_size=8,\n"," num_train_epochs=80,\n"," learning_rate=2e-4,\n"," fp16=True,\n"," logging_steps=50, # Loss printed every 50 steps\n"," save_strategy=\"no\",\n"," report_to=\"none\",\n"," remove_unused_columns=False,\n",")\n","\n","trainer = DistillationTrainer(\n"," model=student_model,\n"," args=training_args,\n"," train_dataset=distill_dataset,\n"," data_collator=collate_fn,\n",")\n","\n","print(\"π Starting training (loss will be printed every 50 steps)...\")\n","trainer.train()\n","\n","# ====================== 5. 
Save to Google Drive ======================\n","final_save_dir = \"/content/drive/MyDrive/distilled_qwen_encoder_for_flux\"\n","os.makedirs(final_save_dir, exist_ok=True)\n","\n","student_model.save_pretrained(final_save_dir)\n","student_tokenizer.save_pretrained(final_save_dir)\n","torch.save(projection.state_dict(), f\"{final_save_dir}/projection.pth\")\n","\n","print(f\"\\nβ
Model + LoRA adapters + projection successfully saved to Google Drive:\")\n","print(f\" β {final_save_dir}\")\n","\n","# ====================== 6. Final Evaluation ======================\n","print(\"\\nπ Running final evaluation...\")\n","\n","student_model.eval()\n","student_embeddings = []\n","with torch.no_grad():\n"," for text in tqdm(texts, desc=\"Final encoding\"):\n"," inputs = student_tokenizer(text, padding=True, truncation=True, max_length=512, return_tensors=\"pt\").to(student_model.device)\n"," outputs = student_model(**inputs)\n"," hidden = outputs.last_hidden_state.mean(dim=1)\n"," emb = projection(hidden)\n"," student_embeddings.append(emb.squeeze(0).cpu())\n","\n","student_embeddings = torch.stack(student_embeddings).to(student_model.device)\n","\n","# Metrics\n","mse = torch.nn.functional.mse_loss(student_embeddings, teacher_embeddings).item()\n","cos_sims = [cosine_similarity(student_embeddings[i].unsqueeze(0).cpu().numpy(),\n"," teacher_embeddings[i].unsqueeze(0).cpu().numpy())[0][0]\n"," for i in range(len(texts))]\n","\n","avg_cosine = np.mean(cos_sims)\n","std_cosine = np.std(cos_sims)\n","\n","teacher_norms = torch.norm(teacher_embeddings, dim=1).cpu().numpy()\n","student_norms = torch.norm(student_embeddings, dim=1).cpu().numpy()\n","\n","print(f\"\\nπ Final MSE: {mse:.4f}\")\n","print(f\"π Average Cosine Similarity: {avg_cosine:.4f} (Β± {std_cosine:.4f})\")\n","print(f\"Teacher norm: {teacher_norms.mean():.1f} | Student norm: {student_norms.mean():.1f}\")\n","\n","# PCA Plot\n","all_embs = torch.cat([teacher_embeddings.cpu(), student_embeddings.cpu()], dim=0).numpy()\n","pca = PCA(n_components=2, random_state=42)\n","pca_result = pca.fit_transform(all_embs)\n","teacher_pca = pca_result[:len(texts)]\n","student_pca = pca_result[len(texts):]\n","\n","plt.figure(figsize=(14, 10))\n","sns.set_style(\"whitegrid\")\n","plt.scatter(teacher_pca[:, 0], teacher_pca[:, 1], c='blue', label='Qwen Teacher', s=65, 
marker='o')\n","plt.scatter(student_pca[:, 0], student_pca[:, 1], c='red', label='Distilled Student', s=65, marker='x')\n","for i in range(len(texts)):\n"," plt.plot([teacher_pca[i, 0], student_pca[i, 0]], [teacher_pca[i, 1], student_pca[i, 1]], 'k--', alpha=0.35)\n","plt.title('Shared PCA Space: Teacher vs Student')\n","plt.legend()\n","plt.grid(True, alpha=0.3)\n","plt.tight_layout()\n","plt.show()\n","\n","print(\"\\nπ Training completed! Model is saved in your Google Drive at:\")\n","print(final_save_dir)"],"metadata":{"id":"idg8fn8Birub"},"execution_count":null,"outputs":[]}],"metadata":{"accelerator":"GPU","colab":{"gpuType":"T4","provenance":[{"file_id":"1rORehICZ99xZsrwMfy2w8Jxg6M55d81L","timestamp":1774988291447}],"mount_file_id":"1rORehICZ99xZsrwMfy2w8Jxg6M55d81L","authorship_tag":"ABX9TyNLy5InWdcrgGC1Wt3iNYNE"},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"name":"python"}},"nbformat":4,"nbformat_minor":0}
|
|
|
|
| 1 |
+
# ============================= CELL 1: Prepare Latents + Distill 768-dim Text Encoder =============================
# @title 1. Process Images -> Flux VAE Latents + Distill New 768-dim Text Encoder

import os
import zipfile
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from google.colab import drive
from PIL import Image
from tqdm import tqdm
from diffusers import AutoencoderKL
from transformers import AutoTokenizer, AutoModel
from datasets import Dataset as HFDataset
from torch.utils.data import Dataset
from peft import LoraConfig, get_peft_model
from transformers import Trainer, TrainingArguments, set_seed

set_seed(42)
drive.mount('/content/drive', force_remount=True)

zip_path = '/content/drive/MyDrive/my_set.zip'  # @param {type:'string'}

# ====================== 1. Extract Data ======================
print("Extracting zip...")
extract_dir = "/content/data"
os.makedirs(extract_dir, exist_ok=True)

with zipfile.ZipFile(zip_path, 'r') as zip_ref:
    zip_ref.extractall(extract_dir)

image_files = [f for f in os.listdir(extract_dir) if f.lower().endswith(('.png', '.jpg', '.jpeg', '.webp'))]
print(f"Found {len(image_files)} images")


def _caption_sort_key(filename):
    """Numeric-aware sort key so '2.txt' precedes '10.txt'.

    Plain lexicographic sorted() (the original) orders '10.txt' before '2.txt',
    which silently breaks the positional pairing with the latents later on.
    """
    stem = os.path.splitext(filename)[0]
    return (0, int(stem)) if stem.isdigit() else (1, stem)


text_files = sorted(
    (f for f in os.listdir(extract_dir) if f.endswith('.txt') and f[0].isdigit()),
    key=_caption_sort_key,
)
texts = []
for tf in text_files:
    with open(os.path.join(extract_dir, tf), "r", encoding="utf-8") as f:
        content = f.read().strip()
    if content:
        texts.append(content)
    else:
        # Silently dropping an empty caption would shift the text<->latent
        # pairing used downstream; make the skip visible.
        print(f"WARNING: empty caption file skipped: {tf}")

print(f"Loaded {len(texts)} captions")
# ====================== 2. Encode Images -> Flux VAE Latents ======================
print("\nLoading Flux VAE and encoding images...")

vae = AutoencoderKL.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    subfolder="vae",
    torch_dtype=torch.float32,  # float32 avoids fp16 dtype mismatches on encode
    device_map="auto",
)
vae.eval()

latent_dir = "/content/drive/MyDrive/flux_klein_latents"
os.makedirs(latent_dir, exist_ok=True)

with torch.no_grad():
    for img_file in tqdm(image_files, desc="Encoding to latents"):
        img_path = os.path.join(extract_dir, img_file)
        image = Image.open(img_path).convert("RGB").resize((1024, 1024), Image.LANCZOS)

        # HWC uint8 -> NCHW float in [-1, 1] (the VAE's expected input range).
        pixel_values = torch.from_numpy(np.array(image)).permute(2, 0, 1).unsqueeze(0).float() / 255.0
        pixel_values = pixel_values.to(vae.device, dtype=vae.dtype) * 2.0 - 1.0

        latents = vae.encode(pixel_values).latent_dist.sample()
        # FLUX VAE latents are shifted *and* scaled in diffusers' FluxPipeline:
        # (x - shift_factor) * scaling_factor. The original applied only
        # scaling_factor, which skews the latent distribution fed to training.
        latents = (latents - vae.config.shift_factor) * vae.config.scaling_factor

        latent_name = os.path.splitext(img_file)[0] + ".pt"
        torch.save(latents.cpu(), os.path.join(latent_dir, latent_name))

print(f"Latents saved to {latent_dir}")

del vae
torch.cuda.empty_cache()

# ====================== 3. Distill Text Encoder to 768-dim ======================
print("\nDistilling text encoder to 768-dim...")

teacher_model_name = "Qwen/Qwen3-Embedding-0.6B"

tokenizer = AutoTokenizer.from_pretrained(teacher_model_name)
teacher_model = AutoModel.from_pretrained(
    teacher_model_name,
    torch_dtype=torch.float16,
    device_map="auto",
    trust_remote_code=True,
)
teacher_model.eval()

# Student model
student_model_name = "Qwen/Qwen2.5-0.5B"
base_student = AutoModel.from_pretrained(
    student_model_name, torch_dtype=torch.float32, device_map="auto", trust_remote_code=True
)

lora_config = LoraConfig(
    r=16,
    lora_alpha=32,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
    lora_dropout=0.05,
    bias="none",
    task_type="FEATURE_EXTRACTION",
)
student_model = get_peft_model(base_student, lora_config)

# Projection to 768 (the conditioning width intended for FLUX.2-klein).
# NOTE(review): placed on the student's own device rather than a hard-coded
# "cuda" string, since the base model was loaded with device_map="auto".
projection = nn.Linear(base_student.config.hidden_size, 768).to(base_student.device)
projection.train()

hf_dataset = HFDataset.from_dict({"text": texts})


class DistillationDataset(Dataset):
    """Tokenizes captions to fixed-length tensors for the student forward pass."""

    def __init__(self, hf_dataset, tokenizer, max_length=512):
        self.dataset = hf_dataset
        self.tokenizer = tokenizer
        self.max_length = max_length

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, idx):
        text = self.dataset[idx]["text"]
        inputs = self.tokenizer(
            text, padding="max_length", truncation=True,
            max_length=self.max_length, return_tensors="pt",
        )
        return {
            "input_ids": inputs["input_ids"].squeeze(0),
            "attention_mask": inputs["attention_mask"].squeeze(0),
        }


distill_dataset = DistillationDataset(hf_dataset, tokenizer)
def collate_fn(batch):
    """Stack per-sample tensors into batch tensors for the Trainer."""
    return {
        "input_ids": torch.stack([item["input_ids"] for item in batch]),
        "attention_mask": torch.stack([item["attention_mask"] for item in batch]),
    }


class DistillTrainer(Trainer):
    """Distills the frozen Qwen3 teacher into the LoRA student + projection head."""

    def compute_loss(self, model, inputs, return_outputs=False, num_items_in_batch=None):
        # Recover this batch's texts from its own token ids. The original did
        # `[texts[i] for i in range(len(inputs["input_ids"]))]`, which takes the
        # FIRST batch_size texts of the dataset for every batch — so with a
        # shuffled DataLoader the teacher and student see different texts after
        # the first batch. batch_decode keeps teacher/student pairs aligned.
        batch_texts = tokenizer.batch_decode(inputs["input_ids"], skip_special_tokens=True)

        # Teacher embedding (frozen, no grad).
        with torch.no_grad():
            teacher_inputs = tokenizer(
                batch_texts, padding=True, truncation=True, max_length=512, return_tensors="pt"
            ).to(teacher_model.device)
            teacher_emb = teacher_model(**teacher_inputs).last_hidden_state.mean(dim=1)

        # Student forward + projection to the target width.
        outputs = model(input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"])
        student_emb = projection(outputs.last_hidden_state.mean(dim=1))  # (B, 768)

        # NOTE(review): the original comment "assuming teacher outputs 768" is
        # wrong — Qwen3-Embedding-0.6B produces 1024-dim embeddings. Fail with a
        # clear message instead of a confusing broadcast/shape error in the loss.
        if teacher_emb.shape[-1] != student_emb.shape[-1]:
            raise ValueError(
                f"Teacher dim {teacher_emb.shape[-1]} != student dim {student_emb.shape[-1]}; "
                "add a teacher-side projection or change the student projection size."
            )

        student_norm = F.normalize(student_emb, p=2, dim=1)
        teacher_norm = F.normalize(teacher_emb.to(student_emb.device), p=2, dim=1)

        mse_loss = F.mse_loss(student_norm, teacher_norm)
        cos_loss = (1 - F.cosine_similarity(student_norm, teacher_norm, dim=1)).mean()
        loss = 0.25 * mse_loss + 0.75 * cos_loss

        return (loss, outputs) if return_outputs else loss


training_args = TrainingArguments(
    output_dir="./distilled_qwen_768",
    per_device_train_batch_size=8,
    num_train_epochs=30,
    learning_rate=2e-4,
    fp16=True,
    logging_steps=50,
    save_strategy="no",
    report_to="none",
    remove_unused_columns=False,
)

trainer = DistillTrainer(
    model=student_model,
    args=training_args,
    train_dataset=distill_dataset,
    data_collator=collate_fn,
)

print("Starting distillation to 768-dim...")
trainer.train()

# Save adapter, tokenizer and projection head together so Cell 4 can reload them.
distilled_save_dir = "/content/drive/MyDrive/distilled_qwen_768_for_flux"
os.makedirs(distilled_save_dir, exist_ok=True)
student_model.save_pretrained(distilled_save_dir)
tokenizer.save_pretrained(distilled_save_dir)
torch.save(projection.state_dict(), f"{distilled_save_dir}/projection.pth")

print(f"New 768-dim distilled encoder saved to {distilled_save_dir}")
torch.cuda.empty_cache()
Found 250 images\n","β
Loaded 250 captions\n","\n","π Loading Flux VAE and encoding images...\n"]},{"output_type":"stream","name":"stderr","text":["/usr/local/lib/python3.12/dist-packages/huggingface_hub/utils/_validators.py:206: UserWarning: The `local_dir_use_symlinks` argument is deprecated and ignored in `hf_hub_download`. Downloading to a local directory does not use symlinks anymore.\n"," warnings.warn(\n","Encoding to latents: 96%|ββββββββββ| 239/250 [04:52<00:13, 1.21s/it]"]}]},{"cell_type":"code","source":["# ============================= CELL 2: Save All Assets to Drive =============================\n","# @title 2. Save Latents + New Distilled Encoder\n","\n","import os\n","import torch\n","\n","print(\"πΎ Saving all assets to Google Drive...\")\n","\n","# Ensure directories exist\n","os.makedirs(\"/content/drive/MyDrive/flux_klein_latents\", exist_ok=True)\n","os.makedirs(\"/content/drive/MyDrive/distilled_qwen_768_for_flux\", exist_ok=True)\n","\n","# Move latents if not already there\n","# (assuming they are already saved in Cell 1)\n","\n","print(\"β
Latents are in /content/drive/MyDrive/flux_klein_latents\")\n","print(\"β
New 768-dim distilled model is in /content/drive/MyDrive/distilled_qwen_768_for_flux\")\n","\n","print(\"\\nπ All data is safely saved on Google Drive.\")\n","print(\" You can now **disconnect and delete the runtime** if you want.\")\n","print(\" Everything needed for training is on Drive.\")\n","print(\" When you come back, start from Cell 3.\")"],"metadata":{"id":"9IGpdiL9BBr6"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# ============================= CELL 3: Install Dependencies & Setup =============================\n","# @title 3. Install Dependencies + Setup Parameters\n","\n","!pip install -q diffusers transformers peft accelerate datasets tqdm\n","\n","import os\n","import torch\n","from google.colab import drive, userdata\n","from transformers import AutoTokenizer, AutoModel\n","from peft import PeftModel\n","import gc\n","\n","drive.mount('/content/drive', force_remount=True)\n","\n","# ====================== Parameters ======================\n","DISTILLED_DIR = \"/content/drive/MyDrive/distilled_qwen_768_for_flux\"\n","LATENT_DIR = \"/content/drive/MyDrive/flux_klein_latents\"\n","FINAL_LORA_DIR = \"/content/drive/MyDrive/flux_klein_lora_final\"\n","\n","BATCH_SIZE = 1 # Keep low for safety\n","NUM_EPOCHS = 8\n","LEARNING_RATE = 1e-4\n","LORA_RANK = 32\n","LORA_ALPHA = 64\n","\n","print(\"β
Dependencies installed and parameters set.\")\n","print(f\" Distilled encoder: {DISTILLED_DIR}\")\n","print(f\" Batch size: {BATCH_SIZE} | Epochs: {NUM_EPOCHS}\")\n","\n","# Optional: quick check\n","print(\"\\nπ Quick VRAM check:\")\n","!nvidia-smi --query-gpu=memory.used,memory.total --format=csv"],"metadata":{"id":"ZZaadi1VBK6Z"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# ============================= CELL 4: LoRA Training with Debug Prints =============================\n","# @title 4. LoRA Training β FLUX.2-klein-base-4B + 768-dim Distilled Encoder\n","\n","import torch\n","import torch.nn as nn\n","import torch.nn.functional as F\n","from torch.utils.data import Dataset\n","from tqdm import tqdm\n","from transformers import Trainer, TrainingArguments, set_seed\n","from peft import LoraConfig, get_peft_model\n","from diffusers import FluxTransformer2DModel\n","\n","set_seed(42)\n","\n","print(\"=== CELL 4 START ===\")\n","print(f\"VRAM before anything: {torch.cuda.memory_allocated()/1024**3:.2f} GB\")\n","\n","# ====================== 1. Load Distilled 768-dim Encoder ======================\n","print(\"\\n[DEBUG] Loading distilled 768-dim encoder...\")\n","tokenizer = AutoTokenizer.from_pretrained(DISTILLED_DIR)\n","\n","base_qwen = AutoModel.from_pretrained(\n"," \"Qwen/Qwen2.5-0.5B\", torch_dtype=torch.float32, device_map=\"auto\",\n"," trust_remote_code=True, low_cpu_mem_usage=True\n",")\n","student_model = PeftModel.from_pretrained(base_qwen, DISTILLED_DIR)\n","student_model.eval()\n","\n","projection = nn.Linear(base_qwen.config.hidden_size, 768).to(\"cuda\")\n","projection.load_state_dict(torch.load(f\"{DISTILLED_DIR}/projection.pth\", map_location=\"cuda\"))\n","projection.eval()\n","\n","print(\"[DEBUG] Distilled encoder loaded successfully (target dim 768)\")\n","\n","# ====================== 2. 
# ====================== 2. Load Latents & Texts ======================
print("\n[DEBUG] Loading texts and latents...")
# NOTE(review): this notebook revision's Cell 1 never writes
# qwen_embeddings.pt (the previous revision did). TODO: confirm the file
# exists on Drive from an earlier run, or save the captions explicitly in
# Cell 1, otherwise this load fails after a fresh start.
data = torch.load("/content/drive/MyDrive/qwen_embeddings.pt", weights_only=False)
texts = data["texts"]
print(f"[DEBUG] Loaded {len(texts)} texts")


def _latent_sort_key(filename):
    """Numeric-aware sort so latent '2.pt' pairs with caption 2, not after '10.pt'."""
    stem = os.path.splitext(filename)[0]
    return (0, int(stem)) if stem.isdigit() else (1, stem)


latent_files = sorted(
    (f for f in os.listdir(LATENT_DIR) if f.endswith(".pt")),
    key=_latent_sort_key,
)
latents = []
for lf in tqdm(latent_files, desc="[DEBUG] Loading latents"):
    lat = torch.load(os.path.join(LATENT_DIR, lf), weights_only=False)
    # Cell 1 saved latents with a leading batch dim; drop it for stacking.
    if lat.dim() == 4 and lat.shape[0] == 1:
        lat = lat.squeeze(0)
    latents.append(lat)
latents = torch.stack(latents)
print(f"[DEBUG] Loaded latents shape: {latents.shape}")

# Latents and texts are paired positionally; a count mismatch means silent misalignment.
assert len(latents) == len(texts), (
    f"{len(latents)} latents vs {len(texts)} texts - positional pairing would be wrong"
)


class FluxLoRADataset(Dataset):
    """Pairs each pre-computed VAE latent with its caption (positional pairing)."""

    def __init__(self, latents, texts):
        self.latents = latents
        self.texts = texts

    def __len__(self):
        return len(self.latents)

    def __getitem__(self, idx):
        return {"latent": self.latents[idx], "text": self.texts[idx]}


dataset = FluxLoRADataset(latents, texts)


def collate_fn(batch):
    """Batch latents into one tensor; keep texts as a plain list for the tokenizer."""
    return {
        "latent": torch.stack([item["latent"] for item in batch]),
        "texts": [item["text"] for item in batch],
    }


# ====================== 3. Load Transformer + LoRA ======================
print("\n[DEBUG] Loading FLUX.2-klein-base-4B transformer...")
torch.cuda.empty_cache()

transformer = FluxTransformer2DModel.from_pretrained(
    "black-forest-labs/FLUX.2-klein-base-4B",
    subfolder="transformer",
    torch_dtype=torch.bfloat16,
    low_cpu_mem_usage=False,
).to("cuda")

print(f"[DEBUG] Transformer loaded. VRAM now: {torch.cuda.memory_allocated()/1024**3:.2f} GB")
lora_config = LoraConfig(
    r=LORA_RANK, lora_alpha=LORA_ALPHA,
    target_modules=["attn.to_q", "attn.to_k", "attn.to_v", "attn.to_out.0",
                    "attn.to_qkv_mlp_proj", "attn.add_q_proj", "attn.add_k_proj",
                    "attn.add_v_proj", "attn.to_add_out",
                    "ff.linear_in", "ff.linear_out",
                    "ff_context.linear_in", "ff_context.linear_out"],
    lora_dropout=0.05,
    bias="none"
)

transformer = get_peft_model(transformer, lora_config)
transformer.train()

print("[DEBUG] LoRA applied successfully")

# ====================== 4. Trainer with heavy debug ======================
class FluxLoRATrainer(Trainer):
    """Flow-matching style loop: predict (noise - latents) at a uniformly random t."""

    def compute_loss(self, model, inputs, return_outputs=False, num_items_in_batch=None):
        print(f"[DEBUG compute_loss] Batch size: {inputs['latent'].shape[0]}")
        latents = inputs["latent"].to(dtype=torch.bfloat16, device=model.device)
        raw_texts = inputs["texts"]

        text_inputs = tokenizer(raw_texts, padding=True, truncation=True, max_length=512, return_tensors="pt").to("cuda")
        print(f"[DEBUG] Text input shape: {text_inputs['input_ids'].shape}")

        # Frozen distilled encoder: mean-pool hidden states, project to 768.
        with torch.no_grad():
            outputs = student_model(**text_inputs)
            hidden = outputs.last_hidden_state.mean(dim=1)
            text_emb_768 = projection(hidden).to(dtype=torch.bfloat16)
        print(f"[DEBUG] Projected embedding shape: {text_emb_768.shape}")

        batch_size = latents.shape[0]
        # t ~ U(0, 1): linear interpolation between clean latents (t=0) and noise (t=1).
        timesteps = torch.rand(batch_size, device=latents.device)
        noise = torch.randn_like(latents)

        noisy_latents = (1 - timesteps.view(-1, 1, 1, 1)) * latents + timesteps.view(-1, 1, 1, 1) * noise

        # A single "token" of context built from the pooled sentence embedding.
        # NOTE(review): FluxTransformer2DModel's forward normally also expects
        # packed latents plus img_ids/txt_ids - TODO confirm this call
        # signature against the installed diffusers version before a long run.
        emb_unsqueezed = text_emb_768.unsqueeze(1)

        print(f"[DEBUG] Calling transformer with pooled_projections shape: {text_emb_768.shape}")
        model_output = model(
            hidden_states=noisy_latents,
            timestep=timesteps * 1000,
            encoder_hidden_states=emb_unsqueezed,
            pooled_projections=text_emb_768,
            return_dict=False
        )[0]

        # Flow-matching target: velocity pointing from data toward noise.
        target = noise - latents
        loss = F.mse_loss(model_output, target)

        print(f"[DEBUG] Loss value: {loss.item():.6f}")
        return (loss, model_output) if return_outputs else loss


training_args = TrainingArguments(
    output_dir="/content/flux_klein_lora",
    per_device_train_batch_size=BATCH_SIZE,
    num_train_epochs=NUM_EPOCHS,
    learning_rate=LEARNING_RATE,
    lr_scheduler_type="cosine",
    warmup_steps=50,
    bf16=True,
    logging_steps=10,  # more frequent logs
    save_strategy="epoch",
    save_total_limit=2,
    report_to="none",
    remove_unused_columns=False,
)

trainer = FluxLoRATrainer(
    model=transformer,
    args=training_args,
    train_dataset=dataset,
    data_collator=collate_fn,
)

print("\nStarting LoRA training with heavy debug output...")
trainer.train()

# Save only the LoRA adapter weights to Drive.
final_lora_dir = FINAL_LORA_DIR
os.makedirs(final_lora_dir, exist_ok=True)
transformer.save_pretrained(final_lora_dir)

print(f"\nTraining finished! LoRA saved to {final_lora_dir}")
torch.cuda.empty_cache()
|