{"cells":[{"cell_type":"code","execution_count":null,"metadata":{"id":"9QVc2_k_bL3P","executionInfo":{"status":"aborted","timestamp":1775006100621,"user_tz":-120,"elapsed":107065,"user":{"displayName":"Nnekos4Lyfe","userId":"11521490251288716219"}}},"outputs":[],"source":["from google.colab import drive\n","drive.mount('/content/drive')"]},{"cell_type":"code","source":["# ============================= CELL 1: Prepare Latents + Distill 768-dim Text Encoder (Fixed Dtype) =============================\n","# @title 1. Full Preparation – Latents + 768-dim Distilled Encoder (Dtype Fixed)\n","\n","import os\n","import zipfile\n","import torch\n","import torch.nn as nn\n","import torch.nn.functional as F\n","import numpy as np\n","from google.colab import drive\n","from PIL import Image\n","from tqdm import tqdm\n","from diffusers import AutoencoderKL\n","from transformers import AutoTokenizer, AutoModel\n","from datasets import Dataset as HFDataset\n","from torch.utils.data import Dataset\n","from peft import LoraConfig, get_peft_model\n","from transformers import Trainer, TrainingArguments, set_seed\n","\n","set_seed(42)\n","drive.mount('/content/drive', force_remount=True)\n","\n","zip_path = '/content/drive/MyDrive/my_set.zip' # @param {type:'string'}\n","\n","# ====================== 1. 
Extract Data ======================\n","print(\"πŸ“¦ Extracting zip...\")\n","extract_dir = \"/content/data\"\n","os.makedirs(extract_dir, exist_ok=True)\n","\n","with zipfile.ZipFile(zip_path, 'r') as zip_ref:\n"," zip_ref.extractall(extract_dir)\n","\n","image_files = [f for f in os.listdir(extract_dir) if f.lower().endswith(('.png', '.jpg', '.jpeg', '.webp'))]\n","print(f\"βœ… Found {len(image_files)} images\")\n","\n","text_files = sorted([f for f in os.listdir(extract_dir) if f.endswith('.txt') and f[0].isdigit()])\n","texts = []\n","for tf in text_files:\n"," with open(os.path.join(extract_dir, tf), \"r\", encoding=\"utf-8\") as f:\n"," content = f.read().strip()\n"," if content:\n"," texts.append(content)\n","\n","print(f\"βœ… Loaded {len(texts)} captions\")\n","\n","# ====================== 2. Encode Images β†’ Flux VAE Latents ======================\n","latent_dir = \"/content/drive/MyDrive/flux_klein_latents\"\n","if os.path.exists(latent_dir) and len([f for f in os.listdir(latent_dir) if f.endswith(\".pt\")]) == len(image_files):\n"," print(f\"βœ… Using existing latents from {latent_dir}\")\n","else:\n"," print(\"\\nπŸŒ€ Encoding images to Flux VAE latents...\")\n"," vae = AutoencoderKL.from_pretrained(\n"," \"black-forest-labs/FLUX.1-dev\",\n"," subfolder=\"vae\",\n"," torch_dtype=torch.float32,\n"," device_map=\"auto\"\n"," )\n"," vae.eval()\n","\n"," os.makedirs(latent_dir, exist_ok=True)\n","\n"," with torch.no_grad():\n"," for img_file in tqdm(image_files, desc=\"Encoding latents\"):\n"," img_path = os.path.join(extract_dir, img_file)\n"," image = Image.open(img_path).convert(\"RGB\").resize((1024, 1024), Image.LANCZOS)\n","\n"," pixel_values = (torch.from_numpy(np.array(image)).permute(2, 0, 1).unsqueeze(0).float() / 255.0)\n"," pixel_values = pixel_values.to(vae.device, dtype=vae.dtype) * 2.0 - 1.0\n","\n"," latents = vae.encode(pixel_values).latent_dist.sample() * vae.config.scaling_factor\n"," latent_name = os.path.splitext(img_file)[0] + 
\".pt\"\n"," torch.save(latents.cpu(), os.path.join(latent_dir, latent_name))\n","\n"," del vae\n"," torch.cuda.empty_cache()\n"," print(f\"βœ… Latents saved to {latent_dir}\")\n","\n","# ====================== 3. Compute Teacher Embeddings & Project to 768 ======================\n","print(\"\\nπŸ“ Computing teacher embeddings and projecting to 768-dim...\")\n","\n","teacher_model_name = \"Qwen/Qwen3-Embedding-0.6B\"\n","tokenizer = AutoTokenizer.from_pretrained(teacher_model_name)\n","teacher_model = AutoModel.from_pretrained(\n"," teacher_model_name,\n"," torch_dtype=torch.float16,\n"," device_map=\"auto\",\n"," trust_remote_code=True\n",")\n","teacher_model.eval()\n","\n","teacher_embeddings = []\n","with torch.no_grad():\n"," for text in tqdm(texts, desc=\"Teacher encoding\"):\n"," inputs = tokenizer(text, padding=True, truncation=True, max_length=512, return_tensors=\"pt\").to(teacher_model.device)\n"," outputs = teacher_model(**inputs)\n"," emb = outputs.last_hidden_state.mean(dim=1).squeeze(0).cpu()\n"," teacher_embeddings.append(emb)\n","\n","teacher_embeddings_1024 = torch.stack(teacher_embeddings)\n","print(f\"βœ… Teacher embeddings (1024): {teacher_embeddings_1024.shape}\")\n","\n","# Fix: Move everything to float32 before projection\n","teacher_embeddings_1024 = teacher_embeddings_1024.to(torch.float32)\n","\n","teacher_proj = nn.Linear(1024, 768, dtype=torch.float32).to(\"cuda\")\n","with torch.no_grad():\n"," teacher_embeddings_768 = teacher_proj(teacher_embeddings_1024.to(\"cuda\")).cpu()\n","\n","print(f\"βœ… Projected teacher embeddings (768): {teacher_embeddings_768.shape}\")\n","\n","# Save projected teacher embeddings\n","torch.save({\n"," \"embeddings\": teacher_embeddings_768,\n"," \"texts\": texts,\n"," \"dim\": 768\n","}, \"/content/drive/MyDrive/qwen_embeddings_768.pt\")\n","\n","del teacher_model, teacher_proj\n","torch.cuda.empty_cache()\n","\n","# ====================== 4. 
Distill Student to 768-dim ======================\n","print(\"\\nπŸ‘¨β€πŸŽ“ Distilling student to 768-dim...\")\n","\n","student_model_name = \"Qwen/Qwen2.5-0.5B\"\n","base_student = AutoModel.from_pretrained(\n"," student_model_name, torch_dtype=torch.float32, device_map=\"auto\", trust_remote_code=True\n",")\n","\n","lora_config = LoraConfig(\n"," r=16,\n"," lora_alpha=32,\n"," target_modules=[\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\"],\n"," lora_dropout=0.05,\n"," bias=\"none\",\n"," task_type=\"FEATURE_EXTRACTION\"\n",")\n","student_model = get_peft_model(base_student, lora_config)\n","\n","projection = nn.Linear(base_student.config.hidden_size, 768).to(\"cuda\")\n","projection.train()\n","\n","hf_dataset = HFDataset.from_dict({\"text\": texts})\n","\n","class DistillationDataset(Dataset):\n"," def __init__(self, hf_dataset, tokenizer, teacher_embs, max_length=512):\n"," self.dataset = hf_dataset\n"," self.tokenizer = tokenizer\n"," self.teacher_embs = teacher_embs\n"," self.max_length = max_length\n","\n"," def __len__(self): return len(self.dataset)\n","\n"," def __getitem__(self, idx):\n"," text = self.dataset[idx][\"text\"]\n"," inputs = self.tokenizer(text, padding=\"max_length\", truncation=True, max_length=self.max_length, return_tensors=\"pt\")\n"," return {\n"," \"input_ids\": inputs[\"input_ids\"].squeeze(0),\n"," \"attention_mask\": inputs[\"attention_mask\"].squeeze(0),\n"," \"labels\": self.teacher_embs[idx],\n"," }\n","\n","distill_dataset = DistillationDataset(hf_dataset, tokenizer, teacher_embeddings_768)\n","\n","def collate_fn(batch):\n"," return {\n"," \"input_ids\": torch.stack([item[\"input_ids\"] for item in batch]),\n"," \"attention_mask\": torch.stack([item[\"attention_mask\"] for item in batch]),\n"," \"labels\": torch.stack([item[\"labels\"] for item in batch])\n"," }\n","\n","class DistillTrainer(Trainer):\n"," def compute_loss(self, model, inputs, return_outputs=False, num_items_in_batch=None):\n"," labels = 
inputs.pop(\"labels\").to(\"cuda\") # (B, 768)\n","\n"," outputs = model(input_ids=inputs[\"input_ids\"], attention_mask=inputs[\"attention_mask\"])\n"," hidden = outputs.last_hidden_state.mean(dim=1)\n"," student_emb = projection(hidden) # (B, 768)\n","\n"," student_norm = F.normalize(student_emb, p=2, dim=1)\n"," teacher_norm = F.normalize(labels, p=2, dim=1)\n","\n"," mse_loss = F.mse_loss(student_norm, teacher_norm)\n"," cos_loss = (1 - F.cosine_similarity(student_norm, teacher_norm, dim=1)).mean()\n"," loss = 0.25 * mse_loss + 0.75 * cos_loss\n","\n"," return (loss, outputs) if return_outputs else loss\n","\n","training_args = TrainingArguments(\n"," output_dir=\"./distilled_qwen_768\",\n"," per_device_train_batch_size=4,\n"," num_train_epochs=50,\n"," learning_rate=2e-4,\n"," fp16=True,\n"," logging_steps=50,\n"," save_strategy=\"no\",\n"," report_to=\"none\",\n"," remove_unused_columns=False,\n",")\n","\n","trainer = DistillTrainer(\n"," model=student_model,\n"," args=training_args,\n"," train_dataset=distill_dataset,\n"," data_collator=collate_fn,\n",")\n","\n","print(\"πŸš€ Starting distillation to 768-dim...\")\n","trainer.train()\n","\n","# ====================== Save ======================\n","distilled_save_dir = \"/content/drive/MyDrive/distilled_qwen_768_for_flux\"\n","os.makedirs(distilled_save_dir, exist_ok=True)\n","student_model.save_pretrained(distilled_save_dir)\n","tokenizer.save_pretrained(distilled_save_dir)\n","torch.save(projection.state_dict(), f\"{distilled_save_dir}/projection.pth\")\n","\n","print(f\"\\nβœ… SUCCESS! 
768-dim distilled encoder saved to {distilled_save_dir}\")\n","print(f\" Latents are ready in {latent_dir}\")\n","print(\" You can now run Cell 2.\")\n","\n","torch.cuda.empty_cache()"],"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":1000,"referenced_widgets":["95814e9c8dc44b518b9a7c97a3484846","c8d9604547d54bf4bcd76c9568d1c754","0037badf0b5a40bf8426dd2d9d9f1ef8","d716d08f79b34055a3298fea2c3b1a08","25b3d3b1a733453187ba4e03352a393a","f6e5aacbfaee458d8d6ed39a195445fa","6f1a999c78cb469aaab1d5fc7332a8f1","a883a9d294804336a21b6f3d895a4b85","80b2b7ddaca64eda852fed5f56e80395","4feaac6c0d8d4b139e7490f872566d4c","74720c6b5d654dc4a59b965376ce7fe3","7defd7439a03491ebc1b660858c577ee","dd9be47036f64ece95b3455117722a4e","0c8d6e583a8244fd8159585dd58c8a45","bdaf9a838bf348d09e4450b1974593aa","c0c9c02f8f5f469e96606a78b3b6db24","ac1da439af86454bbac1b3c150fefb76","313b48a9c28d4a9183ae889e2fc7e08e","0fa5d5e1534d42baad036989fdab2c6a","27dff4fb434f4a8990049bb4351679d1","31811d22cfd448eeba179efecc4d44f3","e33a15f3347a4aabab5e076030ca220e","e43448abb87e4196804286cecc565d30","1352574ca081414e9538a0c4863058ad","db4ad97cdd304b59b142923d72f7cd4d","c62510463ae740b2a86a33a31f059fd1","bc0f4dff61fe457189e5435b2eebdaed","49a783e6966d4a49af34d14ee771bbf4","c541995d44eb46bb98dac68272260ce0","7e6fccc2a9d444b59eab6a0d767c56c7","335bbfe93389441aba6da69656bf3ca3","8ae514c0dfe542ebaeabbd7c8da2479e","0672d199b0b842008faf8c4ab28ec6b7","1a8a324acc584b0fb60105b0d82c5db8","318c90f9f3f04ecabe874c4b10b3a369","9aea109f54574d0bb452b5771bc00426","edd77ba9c2c147d0b063bbdaf3df2ed7","9e7d560cec704906b003a629589e8efb","1b5f053d18674aa587c0e634ce6e483b","0917a3542fdb4b1e97e274ea06c80b23","d6b6c8ecd9a64c9db42d1fd7a86cff4c","78bb15a7ec6947db8f5ad6b10f8c646a","ea5a9b3a607347e9bb8268243de5a507","02c44deba420470ba7b239d0f5e77c44"]},"id":"lfqwgHHYn4bW","executionInfo":{"status":"ok","timestamp":1775007673685,"user_tz":-120,"elapsed":1311321,"user":{"displayName":"Nnekos4Lyfe","userId":"11521490251288716219"}
},"outputId":"2d2e6911-52b2-4e3c-8762-8b7e5dc4c402"},"execution_count":3,"outputs":[{"output_type":"stream","name":"stdout","text":["Mounted at /content/drive\n","πŸ“¦ Extracting zip...\n","βœ… Found 250 images\n","βœ… Loaded 250 captions\n","βœ… Using existing latents from /content/drive/MyDrive/flux_klein_latents\n","\n","πŸ“ Computing teacher embeddings and projecting to 768-dim...\n"]},{"output_type":"display_data","data":{"text/plain":["Loading weights: 0%| | 0/310 [00:00"],"text/html":["\n","
\n"," \n"," \n"," [3150/3150 20:44, Epoch 50/50]\n","
\n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n"," \n","
StepTraining Loss
500.306407
1000.068270
1500.046029
2000.040868
2500.037660
3000.037948
3500.036726
4000.036342
4500.035160
5000.036195
5500.034873
6000.034670
6500.033394
7000.034004
7500.032754
8000.032264
8500.031586
9000.031454
9500.029183
10000.029279
10500.029389
11000.027533
11500.026483
12000.026171
12500.026032
13000.025056
13500.024446
14000.024373
14500.023412
15000.022667
15500.022285
16000.021616
16500.021353
17000.021070
17500.020380
18000.020133
18500.019799
19000.018946
19500.018862
20000.017926
20500.018461
21000.017476
21500.017427
22000.017166
22500.016884
23000.016194
23500.016499
24000.015926
24500.015583
25000.015570
25500.015187
26000.015098
26500.014849
27000.014774
27500.014445
28000.014186
28500.014396
29000.014029
29500.014093
30000.013511
30500.013682
31000.013810
31500.013562

"]},"metadata":{}},{"output_type":"stream","name":"stdout","text":["\n","βœ… SUCCESS! 768-dim distilled encoder saved to /content/drive/MyDrive/distilled_qwen_768_for_flux\n"," Latents are ready in /content/drive/MyDrive/flux_klein_latents\n"," You can now run Cell 2.\n"]}]},{"cell_type":"code","execution_count":null,"metadata":{"id":"9IGpdiL9BBr6","executionInfo":{"status":"aborted","timestamp":1775006100585,"user_tz":-120,"elapsed":0,"user":{"displayName":"Nnekos4Lyfe","userId":"11521490251288716219"}}},"outputs":[],"source":["# ============================= CELL 2: Save All Assets to Drive =============================\n","# @title 2. Save Latents + New Distilled Encoder\n","\n","import os\n","import torch\n","\n","print(\"πŸ’Ύ Saving all assets to Google Drive...\")\n","\n","# Ensure directories exist\n","os.makedirs(\"/content/drive/MyDrive/flux_klein_latents\", exist_ok=True)\n","os.makedirs(\"/content/drive/MyDrive/distilled_qwen_768_for_flux\", exist_ok=True)\n","\n","# Move latents if not already there\n","# (assuming they are already saved in Cell 1)\n","\n","print(\"βœ… Latents are in /content/drive/MyDrive/flux_klein_latents\")\n","print(\"βœ… New 768-dim distilled model is in /content/drive/MyDrive/distilled_qwen_768_for_flux\")\n","\n","print(\"\\nπŸŽ‰ All data is safely saved on Google Drive.\")\n","print(\" You can now **disconnect and delete the runtime** if you want.\")\n","print(\" Everything needed for training is on Drive.\")\n","print(\" When you come back, start from Cell 3.\")"]},{"cell_type":"code","source":["# ================================================\n","# OPTIONAL: Auto Disconnect Colab Session\n","# ================================================\n","\n","print(\"πŸ”Œ Disconnecting Colab session in 3 seconds...\")\n","import time\n","time.sleep(3)\n","\n","from google.colab import runtime\n","runtime.unassign()\n","\n","print(\"Session 
disconnected.\")"],"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":73},"id":"FQF71-mvmlc1","executionInfo":{"status":"ok","timestamp":1775007678343,"user_tz":-120,"elapsed":4651,"user":{"displayName":"Nnekos4Lyfe","userId":"11521490251288716219"}},"outputId":"ab97d8ee-a278-4d0b-9a6b-9f99a47946ab"},"execution_count":4,"outputs":[{"output_type":"stream","name":"stdout","text":["πŸ”Œ Disconnecting Colab session in 15 seconds...\n","Session disconnected.\n"]}]},{"cell_type":"markdown","metadata":{"id":"cfshTDIFM5ND"},"source":["You can disconnect the Colab runtime past this point. All data from cells 1 and 2 is saved to Drive."]},{"cell_type":"code","execution_count":1,"metadata":{"id":"ZZaadi1VBK6Z","colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"status":"ok","timestamp":1775037684456,"user_tz":-120,"elapsed":65934,"user":{"displayName":"Nnekos4Lyfe","userId":"11521490251288716219"}},"outputId":"b1868b7a-dc28-4dd1-c8b9-c23005d53eb1"},"outputs":[{"output_type":"stream","name":"stdout","text":["Mounted at /content/drive\n","βœ… Dependencies installed and parameters set.\n"," Distilled encoder: /content/drive/MyDrive/distilled_qwen_768_for_flux\n"," Batch size: 1 | Epochs: 8\n","\n","πŸ” Quick VRAM check:\n","memory.used [MiB], memory.total [MiB]\n","3 MiB, 15360 MiB\n"]}],"source":["# ============================= CELL 3: Install Dependencies & Setup =============================\n","# @title 3. 
Install Dependencies + Setup Parameters\n","\n","!pip install -q diffusers transformers peft accelerate datasets tqdm\n","\n","import os\n","import torch\n","from google.colab import drive, userdata\n","from transformers import AutoTokenizer, AutoModel\n","from peft import PeftModel\n","import gc\n","\n","drive.mount('/content/drive', force_remount=True)\n","\n","# ====================== Parameters ======================\n","DISTILLED_DIR = \"/content/drive/MyDrive/distilled_qwen_768_for_flux\"\n","LATENT_DIR = \"/content/drive/MyDrive/flux_klein_latents\"\n","FINAL_LORA_DIR = \"/content/drive/MyDrive/flux_klein_lora_final\"\n","\n","BATCH_SIZE = 1 # Keep low for safety\n","NUM_EPOCHS = 8\n","LEARNING_RATE = 1e-4\n","LORA_RANK = 32\n","LORA_ALPHA = 32\n","\n","print(\"βœ… Dependencies installed and parameters set.\")\n","print(f\" Distilled encoder: {DISTILLED_DIR}\")\n","print(f\" Batch size: {BATCH_SIZE} | Epochs: {NUM_EPOCHS}\")\n","\n","# Optional: quick check\n","print(\"\\nπŸ” Quick VRAM check:\")\n","!nvidia-smi --query-gpu=memory.used,memory.total 
--format=csv"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"fYbQfjC9RBWY","colab":{"base_uri":"https://localhost:8080/","height":403,"referenced_widgets":["79b369a7323440c99a2e015dd979be21","01c583deaecb4ac698e129b48e0314b4","ca5e5f3a32734d66960d52284bbd9114","f94cd562f8264342a2a486fb2f6073d0","96748059b87847bba153debc0b7429d1","ef6d5f8203084bbab388a8267d726966","83413a807bee41619f55a36f3b0a8b79","3eace6b6073e44d58c44f5c8e31fbe29","d2169f49fa7645bba6799bf5dd746529","34a10b8852e44923b50bcce811b361c8","b04e3f34d0994ee1b03e5a8e4ee32661","374264f2b3dc4b949b521238ec1bc5ff","a594f6caa4fd4cc19b4f2c707eea565a","4743ff73d29945f9b14989a7f060432f","c2e6f02c929b49a5aeff100ba1b0c967","21906080febc4e87a5f71555e848c293","a51853bf0d1a4ccc8f2f1715cd0b106e","1f38f74042134a46972a14b914579468","d9df9b1110be49cbbf5064bbeb805387","1d362219aeb14bedbacbd26d7279bd0d","dd2c0d78295f4fa1b01fa8e2ce746eb2","ef575574cf7c4fa7b71a07833529194f","6b28e42f6eac46b885bbcfa5f214aa34","5a944b1b49d740b0b0395d710b204ff0","e4ef583912074fcf83c49a85003a4bc4","841a3b705f514901bd7b9b2fb5603460","30289f01d99a4c73a204684d02898220","675682b1156d4254bb0118b3436cce6d","1e8b471b275144929dd33a05139968ff","5489e21441ce4e44b1beec11e695d0fe","a2d2189312904d3282bd24ab88fe30ac","9b7fcfc97ef641758dd3dc3de7af2e32","20485ca3800141dc9942888cc187331e"]},"outputId":"03428952-2f55-4265-882d-883eec7d6d29"},"outputs":[{"output_type":"stream","name":"stdout","text":["Mounted at /content/drive\n","=== CELL 4 START - Clean Restart ===\n","Current VRAM used: 1.85 GB\n","\n","[1/5] Loading distilled 768-dim Qwen encoder...\n"]},{"output_type":"display_data","data":{"text/plain":["Loading weights: 0%| | 0/290 [00:00