Datasets:
Tags:
Not-For-All-Audiences
Upload T5_mapper.ipynb
Browse files — T5_mapper.ipynb (+1 −1)
T5_mapper.ipynb
CHANGED
|
@@ -1 +1 @@
|
|
| 1 |
-
{"cells":[{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":7491,"status":"ok","timestamp":1774958926113,"user":{"displayName":"No Name","userId":"10578412414437288386"},"user_tz":-120},"id":"6ceGJfjyvY4l","outputId":"82980427-f490-42c2-daf6-820c900ad0b7"},"outputs":[{"name":"stdout","output_type":"stream","text":["π Checking GPU runtime...\n","β
GPU ready: Tesla T4\n","β
VRAM: 14.6 GB\n"," CUDA version: 12.8\n"," Device index: 0\n","β
GPU check passed\n"]}],"source":["# @markdown # Cell 1: GPU Check & Runtime Validation\n","\n","import torch\n","\n","debug = True #@param {type:\"boolean\"}\n","\n","print(\"π Checking GPU runtime...\")\n","\n","if torch.cuda.is_available():\n"," gpu_name = torch.cuda.get_device_name(0)\n"," vram_gb = torch.cuda.get_device_properties(0).total_memory / 1024**3\n"," print(f\"β
GPU ready: {gpu_name}\")\n"," print(f\"β
VRAM: {vram_gb:.1f} GB\")\n"," if debug:\n"," print(f\" CUDA version: {torch.version.cuda}\")\n"," print(f\" Device index: {torch.cuda.current_device()}\")\n","else:\n"," print(\"β No GPU detected!\")\n"," print(\" Please go to: Runtime β Change runtime type β Hardware accelerator β GPU\")\n"," raise SystemExit(\"GPU is required. Please enable GPU runtime.\")\n","\n","print(\"β
GPU check passed\")"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":13672,"status":"ok","timestamp":1774958939787,"user":{"displayName":"No Name","userId":"10578412414437288386"},"user_tz":-120},"id":"ajsx94iUvbQH","outputId":"d6b8253e-1e34-4617-e553-fb1aeea9cdf8"},"outputs":[{"name":"stdout","output_type":"stream","text":["π¦ Installing dependencies...\n","\u001b[2K \u001b[90mββββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m60.7/60.7 MB\u001b[0m \u001b[31m211.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mββββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m1.5/1.5 MB\u001b[0m \u001b[31m270.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[2K \u001b[90mββββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m44.8/44.8 kB\u001b[0m \u001b[31m205.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25hβ
Packages installed successfully\n"," Installed: transformers, accelerate, bitsandbytes, open_clip_torch\n"]}],"source":["# @markdown # Cell 2: Install Required Packages\n","\n","debug = True #@param {type:\"boolean\"}\n","\n","print(\"π¦ Installing dependencies...\")\n","!pip install -q --no-cache-dir transformers accelerate bitsandbytes open_clip_torch\n","\n","print(\"β
Packages installed successfully\")\n","if debug:\n"," print(\" Installed: transformers, accelerate, bitsandbytes, open_clip_torch\")"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"0npx6gtpvhih"},"outputs":[],"source":["# @markdown # Cell 3: Mount Google Drive\n","\n","from google.colab import drive\n","import os\n","\n","debug = True #@param {type:\"boolean\"}\n","\n","print(\"π Mounting Google Drive...\")\n","drive.mount('/content/drive', force_remount=True)\n","\n","if debug:\n"," print(f\" Drive mounted at: /content/drive\")\n"," print(f\" MyDrive contents sample: {os.listdir('/content/drive/MyDrive')[:5] if os.path.exists('/content/drive/MyDrive') else 'Not accessible yet'}\")\n","\n","print(\"β
Google Drive mounted\")"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"4nMyIVRJvkiY"},"outputs":[],"source":["# @markdown # Cell 4: Configuration & Parameters\n","\n","zip_path = \"/content/drive/MyDrive/dino_tagger_output.zip\" #@param {type:\"string\"}\n","extract_dir = \"/content/dataset\" #@param {type:\"string\"}\n","debug = True #@param {type:\"boolean\"}\n","\n","print(\"βοΈ Configuration loaded\")\n","print(f\" Zip path : {zip_path}\")\n","print(f\" Extract dir : {extract_dir}\")\n","\n","if debug:\n"," print(f\" Debug mode enabled β Extra printouts will be shown\")"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"H6GQcTn9vnzx"},"outputs":[],"source":["# @markdown # Cell 5: Unzip Dataset\n","\n","import os\n","import zipfile\n","\n","debug = True #@param {type:\"boolean\"}\n","\n","os.makedirs(extract_dir, exist_ok=True)\n","\n","print(\"π¦ Extracting zip file...\")\n","with zipfile.ZipFile(zip_path, 'r') as zip_ref:\n"," zip_ref.extractall(extract_dir)\n","\n","print(f\"β
Dataset extracted to: {extract_dir}\")\n","\n","if debug:\n"," total_files = sum([len(files) for r, d, files in os.walk(extract_dir)])\n"," print(f\" Total files extracted: {total_files}\")\n"," print(f\" Top-level folders/files: {os.listdir(extract_dir)[:10]}\")"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"ELEdDOuHvrkA"},"outputs":[],"source":["# @markdown # Cell 6: Find Paired Text & Image Files\n","\n","from collections import defaultdict\n","import os\n","\n","debug = True #@param {type:\"boolean\"}\n","\n","text_files = {}\n","image_files = {}\n","image_exts = ('.png', '.jpg', '.jpeg', '.gif', '.bmp', '.webp')\n","\n","for root, _, files in os.walk(extract_dir):\n"," for filename in files:\n"," base = os.path.splitext(filename)[0]\n"," ext = os.path.splitext(filename)[1].lower()\n"," full_path = os.path.join(root, filename)\n","\n"," if ext == '.txt':\n"," text_files[base] = full_path\n"," elif ext in image_exts:\n"," image_files[base] = full_path\n","\n","# Keep only complete pairs\n","pairs = {base: (text_files[base], image_files[base])\n"," for base in text_files if base in image_files}\n","\n","print(f\"β
Found {len(pairs)} valid text-image pairs\")\n","\n","if debug:\n"," print(f\" Total .txt files found : {len(text_files)}\")\n"," print(f\" Total image files found: {len(image_files)}\")\n"," if len(pairs) > 0:\n"," sample_base = list(pairs.keys())[0]\n"," print(f\" Sample pair: {sample_base}.txt + {sample_base}{os.path.splitext(pairs[sample_base][1])[1]}\")\n"," else:\n"," print(\" β οΈ No pairs found. Check that filenames match exactly (without extension)\")"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"background_save":true},"id":"ot4ZcVtwxcPu"},"outputs":[],"source":["# @markdown # Cell 7: Load T5-XXL Encoder-Only β Encode Texts β Save β Unload VRAM\n","\n","import torch\n","import numpy as np\n","from transformers import T5EncoderModel, T5Tokenizer, BitsAndBytesConfig\n","import os\n","\n","debug = True #@param {type:\"boolean\"}\n","\n","device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n","\n","# ====================== Model & VRAM Info ======================\n","print(\"π Using dedicated T5-XXL **encoder-only** model:\")\n","print(\" β’ Repo: mcmonkey/google_t5-v1_1-xxl_encoderonly\")\n","print(\" β’ Contains ONLY the encoder (~4.76B params)\")\n","print(\" β’ Much smaller download than the full 46 GB model\")\n","print(\" β’ With 4-bit quantization: ~2.5 β 3.5 GB VRAM expected\\n\")\n","\n","print(\"π§ Loading T5-XXL encoder-only (4-bit quantized)...\")\n","\n","model_id = \"mcmonkey/google_t5-v1_1-xxl_encoderonly\"\n","\n","tokenizer = T5Tokenizer.from_pretrained(model_id)\n","\n","quant_config = BitsAndBytesConfig(\n"," load_in_4bit=True,\n"," bnb_4bit_compute_dtype=torch.float16,\n"," bnb_4bit_use_double_quant=True,\n"," bnb_4bit_quant_type=\"nf4\"\n",")\n","\n","text_encoder = T5EncoderModel.from_pretrained(\n"," model_id,\n"," quantization_config=quant_config,\n"," device_map=\"auto\",\n"," torch_dtype=torch.float16\n",")\n","text_encoder.eval()\n","\n","print(\"β
T5-XXL encoder-only loaded successfully\")\n","\n","if debug:\n"," try:\n"," allocated = torch.cuda.memory_allocated() / (1024**3)\n"," print(f\" VRAM allocated after loading: {allocated:.2f} GB\")\n"," except:\n"," pass\n","\n","# ====================== Encode Texts ======================\n","t5_embeddings = []\n","max_length = 512\n","\n","print(f\"π Encoding {len(pairs)} texts with T5-XXL encoder...\")\n","\n","for idx, (base, (txt_path, _)) in enumerate(pairs.items()):\n"," if debug and idx % 50 == 0 and idx > 0:\n"," print(f\" Processed {idx}/{len(pairs)} texts...\")\n","\n"," with open(txt_path, 'r', encoding='utf-8') as f:\n"," text = f.read().strip()\n","\n"," if not text:\n"," if debug:\n"," print(f\" Skipped empty text: {base}\")\n"," continue\n","\n"," inputs = tokenizer(\n"," text,\n"," padding=\"max_length\",\n"," max_length=max_length,\n"," truncation=True,\n"," return_tensors=\"pt\"\n"," )\n","\n"," input_ids = inputs.input_ids.to(\"cuda\")\n"," attention_mask = inputs.attention_mask.to(\"cuda\")\n","\n"," with torch.no_grad():\n"," outputs = text_encoder(input_ids=input_ids, attention_mask=attention_mask)\n"," # Mean pooling β 4096-dim embedding\n"," t5_emb = outputs.last_hidden_state.mean(dim=1).squeeze(0).cpu().numpy()\n","\n"," t5_embeddings.append(t5_emb)\n","\n","t5_embs = np.array(t5_embeddings)\n","t5_save_path = \"/content/t5_embeddings.npy\"\n","np.save(t5_save_path, t5_embs)\n","\n","print(f\"β
Encoded {len(t5_embs)} texts\")\n","print(f\"πΎ Saved to: {t5_save_path} (shape: {t5_embs.shape})\")\n","\n","# ====================== Unload T5 to free VRAM ======================\n","del text_encoder\n","torch.cuda.empty_cache()\n","\n","print(\"π§Ή T5 encoder unloaded from VRAM\")\n","if debug:\n"," try:\n"," print(f\" VRAM allocated after unload: {torch.cuda.memory_allocated() / (1024**3):.2f} GB\")\n"," except:\n"," pass"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"ZWLwMFi_zMDk"},"outputs":[],"source":["# @markdown # Cell 8: Load OpenCLIP ViT-B-32 β Encode Images\n","\n","import torch\n","import numpy as np\n","from PIL import Image\n","import open_clip\n","import os\n","\n","debug = True #@param {type:\"boolean\"}\n","\n","device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n","print(f\"π§ Using device: {device}\")\n","\n","# ====================== Load OpenCLIP ======================\n","print(\"πΈ Loading OpenCLIP ViT-B-32 (laion400m_e32)...\")\n","clip_model, _, preprocess = open_clip.create_model_and_transforms(\n"," \"ViT-B-32\", pretrained=\"laion400m_e32\"\n",")\n","clip_model.to(device)\n","clip_model.eval()\n","\n","print(\"β
OpenCLIP loaded successfully\")\n","\n","if debug:\n"," try:\n"," mem = torch.cuda.memory_allocated() / (1024**3)\n"," print(f\" VRAM used after loading CLIP: {mem:.2f} GB\")\n"," except:\n"," pass\n","\n","# ====================== Encode Images ======================\n","clip_embeddings = []\n","\n","print(f\"π Encoding {len(pairs)} images...\")\n","\n","# Load T5 embeddings (to keep order consistent)\n","t5_embs = np.load(\"/content/t5_embeddings.npy\")\n","\n","for idx, (base, (_, img_path)) in enumerate(pairs.items()):\n"," if debug and idx % 50 == 0 and idx > 0:\n"," print(f\" Processed {idx}/{len(pairs)} images...\")\n","\n"," try:\n"," image = preprocess(Image.open(img_path)).unsqueeze(0).to(device)\n"," with torch.no_grad():\n"," clip_emb = clip_model.encode_image(image).squeeze(0).cpu().numpy()\n"," clip_embeddings.append(clip_emb)\n"," except Exception as e:\n"," if debug:\n"," print(f\" β οΈ Error on image {base}: {e}\")\n"," continue\n","\n","clip_embs = np.array(clip_embeddings)\n","clip_save_path = \"/content/clip_embeddings.npy\"\n","np.save(clip_save_path, clip_embs)\n","\n","print(f\"β
Encoded {len(clip_embs)} images\")\n","print(f\"πΎ Saved to: {clip_save_path} (shape: {clip_embs.shape})\")\n","\n","print(f\" T5 shape : {t5_embs.shape}\")\n","print(f\" CLIP shape: {clip_embs.shape}\")\n","if len(t5_embs) != len(clip_embs):\n"," print(\"β οΈ Note: Some images may have failed to process (shapes don't match)\")"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"MMoH6jm2zioL"},"outputs":[],"source":["# @markdown # Cell 9: Train Mapper (T5 embedding β CLIP image embedding)\n","\n","import torch.nn as nn\n","from torch.utils.data import TensorDataset, DataLoader\n","import torch\n","import numpy as np\n","\n","debug = True #@param {type:\"boolean\"}\n","\n","# Load saved embeddings\n","t5_embs = np.load(\"/content/t5_embeddings.npy\")\n","clip_embs = np.load(\"/content/clip_embeddings.npy\")\n","\n","print(f\"π Loaded T5 embeddings : {t5_embs.shape}\")\n","print(f\"π Loaded CLIP embeddings: {clip_embs.shape}\")\n","\n","class T5toCLIPMapper(nn.Module):\n"," def __init__(self, input_dim=4096, output_dim=512, hidden_dim=2048):\n"," super().__init__()\n"," self.net = nn.Sequential(\n"," nn.Linear(input_dim, hidden_dim),\n"," nn.ReLU(),\n"," nn.Linear(hidden_dim, hidden_dim),\n"," nn.ReLU(),\n"," nn.Linear(hidden_dim, output_dim)\n"," )\n","\n"," def forward(self, x):\n"," return self.net(x)\n","\n","# Dataset & Loader\n","dataset = TensorDataset(\n"," torch.from_numpy(t5_embs).float(),\n"," torch.from_numpy(clip_embs).float()\n",")\n","dataloader = DataLoader(dataset, batch_size=16, shuffle=True)\n","\n","model = T5toCLIPMapper().to(device)\n","optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)\n","criterion = nn.MSELoss()\n","\n","num_epochs = 50 #@param {type:\"integer\"}\n","\n","print(f\"π Starting training for {num_epochs} epochs...\")\n","\n","for epoch in range(num_epochs):\n"," total_loss = 0.0\n"," for inputs, targets in dataloader:\n"," inputs = inputs.to(device)\n"," targets = targets.to(device)\n","\n"," 
optimizer.zero_grad()\n"," outputs = model(inputs)\n"," loss = criterion(outputs, targets)\n"," loss.backward()\n"," optimizer.step()\n","\n"," total_loss += loss.item()\n","\n"," avg_loss = total_loss / len(dataloader)\n"," if (epoch + 1) % 10 == 0 or epoch == 0:\n"," print(f\"Epoch {epoch+1:2d}/{num_epochs} | Avg Loss: {avg_loss:.6f}\")\n","\n","# Save\n","save_path = \"/content/t5_to_clip_mapper.pth\"\n","torch.save(model.state_dict(), save_path)\n","\n","print(\"π Training completed!\")\n","print(f\"β
Mapper saved to: {save_path}\")\n","\n","if debug:\n"," print(f\" Final average loss: {avg_loss:.6f}\")"]},{"cell_type":"code","source":["# @markdown # Cell 10: Save Model + Embeddings to Google Drive\n","\n","import shutil\n","import os\n","\n","debug = True #@param {type:\"boolean\"}\n","\n","# @markdown ### Choose folder in your Drive (change if needed)\n","drive_save_folder = \"/content/drive/MyDrive/T5_CLIP_Mapper\" #@param {type:\"string\"}\n","\n","os.makedirs(drive_save_folder, exist_ok=True)\n","\n","# Files to save\n","files_to_save = {\n"," \"/content/clip_to_t5_mapper.pth\": f\"{drive_save_folder}/clip_to_t5_mapper.pth\",\n"," \"/content/t5_embeddings.npy\": f\"{drive_save_folder}/t5_embeddings.npy\",\n"," \"/content/clip_embeddings.npy\": f\"{drive_save_folder}/clip_embeddings.npy\"\n","}\n","\n","print(f\"πΎ Saving files to Google Drive folder: {drive_save_folder}\")\n","\n","for local_path, drive_path in files_to_save.items():\n"," if os.path.exists(local_path):\n"," shutil.copy2(local_path, drive_path)\n"," print(f\"β
Saved: {os.path.basename(local_path)} β {drive_path}\")\n"," else:\n"," print(f\"β οΈ File not found: {local_path}\")\n","\n","print(\"\\nπ All files saved to Google Drive!\")\n","if debug:\n"," print(f\" Drive folder contents: {os.listdir(drive_save_folder)}\")"],"metadata":{"id":"N0CQFqwX4TBz"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# Step 1: Mount Google Drive\n","from google.colab import drive\n","drive.mount('/content/drive')"],"metadata":{"id":"AEA3l5bSLSo1"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# @markdown # Cell 11: Load Model + Embeddings from Google Drive\n","\n","import torch\n","import numpy as np\n","import os\n","\n","debug = True #@param {type:\"boolean\"}\n","device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n","\n","\n","# @markdown ### Path to your saved folder in Drive\n","drive_folder = \"/content/drive/MyDrive/T5_CLIP_Mapper\" #@param {type:\"string\"}\n","\n","# File paths\n","mapper_path = f\"{drive_folder}/clip_to_t5_mapper.pth\"\n","t5_path = f\"{drive_folder}/t5_embeddings.npy\"\n","clip_path = f\"{drive_folder}/clip_embeddings.npy\"\n","\n","print(\"π Loading files from Google Drive...\")\n","\n","# Load embeddings\n","t5_embs = np.load(t5_path)\n","clip_embs = np.load(clip_path)\n","\n","print(f\"β
Loaded T5 embeddings : {t5_embs.shape}\")\n","print(f\"β
Loaded CLIP embeddings: {clip_embs.shape}\")\n","\n","# Define the model class again (needed for loading state_dict)\n","class CLIPtoT5Mapper(torch.nn.Module):\n"," def __init__(self, input_dim=512, output_dim=4096, hidden_dim=2048):\n"," super().__init__()\n"," self.net = torch.nn.Sequential(\n"," torch.nn.Linear(input_dim, hidden_dim),\n"," torch.nn.ReLU(),\n"," torch.nn.Linear(hidden_dim, hidden_dim * 2),\n"," torch.nn.ReLU(),\n"," torch.nn.Linear(hidden_dim * 2, output_dim)\n"," )\n","\n"," def forward(self, x):\n"," return self.net(x)\n","\n","# Load the trained mapper\n","model = CLIPtoT5Mapper().to(device)\n","model.load_state_dict(torch.load(mapper_path, map_location=device))\n","model.eval()\n","\n","print(\"β
CLIP β T5 mapper loaded successfully\")\n","print(f\" Model ready on device: {device}\")\n","\n","if debug:\n"," print(f\" Mapper path: {mapper_path}\")\n"," print(f\" Total parameters: {sum(p.numel() for p in model.parameters()):,}\")"],"metadata":{"id":"L9SM8hbM4KCK"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# @markdown # Cell 12: Visualize Differences β CLIP vs CLIP, T5 vs T5, Predicted T5 vs Predicted T5, and Predicted vs True T5\n","\n","import torch\n","import numpy as np\n","import matplotlib.pyplot as plt\n","import seaborn as sns\n","from sklearn.decomposition import PCA\n","from sklearn.metrics.pairwise import cosine_similarity\n","import os\n","\n","debug = True #@param {type:\"boolean\"}\n","\n","print(\"π Preparing visualizations for embedding differences...\")\n","\n","# Make sure we have everything loaded (run Cell 11 first)\n","if 'model' not in globals() or 'clip_embs' not in globals() or 't5_embs' not in globals():\n"," print(\"β οΈ Please run Cell 11 first to load model + embeddings!\")\n"," raise SystemExit(\"Missing data\")\n","\n","# ====================== 1. Compute predicted T5 from CLIP ======================\n","print(\"π Computing predicted T5 embeddings from CLIP inputs...\")\n","with torch.no_grad():\n"," clip_tensor = torch.from_numpy(clip_embs).float().to(device)\n"," predicted_t5 = model(clip_tensor).cpu().numpy()\n","\n","print(f\"β
Predicted T5 shape: {predicted_t5.shape}\")\n","\n","# ====================== 2. Pairwise cosine similarities WITHIN each set ======================\n","print(\"π Calculating pairwise cosine similarities (within each embedding group)...\")\n","\n","def get_pairwise_similarities(embeddings):\n"," \"\"\"Return off-diagonal cosine similarities (excluding self-similarity=1.0)\"\"\"\n"," sim_matrix = cosine_similarity(embeddings)\n"," np.fill_diagonal(sim_matrix, np.nan) # ignore diagonal (always 1.0)\n"," return sim_matrix.flatten()[~np.isnan(sim_matrix.flatten())]\n","\n","# CLIP vs CLIP (among themselves)\n","clip_sim = get_pairwise_similarities(clip_embs)\n","\n","# True T5 vs True T5 (among themselves)\n","t5_sim = get_pairwise_similarities(t5_embs)\n","\n","# Predicted T5 vs Predicted T5 (among themselves)\n","pred_t5_sim = get_pairwise_similarities(predicted_t5)\n","\n","# Predicted T5 vs True T5 (the most important metric)\n","pred_true_sim = np.diag(cosine_similarity(predicted_t5, t5_embs))\n","\n","print(f\" CLIP vs CLIP mean similarity : {clip_sim.mean():.4f}\")\n","print(f\" True T5 vs True T5 mean similarity: {t5_sim.mean():.4f}\")\n","print(f\" Predicted T5 vs Predicted T5 : {pred_t5_sim.mean():.4f}\")\n","print(f\" Predicted T5 vs True T5 (model quality): {pred_true_sim.mean():.4f}\")\n","\n","# ====================== 3. 
Dimensionality reduction for scatter plots ======================\n","print(\"π Reducing dimensions with PCA...\")\n","\n","# PCA on CLIP space\n","pca_clip = PCA(n_components=2, random_state=42)\n","clip_2d = pca_clip.fit_transform(clip_embs)\n","\n","# PCA on T5 space (true + predicted in SAME space)\n","all_t5 = np.vstack([t5_embs, predicted_t5])\n","pca_t5 = PCA(n_components=2, random_state=42)\n","t5_2d = pca_t5.fit_transform(all_t5)\n","true_t5_2d = t5_2d[:len(t5_embs)]\n","pred_t5_2d = t5_2d[len(t5_embs):]\n","\n","print(f\" PCA explained variance β CLIP: {pca_clip.explained_variance_ratio_.sum():.1%}\")\n","print(f\" PCA explained variance β T5 : {pca_t5.explained_variance_ratio_.sum():.1%}\")\n","\n","# ====================== 4. Plotting ======================\n","plt.figure(figsize=(20, 12))\n","\n","# Row 1: Pairwise similarity histograms (within each group)\n","plt.subplot(2, 3, 1)\n","sns.histplot(clip_sim, kde=True, color='skyblue', bins=50)\n","plt.title('CLIP vs CLIP\\n(Pairwise Cosine Similarity)')\n","plt.xlabel('Cosine Similarity')\n","plt.ylabel('Count')\n","plt.grid(True, alpha=0.3)\n","\n","plt.subplot(2, 3, 2)\n","sns.histplot(t5_sim, kde=True, color='green', bins=50)\n","plt.title('True T5 vs True T5\\n(Pairwise Cosine Similarity)')\n","plt.xlabel('Cosine Similarity')\n","plt.ylabel('Count')\n","plt.grid(True, alpha=0.3)\n","\n","plt.subplot(2, 3, 3)\n","sns.histplot(pred_t5_sim, kde=True, color='orange', bins=50)\n","plt.title('Predicted T5 vs Predicted T5\\n(Pairwise Cosine Similarity)')\n","plt.xlabel('Cosine Similarity')\n","plt.ylabel('Count')\n","plt.grid(True, alpha=0.3)\n","\n","# Row 2: Model quality + geometry\n","plt.subplot(2, 3, 4)\n","sns.histplot(pred_true_sim, kde=True, color='purple', bins=50)\n","plt.title('Predicted T5 vs True T5\\n(Model Quality β Per-Pair)')\n","plt.xlabel('Cosine Similarity')\n","plt.ylabel('Count')\n","plt.grid(True, alpha=0.3)\n","\n","plt.subplot(2, 3, 5)\n","plt.scatter(clip_2d[:, 0], clip_2d[:, 1], 
alpha=0.7, s=25, color='skyblue')\n","plt.title('CLIP Embeddings (PCA 2D)')\n","plt.xlabel('PC1')\n","plt.ylabel('PC2')\n","plt.grid(True, alpha=0.3)\n","\n","plt.subplot(2, 3, 6)\n","plt.scatter(true_t5_2d[:, 0], true_t5_2d[:, 1], alpha=0.7, s=25, color='green', label='True T5')\n","plt.scatter(pred_t5_2d[:, 0], pred_t5_2d[:, 1], alpha=0.7, s=25, color='red', label='Predicted T5')\n","plt.title('True T5 vs Predicted T5\\n(in shared PCA space)')\n","plt.xlabel('PC1')\n","plt.ylabel('PC2')\n","plt.legend()\n","plt.grid(True, alpha=0.3)\n","\n","plt.tight_layout()\n","plt.show()\n","\n","# ====================== 5. Optional: L2 distance histogram ======================\n","plt.figure(figsize=(8, 5))\n","diff_vectors = predicted_t5 - t5_embs\n","diff_norms = np.linalg.norm(diff_vectors, axis=1)\n","sns.histplot(diff_norms, kde=True, color='crimson', bins=50)\n","plt.title('L2 (Euclidean) Distance: Predicted T5 β True T5')\n","plt.xlabel('Distance')\n","plt.ylabel('Count')\n","plt.grid(True, alpha=0.3)\n","plt.show()\n","\n","print(f\"\\nβ
All plots generated successfully!\")\n","print(f\" Number of samples used: {len(clip_embs)}\")\n","if debug:\n"," print(\"\\nπ‘ Interpretation guide:\")\n"," print(\" β’ First three histograms: How clustered / diverse each embedding type is internally\")\n"," print(\" β’ Fourth histogram: How close the modelβs predictions are to the real T5 embeddings\")\n"," print(\" β’ Scatter plots: Visual geometry and overlap between true vs predicted T5\")"],"metadata":{"id":"lb-d8VnI4HXP"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# @markdown # Cell 13: Install Dependencies, GPU Check & Mount Drive\n","\n","import torch\n","import subprocess\n","import sys\n","import os\n","\n","debug = True #@param {type:\"boolean\"}\n","\n","# ====================== GPU Check ======================\n","print(\"π Checking GPU...\")\n","if torch.cuda.is_available():\n"," print(f\"β
GPU ready: {torch.cuda.get_device_name(0)}\")\n"," print(f\"VRAM: {torch.cuda.get_device_properties(0).total_memory / 1024**3:.1f} GB\")\n","else:\n"," print(\"β No GPU detected! Please enable GPU runtime.\")\n"," raise SystemExit(\"GPU is required\")\n","\n","# ====================== Install Dependencies ======================\n","print(\"\\nπ¦ Installing required packages...\")\n","\n","!pip install -q --no-cache-dir open_clip_torch transformers accelerate bitsandbytes\n","\n","print(\"β
All dependencies installed successfully\")\n","\n","# ====================== Mount Google Drive ======================\n","from google.colab import drive\n","drive.mount('/content/drive', force_remount=True)\n","\n","print(\"β
Google Drive mounted\")\n","if debug:\n"," print(f\" Drive path: /content/drive/MyDrive\")"],"metadata":{"id":"fhK7ma507JmQ"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# @markdown # Cell 14: Process New Zip File β Generate T5 + CLIP Embeddings β Save to Drive\n","\n","import torch\n","import numpy as np\n","from PIL import Image\n","import open_clip\n","import os\n","import zipfile\n","from transformers import T5EncoderModel, T5Tokenizer, BitsAndBytesConfig\n","\n","debug = True #@param {type:\"boolean\"}\n","\n","# ====================== PARAMETERS ======================\n","zip_path = \"/content/drive/MyDrive/lisa_simpson.zip\" #@param {type:\"string\"}\n","drive_save_folder = \"/content/drive/MyDrive/T5_CLIP_Mapper\" #@param {type:\"string\"}\n","extract_dir = \"/content/new_dataset\" #@param {type:\"string\"}\n","\n","print(f\"π Processing zip file: {zip_path}\")\n","print(f\"πΎ Will save embeddings to: {drive_save_folder}\")\n","\n","# ====================== 1. Unzip the Dataset ======================\n","os.makedirs(extract_dir, exist_ok=True)\n","\n","device =\"cuda\" if torch.cuda.is_available() else \"cpu\"\n","\n","\n","print(\"π¦ Extracting zip file...\")\n","with zipfile.ZipFile(zip_path, 'r') as zip_ref:\n"," zip_ref.extractall(extract_dir)\n","\n","print(f\"β
Extracted to: {extract_dir}\")\n","\n","# ====================== 2. Find Paired Text & Image Files ======================\n","text_files = {}\n","image_files = {}\n","image_exts = ('.png', '.jpg', '.jpeg', '.gif', '.bmp', '.webp')\n","\n","for root, _, files in os.walk(extract_dir):\n"," for filename in files:\n"," base = os.path.splitext(filename)[0]\n"," ext = os.path.splitext(filename)[1].lower()\n"," full_path = os.path.join(root, filename)\n","\n"," if ext == '.txt':\n"," text_files[base] = full_path\n"," elif ext in image_exts:\n"," image_files[base] = full_path\n","\n","pairs = {base: (text_files[base], image_files[base])\n"," for base in text_files if base in image_files}\n","\n","print(f\"β
Found {len(pairs)} valid text-image pairs\")\n","\n","if debug and len(pairs) > 0:\n"," sample = list(pairs.keys())[0]\n"," print(f\" Sample pair: {sample}.txt + {sample}{os.path.splitext(pairs[sample][1])[1]}\")\n","\n","# ====================== 3. T5-XXL Encoder-Only (Phase 1) ======================\n","print(\"\\nπ§ [Phase 1] Loading T5-XXL encoder-only (4-bit)...\")\n","\n","model_id = \"mcmonkey/google_t5-v1_1-xxl_encoderonly\"\n","\n","tokenizer = T5Tokenizer.from_pretrained(model_id)\n","\n","quant_config = BitsAndBytesConfig(\n"," load_in_4bit=True,\n"," bnb_4bit_compute_dtype=torch.float16,\n"," bnb_4bit_use_double_quant=True,\n"," bnb_4bit_quant_type=\"nf4\"\n",")\n","\n","text_encoder = T5EncoderModel.from_pretrained(\n"," model_id,\n"," quantization_config=quant_config,\n"," device_map=\"auto\",\n"," torch_dtype=torch.float16\n",")\n","text_encoder.eval()\n","\n","t5_embeddings = []\n","max_length = 512\n","\n","print(f\"π Encoding {len(pairs)} texts with T5-XXL...\")\n","\n","for idx, (base, (txt_path, _)) in enumerate(pairs.items()):\n"," if debug and idx % 50 == 0 and idx > 0:\n"," print(f\" Text {idx}/{len(pairs)} processed...\")\n","\n"," with open(txt_path, 'r', encoding='utf-8') as f:\n"," text = f.read().strip()\n","\n"," if not text:\n"," if debug:\n"," print(f\" Skipped empty: {base}\")\n"," continue\n","\n"," inputs = tokenizer(text, padding=\"max_length\", max_length=max_length,\n"," truncation=True, return_tensors=\"pt\")\n","\n"," input_ids = inputs.input_ids.to(\"cuda\")\n"," attention_mask = inputs.attention_mask.to(\"cuda\")\n","\n"," with torch.no_grad():\n"," outputs = text_encoder(input_ids=input_ids, attention_mask=attention_mask)\n"," t5_emb = outputs.last_hidden_state.mean(dim=1).squeeze(0).cpu().numpy()\n","\n"," t5_embeddings.append(t5_emb)\n","\n","# Unload T5 to free VRAM\n","del text_encoder\n","torch.cuda.empty_cache()\n","\n","t5_embs = np.array(t5_embeddings)\n","print(f\"β
T5 embeddings created β shape: {t5_embs.shape}\")\n","print(\"π§Ή T5 encoder unloaded from VRAM\")\n","\n","# ====================== 4. OpenCLIP (Phase 2) ======================\n","print(\"\\nπΈ [Phase 2] Loading OpenCLIP ViT-B-32...\")\n","\n","clip_model, _, preprocess = open_clip.create_model_and_transforms(\n"," \"ViT-B-32\", pretrained=\"laion400m_e32\"\n",")\n","clip_model.to(device)\n","clip_model.eval()\n","\n","clip_embeddings = []\n","\n","print(f\"π Encoding {len(pairs)} images with OpenCLIP...\")\n","\n","for idx, (base, (_, img_path)) in enumerate(pairs.items()):\n"," if debug and idx % 50 == 0 and idx > 0:\n"," print(f\" Image {idx}/{len(pairs)} processed...\")\n","\n"," try:\n"," image = preprocess(Image.open(img_path)).unsqueeze(0).to(device)\n"," with torch.no_grad():\n"," clip_emb = clip_model.encode_image(image).squeeze(0).cpu().numpy()\n"," clip_embeddings.append(clip_emb)\n"," except Exception as e:\n"," if debug:\n"," print(f\" β οΈ Error on image {base}: {e}\")\n","\n","clip_embs = np.array(clip_embeddings)\n","print(f\"β
CLIP embeddings created β shape: {clip_embs.shape}\")\n","\n","# ====================== 5. Save to Google Drive ======================\n","os.makedirs(drive_save_folder, exist_ok=True)\n","\n","t5_save_path = f\"{drive_save_folder}/t5_embeddings.npy\"\n","clip_save_path = f\"{drive_save_folder}/clip_embeddings.npy\"\n","\n","np.save(t5_save_path, t5_embs)\n","np.save(clip_save_path, clip_embs)\n","\n","print(\"\\nπΎ Files saved to Google Drive:\")\n","print(f\" β {t5_save_path}\")\n","print(f\" β {clip_save_path}\")\n","print(f\" T5 shape : {t5_embs.shape}\")\n","print(f\" CLIP shape: {clip_embs.shape}\")\n","\n","if debug:\n"," print(f\"\\nπ Current contents in {drive_save_folder}:\")\n"," print(os.listdir(drive_save_folder))\n","\n","print(\"\\nπ Processing completed! Embeddings saved to Drive.\")"],"metadata":{"id":"9nIQeQfI7Osy"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# @markdown # Cell 15: Test Trained CLIPβT5 Mapper on New Dataset\n","\n","import torch\n","import numpy as np\n","import matplotlib.pyplot as plt\n","import seaborn as sns\n","from sklearn.decomposition import PCA\n","from sklearn.metrics.pairwise import cosine_similarity\n","import os\n","\n","debug = True #@param {type:\"boolean\"}\n","\n","# ====================== PARAMETERS ======================\n","drive_folder = \"/content/drive/MyDrive/T5_CLIP_Mapper\" #@param {type:\"string\"}\n","\n","print(f\"π Loading data from: {drive_folder}\")\n","\n","# ====================== 1. Load Embeddings from New Dataset ======================\n","t5_path = f\"{drive_folder}/t5_embeddings.npy\"\n","clip_path = f\"{drive_folder}/clip_embeddings.npy\"\n","\n","t5_embs = np.load(t5_path)\n","clip_embs = np.load(clip_path)\n","\n","print(f\"β
Loaded new dataset:\")\n","print(f\" CLIP embeddings: {clip_embs.shape}\")\n","print(f\" Real T5 embeddings: {t5_embs.shape}\")\n","\n","# ====================== 2. Load the Trained Mapper ======================\n","mapper_path = f\"{drive_folder}/clip_to_t5_mapper.pth\"\n","\n","class CLIPtoT5Mapper(torch.nn.Module):\n"," def __init__(self, input_dim=512, output_dim=4096, hidden_dim=2048):\n"," super().__init__()\n"," self.net = torch.nn.Sequential(\n"," torch.nn.Linear(input_dim, hidden_dim),\n"," torch.nn.ReLU(),\n"," torch.nn.Linear(hidden_dim, hidden_dim * 2),\n"," torch.nn.ReLU(),\n"," torch.nn.Linear(hidden_dim * 2, output_dim)\n"," )\n","\n"," def forward(self, x):\n"," return self.net(x)\n","\n","device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n","model = CLIPtoT5Mapper().to(device)\n","model.load_state_dict(torch.load(mapper_path, map_location=device))\n","model.eval()\n","\n","print(\"β
Trained CLIP β T5 mapper loaded successfully\")\n","\n","# ====================== 3. Generate Predicted T5 Embeddings ======================\n","print(\"π Generating predicted T5 embeddings from CLIP...\")\n","\n","with torch.no_grad():\n"," clip_tensor = torch.from_numpy(clip_embs).float().to(device)\n"," predicted_t5 = model(clip_tensor).cpu().numpy()\n","\n","print(f\"β
Predicted T5 shape: {predicted_t5.shape}\")\n","\n","# ====================== 4. Compute Comparison Metrics ======================\n","print(\"\\nπ Computing similarity metrics...\")\n","\n","# Cosine similarity: Predicted vs Real T5 (per pair)\n","pred_true_sim = np.diag(cosine_similarity(predicted_t5, t5_embs))\n","\n","# Pairwise similarities within groups\n","def get_pairwise_sim(embeddings):\n"," sim_matrix = cosine_similarity(embeddings)\n"," np.fill_diagonal(sim_matrix, np.nan)\n"," return sim_matrix.flatten()[~np.isnan(sim_matrix.flatten())]\n","\n","clip_sim = get_pairwise_sim(clip_embs)\n","t5_sim = get_pairwise_sim(t5_embs)\n","pred_t5_sim = get_pairwise_sim(predicted_t5)\n","\n","print(f\" CLIP vs CLIP mean similarity : {clip_sim.mean():.4f}\")\n","print(f\" True T5 vs True T5 mean similarity : {t5_sim.mean():.4f}\")\n","print(f\" Predicted T5 vs Predicted T5 : {pred_t5_sim.mean():.4f}\")\n","print(f\" Predicted T5 vs True T5 (key metric): {pred_true_sim.mean():.4f}\")\n","\n","# ====================== 5. 
Visualizations ======================\n","plt.figure(figsize=(20, 12))\n","\n","# Similarity histograms\n","plt.subplot(2, 3, 1)\n","sns.histplot(clip_sim, kde=True, color='skyblue', bins=50)\n","plt.title('CLIP vs CLIP\\n(Pairwise Cosine)')\n","plt.xlabel('Cosine Similarity')\n","plt.grid(True, alpha=0.3)\n","\n","plt.subplot(2, 3, 2)\n","sns.histplot(t5_sim, kde=True, color='green', bins=50)\n","plt.title('True T5 vs True T5\\n(Pairwise Cosine)')\n","plt.xlabel('Cosine Similarity')\n","plt.grid(True, alpha=0.3)\n","\n","plt.subplot(2, 3, 3)\n","sns.histplot(pred_t5_sim, kde=True, color='orange', bins=50)\n","plt.title('Predicted T5 vs Predicted T5\\n(Pairwise Cosine)')\n","plt.xlabel('Cosine Similarity')\n","plt.grid(True, alpha=0.3)\n","\n","plt.subplot(2, 3, 4)\n","sns.histplot(pred_true_sim, kde=True, color='purple', bins=50)\n","plt.title('Predicted T5 vs True T5\\n(Model Performance on New Data)')\n","plt.xlabel('Cosine Similarity')\n","plt.ylabel('Count')\n","plt.grid(True, alpha=0.3)\n","\n","# PCA scatter plots\n","pca_clip = PCA(n_components=2, random_state=42)\n","clip_2d = pca_clip.fit_transform(clip_embs)\n","\n","all_t5 = np.vstack([t5_embs, predicted_t5])\n","pca_t5 = PCA(n_components=2, random_state=42)\n","t5_2d = pca_t5.fit_transform(all_t5)\n","true_t5_2d = t5_2d[:len(t5_embs)]\n","pred_t5_2d = t5_2d[len(t5_embs):]\n","\n","plt.subplot(2, 3, 5)\n","plt.scatter(clip_2d[:, 0], clip_2d[:, 1], alpha=0.7, s=25, color='skyblue')\n","plt.title('CLIP Embeddings (PCA 2D)')\n","plt.xlabel('PC1')\n","plt.ylabel('PC2')\n","plt.grid(True, alpha=0.3)\n","\n","plt.subplot(2, 3, 6)\n","plt.scatter(true_t5_2d[:, 0], true_t5_2d[:, 1], alpha=0.7, s=25, color='green', label='True T5')\n","plt.scatter(pred_t5_2d[:, 0], pred_t5_2d[:, 1], alpha=0.7, s=25, color='red', label='Predicted T5')\n","plt.title('True vs Predicted T5\\n(Shared PCA Space)')\n","plt.xlabel('PC1')\n","plt.ylabel('PC2')\n","plt.legend()\n","plt.grid(True, 
alpha=0.3)\n","\n","plt.tight_layout()\n","plt.show()\n","\n","# L2 Distance histogram\n","plt.figure(figsize=(8, 5))\n","diff_norms = np.linalg.norm(predicted_t5 - t5_embs, axis=1)\n","sns.histplot(diff_norms, kde=True, color='crimson', bins=50)\n","plt.title('L2 Distance: Predicted T5 β True T5')\n","plt.xlabel('Euclidean Distance')\n","plt.ylabel('Count')\n","plt.grid(True, alpha=0.3)\n","plt.show()\n","\n","print(\"\\nβ
Evaluation on new dataset completed!\")\n","if debug:\n"," print(f\" Number of test samples: {len(t5_embs)}\")\n"," print(f\" Average cosine similarity (Predicted vs True): {pred_true_sim.mean():.4f}\")"],"metadata":{"id":"7IZQR-fV78EC"},"execution_count":null,"outputs":[]}],"metadata":{"accelerator":"GPU","colab":{"gpuType":"T4","provenance":[{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/T5_mapper.ipynb","timestamp":1774962583883}],"mount_file_id":"1l-BPQkLTyQYvwcF0H1MIg-7DIW_KoWqC","authorship_tag":"ABX9TyMWqR9PBFc0Hi+Gf8w7BCb4"},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"name":"python"}},"nbformat":4,"nbformat_minor":0}
|
|
|
|
| 1 |
+
{"cells":[{"cell_type":"code","execution_count":null,"metadata":{"id":"6ceGJfjyvY4l","cellView":"form"},"outputs":[],"source":["# @markdown # Cell 1: GPU Check & Runtime Validation\n","\n","import torch\n","\n","debug = True #@param {type:\"boolean\"}\n","\n","print(\"π Checking GPU runtime...\")\n","\n","if torch.cuda.is_available():\n"," gpu_name = torch.cuda.get_device_name(0)\n"," vram_gb = torch.cuda.get_device_properties(0).total_memory / 1024**3\n"," print(f\"β
GPU ready: {gpu_name}\")\n"," print(f\"β
VRAM: {vram_gb:.1f} GB\")\n"," if debug:\n"," print(f\" CUDA version: {torch.version.cuda}\")\n"," print(f\" Device index: {torch.cuda.current_device()}\")\n","else:\n"," print(\"β No GPU detected!\")\n"," print(\" Please go to: Runtime β Change runtime type β Hardware accelerator β GPU\")\n"," raise SystemExit(\"GPU is required. Please enable GPU runtime.\")\n","\n","print(\"β
GPU check passed\")"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"ajsx94iUvbQH","cellView":"form"},"outputs":[],"source":["# @markdown # Cell 2: Install Required Packages\n","\n","debug = True #@param {type:\"boolean\"}\n","\n","print(\"π¦ Installing dependencies...\")\n","!pip install -q --no-cache-dir transformers accelerate bitsandbytes open_clip_torch\n","\n","print(\"β
Packages installed successfully\")\n","if debug:\n"," print(\" Installed: transformers, accelerate, bitsandbytes, open_clip_torch\")"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"0npx6gtpvhih","cellView":"form"},"outputs":[],"source":["# @markdown # Cell 3: Mount Google Drive\n","\n","from google.colab import drive\n","import os\n","\n","debug = True #@param {type:\"boolean\"}\n","\n","print(\"π Mounting Google Drive...\")\n","drive.mount('/content/drive', force_remount=True)\n","\n","if debug:\n"," print(f\" Drive mounted at: /content/drive\")\n"," print(f\" MyDrive contents sample: {os.listdir('/content/drive/MyDrive')[:5] if os.path.exists('/content/drive/MyDrive') else 'Not accessible yet'}\")\n","\n","print(\"β
Google Drive mounted\")"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"4nMyIVRJvkiY","cellView":"form"},"outputs":[],"source":["# @markdown # Cell 4: Configuration & Parameters\n","\n","zip_path = \"/content/drive/MyDrive/fetch_set.zip\" #@param {type:\"string\"}\n","extract_dir = \"/content/dataset\" #@param {type:\"string\"}\n","debug = True #@param {type:\"boolean\"}\n","\n","print(\"βοΈ Configuration loaded\")\n","print(f\" Zip path : {zip_path}\")\n","print(f\" Extract dir : {extract_dir}\")\n","\n","if debug:\n"," print(f\" Debug mode enabled β Extra printouts will be shown\")"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"H6GQcTn9vnzx","cellView":"form"},"outputs":[],"source":["# @markdown # Cell 5: Unzip Dataset\n","\n","import os\n","import zipfile\n","\n","debug = True #@param {type:\"boolean\"}\n","\n","os.makedirs(extract_dir, exist_ok=True)\n","\n","print(\"π¦ Extracting zip file...\")\n","with zipfile.ZipFile(zip_path, 'r') as zip_ref:\n"," zip_ref.extractall(extract_dir)\n","\n","print(f\"β
Dataset extracted to: {extract_dir}\")\n","\n","if debug:\n"," total_files = sum([len(files) for r, d, files in os.walk(extract_dir)])\n"," print(f\" Total files extracted: {total_files}\")\n"," print(f\" Top-level folders/files: {os.listdir(extract_dir)[:10]}\")"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"ELEdDOuHvrkA","cellView":"form"},"outputs":[],"source":["# @markdown # Cell 6: Find Paired Text & Image Files\n","\n","from collections import defaultdict\n","import os\n","\n","debug = True #@param {type:\"boolean\"}\n","\n","text_files = {}\n","image_files = {}\n","image_exts = ('.png', '.jpg', '.jpeg', '.gif', '.bmp', '.webp')\n","\n","for root, _, files in os.walk(extract_dir):\n"," for filename in files:\n"," base = os.path.splitext(filename)[0]\n"," ext = os.path.splitext(filename)[1].lower()\n"," full_path = os.path.join(root, filename)\n","\n"," if ext == '.txt':\n"," text_files[base] = full_path\n"," elif ext in image_exts:\n"," image_files[base] = full_path\n","\n","# Keep only complete pairs\n","pairs = {base: (text_files[base], image_files[base])\n"," for base in text_files if base in image_files}\n","\n","print(f\"β
Found {len(pairs)} valid text-image pairs\")\n","\n","if debug:\n"," print(f\" Total .txt files found : {len(text_files)}\")\n"," print(f\" Total image files found: {len(image_files)}\")\n"," if len(pairs) > 0:\n"," sample_base = list(pairs.keys())[0]\n"," print(f\" Sample pair: {sample_base}.txt + {sample_base}{os.path.splitext(pairs[sample_base][1])[1]}\")\n"," else:\n"," print(\" β οΈ No pairs found. Check that filenames match exactly (without extension)\")"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"ot4ZcVtwxcPu","cellView":"form"},"outputs":[],"source":["# @markdown # Cell 7: Load T5-XXL Encoder-Only β Encode Texts β Save β Unload VRAM\n","\n","import torch\n","import numpy as np\n","from transformers import T5EncoderModel, T5Tokenizer, BitsAndBytesConfig\n","import os\n","\n","debug = True #@param {type:\"boolean\"}\n","\n","device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n","\n","# ====================== Model & VRAM Info ======================\n","print(\"π Using dedicated T5-XXL **encoder-only** model:\")\n","print(\" β’ Repo: mcmonkey/google_t5-v1_1-xxl_encoderonly\")\n","print(\" β’ Contains ONLY the encoder (~4.76B params)\")\n","print(\" β’ Much smaller download than the full 46 GB model\")\n","print(\" β’ With 4-bit quantization: ~2.5 β 3.5 GB VRAM expected\\n\")\n","\n","print(\"π§ Loading T5-XXL encoder-only (4-bit quantized)...\")\n","\n","model_id = \"mcmonkey/google_t5-v1_1-xxl_encoderonly\"\n","\n","tokenizer = T5Tokenizer.from_pretrained(model_id)\n","\n","quant_config = BitsAndBytesConfig(\n"," load_in_4bit=True,\n"," bnb_4bit_compute_dtype=torch.float16,\n"," bnb_4bit_use_double_quant=True,\n"," bnb_4bit_quant_type=\"nf4\"\n",")\n","\n","text_encoder = T5EncoderModel.from_pretrained(\n"," model_id,\n"," quantization_config=quant_config,\n"," device_map=\"auto\",\n"," torch_dtype=torch.float16\n",")\n","text_encoder.eval()\n","\n","print(\"β
T5-XXL encoder-only loaded successfully\")\n","\n","if debug:\n"," try:\n"," allocated = torch.cuda.memory_allocated() / (1024**3)\n"," print(f\" VRAM allocated after loading: {allocated:.2f} GB\")\n"," except:\n"," pass\n","\n","# ====================== Encode Texts ======================\n","t5_embeddings = []\n","max_length = 512\n","\n","print(f\"π Encoding {len(pairs)} texts with T5-XXL encoder...\")\n","\n","for idx, (base, (txt_path, _)) in enumerate(pairs.items()):\n"," if debug and idx % 50 == 0 and idx > 0:\n"," print(f\" Processed {idx}/{len(pairs)} texts...\")\n","\n"," with open(txt_path, 'r', encoding='utf-8') as f:\n"," text = f.read().strip()\n","\n"," if not text:\n"," if debug:\n"," print(f\" Skipped empty text: {base}\")\n"," continue\n","\n"," inputs = tokenizer(\n"," text,\n"," padding=\"max_length\",\n"," max_length=max_length,\n"," truncation=True,\n"," return_tensors=\"pt\"\n"," )\n","\n"," input_ids = inputs.input_ids.to(\"cuda\")\n"," attention_mask = inputs.attention_mask.to(\"cuda\")\n","\n"," with torch.no_grad():\n"," outputs = text_encoder(input_ids=input_ids, attention_mask=attention_mask)\n"," # Mean pooling β 4096-dim embedding\n"," t5_emb = outputs.last_hidden_state.mean(dim=1).squeeze(0).cpu().numpy()\n","\n"," t5_embeddings.append(t5_emb)\n","\n","t5_embs = np.array(t5_embeddings)\n","t5_save_path = \"/content/t5_embeddings.npy\"\n","np.save(t5_save_path, t5_embs)\n","\n","print(f\"β
Encoded {len(t5_embs)} texts\")\n","print(f\"πΎ Saved to: {t5_save_path} (shape: {t5_embs.shape})\")\n","\n","# ====================== Unload T5 to free VRAM ======================\n","del text_encoder\n","torch.cuda.empty_cache()\n","\n","print(\"π§Ή T5 encoder unloaded from VRAM\")\n","if debug:\n"," try:\n"," print(f\" VRAM allocated after unload: {torch.cuda.memory_allocated() / (1024**3):.2f} GB\")\n"," except:\n"," pass"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"ZWLwMFi_zMDk","cellView":"form"},"outputs":[],"source":["# @markdown # Cell 8: Load OpenCLIP ViT-B-32 β Encode Images\n","\n","import torch\n","import numpy as np\n","from PIL import Image\n","import open_clip\n","import os\n","\n","debug = True #@param {type:\"boolean\"}\n","\n","device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n","print(f\"π§ Using device: {device}\")\n","\n","# ====================== Load OpenCLIP ======================\n","print(\"πΈ Loading OpenCLIP ViT-B-32 (laion400m_e32)...\")\n","clip_model, _, preprocess = open_clip.create_model_and_transforms(\n"," \"ViT-B-32\", pretrained=\"laion400m_e32\"\n",")\n","clip_model.to(device)\n","clip_model.eval()\n","\n","print(\"β
OpenCLIP loaded successfully\")\n","\n","if debug:\n"," try:\n"," mem = torch.cuda.memory_allocated() / (1024**3)\n"," print(f\" VRAM used after loading CLIP: {mem:.2f} GB\")\n"," except:\n"," pass\n","\n","# ====================== Encode Images ======================\n","clip_embeddings = []\n","\n","print(f\"π Encoding {len(pairs)} images...\")\n","\n","# Load T5 embeddings (to keep order consistent)\n","t5_embs = np.load(\"/content/t5_embeddings.npy\")\n","\n","for idx, (base, (_, img_path)) in enumerate(pairs.items()):\n"," if debug and idx % 50 == 0 and idx > 0:\n"," print(f\" Processed {idx}/{len(pairs)} images...\")\n","\n"," try:\n"," image = preprocess(Image.open(img_path)).unsqueeze(0).to(device)\n"," with torch.no_grad():\n"," clip_emb = clip_model.encode_image(image).squeeze(0).cpu().numpy()\n"," clip_embeddings.append(clip_emb)\n"," except Exception as e:\n"," if debug:\n"," print(f\" β οΈ Error on image {base}: {e}\")\n"," continue\n","\n","clip_embs = np.array(clip_embeddings)\n","clip_save_path = \"/content/clip_embeddings.npy\"\n","np.save(clip_save_path, clip_embs)\n","\n","print(f\"β
Encoded {len(clip_embs)} images\")\n","print(f\"πΎ Saved to: {clip_save_path} (shape: {clip_embs.shape})\")\n","\n","print(f\" T5 shape : {t5_embs.shape}\")\n","print(f\" CLIP shape: {clip_embs.shape}\")\n","if len(t5_embs) != len(clip_embs):\n"," print(\"β οΈ Note: Some images may have failed to process (shapes don't match)\")"]},{"cell_type":"code","source":["# @markdown # Cell 9: Train Mapper (CLIP image embedding β T5 text encoding)\n","\n","import torch.nn as nn\n","from torch.utils.data import TensorDataset, DataLoader\n","import torch\n","import numpy as np\n","\n","debug = True #@param {type:\"boolean\"}\n","\n","# Load saved embeddings\n","clip_embs = np.load(\"/content/clip_embeddings.npy\")\n","t5_embs = np.load(\"/content/t5_embeddings.npy\")\n","\n","print(f\"π Loaded CLIP embeddings (input) : {clip_embs.shape}\")\n","print(f\"π Loaded T5 embeddings (target) : {t5_embs.shape}\")\n","\n","class CLIPtoT5Mapper(nn.Module):\n"," def __init__(self, input_dim=512, output_dim=4096, hidden_dim=2048):\n"," super().__init__()\n"," self.net = nn.Sequential(\n"," nn.Linear(input_dim, hidden_dim),\n"," nn.ReLU(),\n"," nn.Linear(hidden_dim, hidden_dim * 2),\n"," nn.ReLU(),\n"," nn.Linear(hidden_dim * 2, output_dim)\n"," )\n","\n"," def forward(self, x):\n"," return self.net(x)\n","\n","# Dataset: CLIP β T5\n","dataset = TensorDataset(\n"," torch.from_numpy(clip_embs).float(),\n"," torch.from_numpy(t5_embs).float()\n",")\n","dataloader = DataLoader(dataset, batch_size=16, shuffle=True)\n","\n","model = CLIPtoT5Mapper().to(device)\n","optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)\n","criterion = nn.MSELoss()\n","\n","num_epochs = 300 #@param {type:\"integer\"}\n","\n","print(f\"π Training CLIP β T5 mapper for {num_epochs} epochs...\")\n","\n","for epoch in range(num_epochs):\n"," total_loss = 0.0\n"," for inputs, targets in dataloader:\n"," inputs = inputs.to(device)\n"," targets = targets.to(device)\n","\n"," optimizer.zero_grad()\n"," outputs = 
model(inputs)\n"," loss = criterion(outputs, targets)\n"," loss.backward()\n"," optimizer.step()\n","\n"," total_loss += loss.item()\n","\n"," avg_loss = total_loss / len(dataloader)\n"," if (epoch + 1) % 10 == 0 or epoch == 0:\n"," print(f\"Epoch {epoch+1:2d}/{num_epochs} | Avg Loss: {avg_loss:.6f}\")\n","\n","# Save locally first\n","local_save_path = \"/content/clip_to_t5_mapper.pth\"\n","torch.save(model.state_dict(), local_save_path)\n","\n","print(\"π Training completed!\")\n","print(f\"β
Model saved locally: {local_save_path}\")"],"metadata":{"id":"dGb79_UaYpbe","cellView":"form"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# @markdown # Cell 10: Save Model + Embeddings to Google Drive\n","\n","import shutil\n","import os\n","\n","debug = True #@param {type:\"boolean\"}\n","\n","# @markdown ### Choose folder in your Drive (change if needed)\n","drive_save_folder = \"/content/drive/MyDrive/T5_CLIP_Mapper\" #@param {type:\"string\"}\n","\n","os.makedirs(drive_save_folder, exist_ok=True)\n","\n","# Files to save\n","files_to_save = {\n"," \"/content/clip_to_t5_mapper.pth\": f\"{drive_save_folder}/clip_to_t5_mapper.pth\",\n"," \"/content/t5_embeddings.npy\": f\"{drive_save_folder}/t5_embeddings.npy\",\n"," \"/content/clip_embeddings.npy\": f\"{drive_save_folder}/clip_embeddings.npy\"\n","}\n","\n","print(f\"πΎ Saving files to Google Drive folder: {drive_save_folder}\")\n","\n","for local_path, drive_path in files_to_save.items():\n"," if os.path.exists(local_path):\n"," shutil.copy2(local_path, drive_path)\n"," print(f\"β
Saved: {os.path.basename(local_path)} β {drive_path}\")\n"," else:\n"," print(f\"β οΈ File not found: {local_path}\")\n","\n","print(\"\\nπ All files saved to Google Drive!\")\n","if debug:\n"," print(f\" Drive folder contents: {os.listdir(drive_save_folder)}\")"],"metadata":{"id":"N0CQFqwX4TBz","cellView":"form"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# Step 1: Mount Google Drive\n","from google.colab import drive\n","drive.mount('/content/drive')"],"metadata":{"id":"AEA3l5bSLSo1"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# @markdown # Cell 11: Load Model + Embeddings from Google Drive\n","\n","import torch\n","import numpy as np\n","import os\n","\n","debug = True #@param {type:\"boolean\"}\n","device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n","\n","\n","# @markdown ### Path to your saved folder in Drive\n","drive_folder = \"/content/drive/MyDrive/T5_CLIP_Mapper\" #@param {type:\"string\"}\n","\n","# File paths\n","mapper_path = f\"{drive_folder}/clip_to_t5_mapper.pth\"\n","t5_path = f\"{drive_folder}/t5_embeddings.npy\"\n","clip_path = f\"{drive_folder}/clip_embeddings.npy\"\n","\n","print(\"π Loading files from Google Drive...\")\n","\n","# Load embeddings\n","t5_embs = np.load(t5_path)\n","clip_embs = np.load(clip_path)\n","\n","print(f\"β
Loaded T5 embeddings : {t5_embs.shape}\")\n","print(f\"β
Loaded CLIP embeddings: {clip_embs.shape}\")\n","\n","# Define the model class again (needed for loading state_dict)\n","class CLIPtoT5Mapper(torch.nn.Module):\n"," def __init__(self, input_dim=512, output_dim=4096, hidden_dim=2048):\n"," super().__init__()\n"," self.net = torch.nn.Sequential(\n"," torch.nn.Linear(input_dim, hidden_dim),\n"," torch.nn.ReLU(),\n"," torch.nn.Linear(hidden_dim, hidden_dim * 2),\n"," torch.nn.ReLU(),\n"," torch.nn.Linear(hidden_dim * 2, output_dim)\n"," )\n","\n"," def forward(self, x):\n"," return self.net(x)\n","\n","# Load the trained mapper\n","model = CLIPtoT5Mapper().to(device)\n","model.load_state_dict(torch.load(mapper_path, map_location=device))\n","model.eval()\n","\n","print(\"β
CLIP β T5 mapper loaded successfully\")\n","print(f\" Model ready on device: {device}\")\n","\n","if debug:\n"," print(f\" Mapper path: {mapper_path}\")\n"," print(f\" Total parameters: {sum(p.numel() for p in model.parameters()):,}\")"],"metadata":{"id":"L9SM8hbM4KCK","cellView":"form"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# @markdown # Cell 12: Visualize Differences β CLIP vs CLIP, T5 vs T5, Predicted T5 vs Predicted T5, and Predicted vs True T5\n","\n","import torch\n","import numpy as np\n","import matplotlib.pyplot as plt\n","import seaborn as sns\n","from sklearn.decomposition import PCA\n","from sklearn.metrics.pairwise import cosine_similarity\n","import os\n","\n","debug = True #@param {type:\"boolean\"}\n","\n","print(\"π Preparing visualizations for embedding differences...\")\n","\n","# Make sure we have everything loaded (run Cell 11 first)\n","if 'model' not in globals() or 'clip_embs' not in globals() or 't5_embs' not in globals():\n"," print(\"β οΈ Please run Cell 11 first to load model + embeddings!\")\n"," raise SystemExit(\"Missing data\")\n","\n","# ====================== 1. Compute predicted T5 from CLIP ======================\n","print(\"π Computing predicted T5 embeddings from CLIP inputs...\")\n","with torch.no_grad():\n"," clip_tensor = torch.from_numpy(clip_embs).float().to(device)\n"," predicted_t5 = model(clip_tensor).cpu().numpy()\n","\n","print(f\"β
Predicted T5 shape: {predicted_t5.shape}\")\n","\n","# ====================== 2. Pairwise cosine similarities WITHIN each set ======================\n","print(\"π Calculating pairwise cosine similarities (within each embedding group)...\")\n","\n","def get_pairwise_similarities(embeddings):\n"," \"\"\"Return off-diagonal cosine similarities (excluding self-similarity=1.0)\"\"\"\n"," sim_matrix = cosine_similarity(embeddings)\n"," np.fill_diagonal(sim_matrix, np.nan) # ignore diagonal (always 1.0)\n"," return sim_matrix.flatten()[~np.isnan(sim_matrix.flatten())]\n","\n","# CLIP vs CLIP (among themselves)\n","clip_sim = get_pairwise_similarities(clip_embs)\n","\n","# True T5 vs True T5 (among themselves)\n","t5_sim = get_pairwise_similarities(t5_embs)\n","\n","# Predicted T5 vs Predicted T5 (among themselves)\n","pred_t5_sim = get_pairwise_similarities(predicted_t5)\n","\n","# Predicted T5 vs True T5 (the most important metric)\n","pred_true_sim = np.diag(cosine_similarity(predicted_t5, t5_embs))\n","\n","print(f\" CLIP vs CLIP mean similarity : {clip_sim.mean():.4f}\")\n","print(f\" True T5 vs True T5 mean similarity: {t5_sim.mean():.4f}\")\n","print(f\" Predicted T5 vs Predicted T5 : {pred_t5_sim.mean():.4f}\")\n","print(f\" Predicted T5 vs True T5 (model quality): {pred_true_sim.mean():.4f}\")\n","\n","# ====================== 3. 
Dimensionality reduction for scatter plots ======================\n","print(\"π Reducing dimensions with PCA...\")\n","\n","# PCA on CLIP space\n","pca_clip = PCA(n_components=2, random_state=42)\n","clip_2d = pca_clip.fit_transform(clip_embs)\n","\n","# PCA on T5 space (true + predicted in SAME space)\n","all_t5 = np.vstack([t5_embs, predicted_t5])\n","pca_t5 = PCA(n_components=2, random_state=42)\n","t5_2d = pca_t5.fit_transform(all_t5)\n","true_t5_2d = t5_2d[:len(t5_embs)]\n","pred_t5_2d = t5_2d[len(t5_embs):]\n","\n","print(f\" PCA explained variance β CLIP: {pca_clip.explained_variance_ratio_.sum():.1%}\")\n","print(f\" PCA explained variance β T5 : {pca_t5.explained_variance_ratio_.sum():.1%}\")\n","\n","# ====================== 4. Plotting ======================\n","plt.figure(figsize=(20, 12))\n","\n","# Row 1: Pairwise similarity histograms (within each group)\n","plt.subplot(2, 3, 1)\n","sns.histplot(clip_sim, kde=True, color='skyblue', bins=50)\n","plt.title('CLIP vs CLIP\\n(Pairwise Cosine Similarity)')\n","plt.xlabel('Cosine Similarity')\n","plt.ylabel('Count')\n","plt.grid(True, alpha=0.3)\n","\n","plt.subplot(2, 3, 2)\n","sns.histplot(t5_sim, kde=True, color='green', bins=50)\n","plt.title('True T5 vs True T5\\n(Pairwise Cosine Similarity)')\n","plt.xlabel('Cosine Similarity')\n","plt.ylabel('Count')\n","plt.grid(True, alpha=0.3)\n","\n","plt.subplot(2, 3, 3)\n","sns.histplot(pred_t5_sim, kde=True, color='orange', bins=50)\n","plt.title('Predicted T5 vs Predicted T5\\n(Pairwise Cosine Similarity)')\n","plt.xlabel('Cosine Similarity')\n","plt.ylabel('Count')\n","plt.grid(True, alpha=0.3)\n","\n","# Row 2: Model quality + geometry\n","plt.subplot(2, 3, 4)\n","sns.histplot(pred_true_sim, kde=True, color='purple', bins=50)\n","plt.title('Predicted T5 vs True T5\\n(Model Quality β Per-Pair)')\n","plt.xlabel('Cosine Similarity')\n","plt.ylabel('Count')\n","plt.grid(True, alpha=0.3)\n","\n","plt.subplot(2, 3, 5)\n","plt.scatter(clip_2d[:, 0], clip_2d[:, 1], 
alpha=0.7, s=25, color='skyblue')\n","plt.title('CLIP Embeddings (PCA 2D)')\n","plt.xlabel('PC1')\n","plt.ylabel('PC2')\n","plt.grid(True, alpha=0.3)\n","\n","plt.subplot(2, 3, 6)\n","plt.scatter(true_t5_2d[:, 0], true_t5_2d[:, 1], alpha=0.7, s=25, color='green', label='True T5')\n","plt.scatter(pred_t5_2d[:, 0], pred_t5_2d[:, 1], alpha=0.7, s=25, color='red', label='Predicted T5')\n","plt.title('True T5 vs Predicted T5\\n(in shared PCA space)')\n","plt.xlabel('PC1')\n","plt.ylabel('PC2')\n","plt.legend()\n","plt.grid(True, alpha=0.3)\n","\n","plt.tight_layout()\n","plt.show()\n","\n","# ====================== 5. Optional: L2 distance histogram ======================\n","plt.figure(figsize=(8, 5))\n","diff_vectors = predicted_t5 - t5_embs\n","diff_norms = np.linalg.norm(diff_vectors, axis=1)\n","sns.histplot(diff_norms, kde=True, color='crimson', bins=50)\n","plt.title('L2 (Euclidean) Distance: Predicted T5 β True T5')\n","plt.xlabel('Distance')\n","plt.ylabel('Count')\n","plt.grid(True, alpha=0.3)\n","plt.show()\n","\n","print(f\"\\nβ
All plots generated successfully!\")\n","print(f\" Number of samples used: {len(clip_embs)}\")\n","if debug:\n"," print(\"\\nπ‘ Interpretation guide:\")\n"," print(\" β’ First three histograms: How clustered / diverse each embedding type is internally\")\n"," print(\" β’ Fourth histogram: How close the modelβs predictions are to the real T5 embeddings\")\n"," print(\" β’ Scatter plots: Visual geometry and overlap between true vs predicted T5\")"],"metadata":{"id":"lb-d8VnI4HXP","cellView":"form"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# @markdown # Cell 13: Install Dependencies, GPU Check & Mount Drive\n","\n","import torch\n","import subprocess\n","import sys\n","import os\n","\n","debug = True #@param {type:\"boolean\"}\n","\n","# ====================== GPU Check ======================\n","print(\"π Checking GPU...\")\n","if torch.cuda.is_available():\n"," print(f\"β
GPU ready: {torch.cuda.get_device_name(0)}\")\n"," print(f\"VRAM: {torch.cuda.get_device_properties(0).total_memory / 1024**3:.1f} GB\")\n","else:\n"," print(\"β No GPU detected! Please enable GPU runtime.\")\n"," raise SystemExit(\"GPU is required\")\n","\n","# ====================== Install Dependencies ======================\n","print(\"\\nπ¦ Installing required packages...\")\n","\n","!pip install -q --no-cache-dir open_clip_torch transformers accelerate bitsandbytes\n","\n","print(\"β
All dependencies installed successfully\")\n","\n","# ====================== Mount Google Drive ======================\n","from google.colab import drive\n","drive.mount('/content/drive', force_remount=True)\n","\n","print(\"β
Google Drive mounted\")\n","if debug:\n"," print(f\" Drive path: /content/drive/MyDrive\")"],"metadata":{"id":"fhK7ma507JmQ","cellView":"form"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["# @markdown # Cell 14: Process New Zip File β Generate T5 + CLIP Embeddings β Save to Drive\n","\n","import torch\n","import numpy as np\n","from PIL import Image\n","import open_clip\n","import os\n","import zipfile\n","from transformers import T5EncoderModel, T5Tokenizer, BitsAndBytesConfig\n","\n","debug = True #@param {type:\"boolean\"}\n","\n","# ====================== PARAMETERS ======================\n","zip_path = \"/content/drive/MyDrive/lisa_simpson.zip\" #@param {type:\"string\"}\n","drive_save_folder = \"/content/drive/MyDrive/T5_CLIP_Mapper\" #@param {type:\"string\"}\n","extract_dir = \"/content/new_dataset\" #@param {type:\"string\"}\n","\n","print(f\"π Processing zip file: {zip_path}\")\n","print(f\"πΎ Will save embeddings to: {drive_save_folder}\")\n","\n","# ====================== 1. Unzip the Dataset ======================\n","os.makedirs(extract_dir, exist_ok=True)\n","\n","device =\"cuda\" if torch.cuda.is_available() else \"cpu\"\n","\n","\n","print(\"π¦ Extracting zip file...\")\n","with zipfile.ZipFile(zip_path, 'r') as zip_ref:\n"," zip_ref.extractall(extract_dir)\n","\n","print(f\"β
Extracted to: {extract_dir}\")\n","\n","# ====================== 2. Find Paired Text & Image Files ======================\n","text_files = {}\n","image_files = {}\n","image_exts = ('.png', '.jpg', '.jpeg', '.gif', '.bmp', '.webp')\n","\n","for root, _, files in os.walk(extract_dir):\n"," for filename in files:\n"," base = os.path.splitext(filename)[0]\n"," ext = os.path.splitext(filename)[1].lower()\n"," full_path = os.path.join(root, filename)\n","\n"," if ext == '.txt':\n"," text_files[base] = full_path\n"," elif ext in image_exts:\n"," image_files[base] = full_path\n","\n","pairs = {base: (text_files[base], image_files[base])\n"," for base in text_files if base in image_files}\n","\n","print(f\"β
Found {len(pairs)} valid text-image pairs\")\n","\n","if debug and len(pairs) > 0:\n"," sample = list(pairs.keys())[0]\n"," print(f\" Sample pair: {sample}.txt + {sample}{os.path.splitext(pairs[sample][1])[1]}\")\n","\n","# ====================== 3. T5-XXL Encoder-Only (Phase 1) ======================\n","print(\"\\nπ§ [Phase 1] Loading T5-XXL encoder-only (4-bit)...\")\n","\n","model_id = \"mcmonkey/google_t5-v1_1-xxl_encoderonly\"\n","\n","tokenizer = T5Tokenizer.from_pretrained(model_id)\n","\n","quant_config = BitsAndBytesConfig(\n"," load_in_4bit=True,\n"," bnb_4bit_compute_dtype=torch.float16,\n"," bnb_4bit_use_double_quant=True,\n"," bnb_4bit_quant_type=\"nf4\"\n",")\n","\n","text_encoder = T5EncoderModel.from_pretrained(\n"," model_id,\n"," quantization_config=quant_config,\n"," device_map=\"auto\",\n"," torch_dtype=torch.float16\n",")\n","text_encoder.eval()\n","\n","t5_embeddings = []\n","max_length = 512\n","\n","print(f\"π Encoding {len(pairs)} texts with T5-XXL...\")\n","\n","for idx, (base, (txt_path, _)) in enumerate(pairs.items()):\n"," if debug and idx % 50 == 0 and idx > 0:\n"," print(f\" Text {idx}/{len(pairs)} processed...\")\n","\n"," with open(txt_path, 'r', encoding='utf-8') as f:\n"," text = f.read().strip()\n","\n"," if not text:\n"," if debug:\n"," print(f\" Skipped empty: {base}\")\n"," continue\n","\n"," inputs = tokenizer(text, padding=\"max_length\", max_length=max_length,\n"," truncation=True, return_tensors=\"pt\")\n","\n"," input_ids = inputs.input_ids.to(\"cuda\")\n"," attention_mask = inputs.attention_mask.to(\"cuda\")\n","\n"," with torch.no_grad():\n"," outputs = text_encoder(input_ids=input_ids, attention_mask=attention_mask)\n"," t5_emb = outputs.last_hidden_state.mean(dim=1).squeeze(0).cpu().numpy()\n","\n"," t5_embeddings.append(t5_emb)\n","\n","# Unload T5 to free VRAM\n","del text_encoder\n","torch.cuda.empty_cache()\n","\n","t5_embs = np.array(t5_embeddings)\n","print(f\"β
T5 embeddings created β shape: {t5_embs.shape}\")\n","print(\"π§Ή T5 encoder unloaded from VRAM\")\n","\n","# ====================== 4. OpenCLIP (Phase 2) ======================\n","print(\"\\nπΈ [Phase 2] Loading OpenCLIP ViT-B-32...\")\n","\n","clip_model, _, preprocess = open_clip.create_model_and_transforms(\n"," \"ViT-B-32\", pretrained=\"laion400m_e32\"\n",")\n","clip_model.to(device)\n","clip_model.eval()\n","\n","clip_embeddings = []\n","\n","print(f\"π Encoding {len(pairs)} images with OpenCLIP...\")\n","\n","for idx, (base, (_, img_path)) in enumerate(pairs.items()):\n"," if debug and idx % 50 == 0 and idx > 0:\n"," print(f\" Image {idx}/{len(pairs)} processed...\")\n","\n"," try:\n"," image = preprocess(Image.open(img_path)).unsqueeze(0).to(device)\n"," with torch.no_grad():\n"," clip_emb = clip_model.encode_image(image).squeeze(0).cpu().numpy()\n"," clip_embeddings.append(clip_emb)\n"," except Exception as e:\n"," if debug:\n"," print(f\" β οΈ Error on image {base}: {e}\")\n","\n","clip_embs = np.array(clip_embeddings)\n","print(f\"β
# ====================== 5. Save to Google Drive ======================
os.makedirs(drive_save_folder, exist_ok=True)

t5_save_path = f"{drive_save_folder}/t5_embeddings.npy"
clip_save_path = f"{drive_save_folder}/clip_embeddings.npy"

# Persist both arrays so later cells (and later sessions) can reload them
# without re-running the expensive encoding phases.
np.save(t5_save_path, t5_embs)
np.save(clip_save_path, clip_embs)

print("\nFiles saved to Google Drive:")
print(f"  -> {t5_save_path}")
print(f"  -> {clip_save_path}")
print(f"  T5 shape : {t5_embs.shape}")
print(f"  CLIP shape: {clip_embs.shape}")

if debug:
    print(f"\nCurrent contents in {drive_save_folder}:")
    print(os.listdir(drive_save_folder))

print("\nProcessing completed! Embeddings saved to Drive.")


# @markdown # Cell 15: Test Trained CLIP -> T5 Mapper on New Dataset

import torch
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.decomposition import PCA
from sklearn.metrics.pairwise import cosine_similarity
import os

debug = True  #@param {type:"boolean"}

# ====================== PARAMETERS ======================
drive_folder = "/content/drive/MyDrive/T5_CLIP_Mapper"  #@param {type:"string"}

print(f"Loading data from: {drive_folder}")

# ====================== 1. Load Embeddings from New Dataset ======================
t5_path = f"{drive_folder}/t5_embeddings.npy"
clip_path = f"{drive_folder}/clip_embeddings.npy"

t5_embs = np.load(t5_path)
clip_embs = np.load(clip_path)

print("Loaded new dataset:")
print(f"  CLIP embeddings: {clip_embs.shape}")
print(f"  Real T5 embeddings: {t5_embs.shape}")

# ====================== 2. Load the Trained Mapper ======================
mapper_path = f"{drive_folder}/clip_to_t5_mapper.pth"


class CLIPtoT5Mapper(torch.nn.Module):
    """MLP mapping a CLIP image embedding to a T5 text embedding.

    Architecture: input_dim -> hidden_dim -> 2*hidden_dim -> output_dim
    with ReLU activations.  The layer sizes must match the training-time
    network exactly, or load_state_dict below will fail.
    """

    def __init__(self, input_dim: int = 512, output_dim: int = 4096,
                 hidden_dim: int = 2048) -> None:
        super().__init__()
        self.net = torch.nn.Sequential(
            torch.nn.Linear(input_dim, hidden_dim),
            torch.nn.ReLU(),
            torch.nn.Linear(hidden_dim, hidden_dim * 2),
            torch.nn.ReLU(),
            torch.nn.Linear(hidden_dim * 2, output_dim),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Map a (batch, input_dim) tensor to (batch, output_dim)."""
        return self.net(x)


device = "cuda" if torch.cuda.is_available() else "cpu"
model = CLIPtoT5Mapper().to(device)
# SECURITY FIX: the checkpoint is a plain state dict, so refuse to unpickle
# arbitrary objects (weights_only=True prevents code execution from a
# tampered/untrusted .pth file; default in torch >= 2.6, explicit here).
model.load_state_dict(torch.load(mapper_path, map_location=device, weights_only=True))
model.eval()

print("Trained CLIP -> T5 mapper loaded successfully")
# ====================== 3. Generate Predicted T5 Embeddings ======================
print("Generating predicted T5 embeddings from CLIP...")

with torch.no_grad():
    clip_tensor = torch.from_numpy(clip_embs).float().to(device)
    predicted_t5 = model(clip_tensor).cpu().numpy()

print(f"Predicted T5 shape: {predicted_t5.shape}")

# ====================== 4. Compute Comparison Metrics ======================
print("\nComputing similarity metrics...")


def paired_cosine(a, b):
    """Row-wise cosine similarity between two (N, D) arrays.

    Equivalent to np.diag(cosine_similarity(a, b)) but runs in O(N*D) time
    and O(N) memory instead of materialising the full N x N matrix just to
    read its diagonal.
    """
    num = np.einsum("ij,ij->i", a, b)
    denom = np.linalg.norm(a, axis=1) * np.linalg.norm(b, axis=1)
    return num / denom


# Cosine similarity: Predicted vs Real T5, one value per matched pair.
pred_true_sim = paired_cosine(predicted_t5, t5_embs)


def get_pairwise_sim(embeddings):
    """All off-diagonal pairwise cosine similarities, flattened to 1-D.

    The diagonal (self-similarity, always 1.0) is excluded via a boolean
    mask rather than the original NaN-fill-then-filter trick.
    """
    sim_matrix = cosine_similarity(embeddings)
    off_diagonal = ~np.eye(len(embeddings), dtype=bool)
    return sim_matrix[off_diagonal]


clip_sim = get_pairwise_sim(clip_embs)
t5_sim = get_pairwise_sim(t5_embs)
pred_t5_sim = get_pairwise_sim(predicted_t5)

print(f"  CLIP vs CLIP mean similarity        : {clip_sim.mean():.4f}")
print(f"  True T5 vs True T5 mean similarity  : {t5_sim.mean():.4f}")
print(f"  Predicted T5 vs Predicted T5        : {pred_t5_sim.mean():.4f}")
print(f"  Predicted T5 vs True T5 (key metric): {pred_true_sim.mean():.4f}")
# ====================== 5. Visualizations ======================
# Explicit figure/axes interface; one spec entry per similarity histogram.
fig, axes = plt.subplots(2, 3, figsize=(20, 12))

hist_specs = [
    (axes[0, 0], clip_sim, "skyblue", "CLIP vs CLIP\n(Pairwise Cosine)"),
    (axes[0, 1], t5_sim, "green", "True T5 vs True T5\n(Pairwise Cosine)"),
    (axes[0, 2], pred_t5_sim, "orange", "Predicted T5 vs Predicted T5\n(Pairwise Cosine)"),
    (axes[1, 0], pred_true_sim, "purple", "Predicted T5 vs True T5\n(Model Performance on New Data)"),
]
for ax, values, color, title in hist_specs:
    sns.histplot(values, kde=True, color=color, bins=50, ax=ax)
    ax.set_title(title)
    ax.set_xlabel("Cosine Similarity")
    ax.grid(True, alpha=0.3)
axes[1, 0].set_ylabel("Count")

# PCA scatter plots: CLIP alone, then true + predicted T5 projected into
# ONE shared PCA space so their positions are directly comparable.
pca_clip = PCA(n_components=2, random_state=42)
clip_2d = pca_clip.fit_transform(clip_embs)

all_t5 = np.vstack([t5_embs, predicted_t5])
pca_t5 = PCA(n_components=2, random_state=42)
t5_2d = pca_t5.fit_transform(all_t5)
true_t5_2d = t5_2d[: len(t5_embs)]
pred_t5_2d = t5_2d[len(t5_embs):]

ax_clip = axes[1, 1]
ax_clip.scatter(clip_2d[:, 0], clip_2d[:, 1], alpha=0.7, s=25, color="skyblue")
ax_clip.set_title("CLIP Embeddings (PCA 2D)")
ax_clip.set_xlabel("PC1")
ax_clip.set_ylabel("PC2")
ax_clip.grid(True, alpha=0.3)

ax_t5 = axes[1, 2]
ax_t5.scatter(true_t5_2d[:, 0], true_t5_2d[:, 1], alpha=0.7, s=25, color="green", label="True T5")
ax_t5.scatter(pred_t5_2d[:, 0], pred_t5_2d[:, 1], alpha=0.7, s=25, color="red", label="Predicted T5")
ax_t5.set_title("True vs Predicted T5\n(Shared PCA Space)")
ax_t5.set_xlabel("PC1")
ax_t5.set_ylabel("PC2")
ax_t5.legend()
ax_t5.grid(True, alpha=0.3)

fig.tight_layout()
plt.show()

# Per-pair L2 distance between each predicted embedding and its target.
fig_l2, ax_l2 = plt.subplots(figsize=(8, 5))
diff_norms = np.linalg.norm(predicted_t5 - t5_embs, axis=1)
sns.histplot(diff_norms, kde=True, color="crimson", bins=50, ax=ax_l2)
ax_l2.set_title("L2 Distance: Predicted T5 - True T5")
ax_l2.set_xlabel("Euclidean Distance")
ax_l2.set_ylabel("Count")
ax_l2.grid(True, alpha=0.3)
plt.show()

print("\nEvaluation on new dataset completed!")
# Final debug summary of the evaluation run.
if debug:
    print(f"  Number of test samples: {len(t5_embs)}")
    print(f"  Average cosine similarity (Predicted vs True): {pred_true_sim.mean():.4f}")
|