{"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"provenance":[],"gpuType":"T4","authorship_tag":"ABX9TyNQZ186mO9oumZhX1AkEXuh"},"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"name":"python"},"accelerator":"GPU"},"cells":[{"cell_type":"code","source":["import os\n","import zipfile\n","import urllib.request\n","import math\n","import torch\n","import torch.nn as nn\n","import torch.nn.functional as F\n","import torch.optim as optim\n","from torch.utils.data import DataLoader\n","from torchvision import datasets, transforms\n","\n","# ==========================================\n","# 1. OTOMATISASI DOWNLOAD & PREPROCESS DATASET\n","# ==========================================\n","def prepare_tiny_imagenet():\n","    dataset_url = \"http://cs231n.stanford.edu/tiny-imagenet-200.zip\"\n","    zip_path = \"./tiny-imagenet-200.zip\"\n","    extract_path = \"./tiny-imagenet-200\"\n","\n","    # Download jika file zip belum ada\n","    if not os.path.exists(zip_path):\n","        print(\"πŸ“₯ Mengunduh Tiny-ImageNet (~230MB)... Mohon tunggu...\")\n","        urllib.request.urlretrieve(dataset_url, zip_path)\n","        print(\"βœ… Download Selesai!\")\n","\n","    # Ekstrak jika folder belum ada\n","    if not os.path.exists(extract_path):\n","        print(\"πŸ“¦ Mengekstrak dataset...\")\n","        with zipfile.ZipFile(zip_path, 'r') as zip_ref:\n","            zip_ref.extractall(\"./\")\n","        print(\"βœ… Ekstrak Selesai!\")\n","\n","    return os.path.join(extract_path, \"train\"), os.path.join(extract_path, \"val\")\n","\n","train_dir, val_dir = prepare_tiny_imagenet()\n","\n","# Augmentasi data disesuaikan untuk resolusi 64x64 piksel\n","transform_train = transforms.Compose([\n","    transforms.RandomHorizontalFlip(),\n","    transforms.RandomRotation(15),\n","    transforms.ToTensor(),\n","    transforms.Normalize((0.4802, 0.4481, 0.3975), (0.2302, 0.2265, 0.2262))\n","])\n","\n","transform_val = transforms.Compose([\n","    transforms.ToTensor(),\n","    transforms.Normalize((0.4802, 0.4481, 0.3975), (0.2302, 0.2265, 0.2262))\n","])\n","\n","# Load dataset menggunakan ImageFolder\n","train_dataset = datasets.ImageFolder(root=train_dir, transform=transform_train)\n","# ==========================================\n","# SCRIPT OTOMATIS MERAPIKAN FOLDER VAL ASLI\n","# ==========================================\n","import os\n","\n","val_img_dir = \"./tiny-imagenet-200/val/images\"\n","val_annotations = \"./tiny-imagenet-200/val/val_annotations.txt\"\n","\n","if os.path.exists(val_img_dir):\n","    print(\"🧹 Merapikan struktur folder Validation asli Tiny-ImageNet...\")\n","    with open(val_annotations, \"r\") as f:\n","        lines = f.readlines()\n","\n","    for line in lines:\n","        parts = line.strip().split(\"\\t\")\n","        img_name = parts[0]\n","        class_name = parts[1]\n","\n","        # Buat sub-folder khusus per kelas di dalam folder val\n","        class_dir = os.path.join(\"./tiny-imagenet-200/val\", class_name)\n","        os.makedirs(class_dir, exist_ok=True)\n","\n","        # Pindahkan gambar ke folder kelasnya masing-masing\n","        src_path = os.path.join(val_img_dir, img_name)\n","        dst_path = os.path.join(class_dir, img_name)\n","        if os.path.exists(src_path):\n","            os.rename(src_path, dst_path)\n","\n","    # Hapus folder images bawaan yang sekarang sudah kosong\n","    os.rmdir(val_img_dir)\n","    print(\"βœ… Folder Validation sekarang sudah rapi dan siap diuji!\")\n","# KEMBALI KE JALAN YANG 
BENAR & JUJUR 100%\n","val_dataset = datasets.ImageFolder(root=val_dir, transform=transform_val)\n","# Catatan: Struktur folder 'val' TinyImageNet bawaan sedikit berbeda,\n","# untuk kemudahan benchmark awal kita gunakan struktur folder train sebagai basis validation loader sementara.\n","#val_dataset = datasets.ImageFolder(root=train_dir, transform=transform_val)\n","\n","train_loader = DataLoader(train_dataset, batch_size=128, shuffle=True, num_workers=2, pin_memory=True)\n","val_loader = DataLoader(val_dataset, batch_size=256, shuffle=False, num_workers=2, pin_memory=True)\n","\n","\n","# ==========================================\n","# 2. DEFINISI ARSITEKTUR LOOKTHEM (TINY-IMAGENET VERSION)\n","# ==========================================\n","class LookThemLayer(nn.Module):\n","    def __init__(self, num_tokens, in_features, hidden_dim):\n","        super(LookThemLayer, self).__init__()\n","        self.num_tokens = num_tokens\n","        self.in_features = in_features\n","\n","        # Batched Parameters (Vectorized)\n","        self.mod1_w1 = nn.Parameter(torch.randn(num_tokens, in_features, hidden_dim))\n","        self.mod1_b1 = nn.Parameter(torch.zeros(num_tokens, hidden_dim))\n","        self.mod1_w2 = nn.Parameter(torch.randn(num_tokens, hidden_dim, 1))\n","        self.mod1_b2 = nn.Parameter(torch.zeros(num_tokens, 1))\n","\n","        self.mod2_w1 = nn.Parameter(torch.randn(num_tokens, in_features, hidden_dim))\n","        self.mod2_b1 = nn.Parameter(torch.zeros(num_tokens, hidden_dim))\n","        self.mod2_w2 = nn.Parameter(torch.randn(num_tokens, hidden_dim, 1))\n","        self.mod2_b2 = nn.Parameter(torch.zeros(num_tokens, 1))\n","\n","        self.trans_w = nn.Parameter(torch.randn(num_tokens, 1, 1))\n","        self.trans_b = nn.Parameter(torch.zeros(num_tokens, 1))\n","        self._init_weights()\n","\n","    def _init_weights(self):\n","        for w in [self.mod1_w1, self.mod2_w1, self.mod1_w2, self.mod2_w2, self.trans_w]:\n","            nn.init.kaiming_uniform_(w, a=math.sqrt(5))\n","\n","    def forward(self, x):\n","        N = self.num_tokens\n","\n","        # 1. Einstein Summation Projections\n","        h1 = torch.einsum('bti,tij->btj', x, self.mod1_w1) + self.mod1_b1\n","        out_m1 = torch.einsum('btj,tjk->btk', F.gelu(h1), self.mod1_w2) + self.mod1_b2\n","\n","        h2 = torch.einsum('bti,tij->btj', x, self.mod2_w1) + self.mod2_b1\n","        out_m2 = torch.einsum('btj,tjk->btk', F.gelu(h2), self.mod2_w2) + self.mod2_b2\n","\n","        # 2. Rasio Kontras + Tanh\n","        out_m2_safe = out_m2 + 1e-5\n","        compare = torch.tanh(out_m1.unsqueeze(2) / out_m2_safe.unsqueeze(1))\n","        compare2 = torch.tanh(out_m1.unsqueeze(1) / out_m2_safe.unsqueeze(2))\n","\n","        # 3. Spatial J Transformations\n","        bias_reshaped = self.trans_b.view(1, 1, N, 1)\n","        trans_compare = torch.einsum('bije,jef->bijf', compare, self.trans_w) + bias_reshaped\n","        trans_compare2 = torch.einsum('bije,jef->bijf', compare2, self.trans_w) + bias_reshaped\n","\n","        # 4. Contextual Interaction\n","        interaksi = (trans_compare * x.unsqueeze(2) + trans_compare2 * x.unsqueeze(1)) / 2\n","\n","        # 5. 
Masking Self-Bias (i == j)\n","        mask = 1.0 - torch.eye(N, device=x.device)\n","        interaksi_masked = interaksi * mask.view(1, N, N, 1)\n","\n","        return interaksi_masked.sum(dim=2) / (N - 1.0)\n","\n","\n","class LookThemTinyImageNet(nn.Module):\n","    def __init__(self):\n","        super(LookThemTinyImageNet, self).__init__()\n","\n","        # CNN 3 Tahap untuk mereduksi gambar 64x64 menjadi 8x8 secara bertahap dan aman\n","        self.conv_block = nn.Sequential(\n","            nn.Conv2d(3, 16, kernel_size=3, stride=2, padding=1),   # [B, 32, 64, 64]\n","            nn.BatchNorm2d(16), nn.GELU(),\n","            #nn.MaxPool2d(2, 2),                                    # [B, 32, 32, 32]\n","\n","            nn.Conv2d(16, 32, kernel_size=3, stride=2, padding=1),  # [B, 64, 32, 32]\n","            nn.BatchNorm2d(32), nn.GELU(),\n","            #nn.MaxPool2d(2, 2),                                    # [B, 64, 16, 16]\n","\n","            nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1),  # [B, 64, 16, 16]\n","            nn.BatchNorm2d(64), nn.GELU(),\n","            #nn.MaxPool2d(2, 2)                                     # [B, 64, 8, 8]\n","        )\n","\n","        # 8x8 Spatial Area = 64 Token. Setiap token membawa 64 in_features.\n","        self.lookthem = LookThemLayer(num_tokens=64, in_features=64, hidden_dim=32)\n","\n","        # Classifier dengan kapasitas lebih besar dan rem Dropout 0.4 untuk mencegah overfit menghafal 200 kelas\n","        self.classifier = nn.Sequential(\n","            nn.Flatten(),\n","            nn.Linear(64 * 64, 256),\n","            nn.ReLU(),\n","            nn.Dropout(0.3),\n","            nn.Linear(256, 200) # Output 200 Kelas Tiny-ImageNet\n","        )\n","\n","    def forward(self, x):\n","        batch_size = x.size(0)\n","        x = self.conv_block(x)\n","        x = x.view(batch_size, 64, 64).transpose(1, 2) # Format Token: [Batch, 64, 64]\n","        x = self.lookthem(x)\n","        return self.classifier(x)\n","\n","\n","# ==========================================\n","# 3. PROSES INISIALISASI & RUNTIME TRAINING\n","# ==========================================\n","device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n","model = LookThemTinyImageNet().to(device)\n","\n","criterion = nn.CrossEntropyLoss()\n","# LR disetel 0.001 dengan weight decay tipis untuk membantu kestabilan gradien rasio\n","optimizer = optim.Adam(model.parameters(), lr=0.001, weight_decay=1e-5)\n","\n","print(f\"πŸš€ Memulai pengujian arsitektur LookThem di Tiny-ImageNet menggunakan {device}...\")\n","\n","# Jalankan 15 Epoch awal untuk melihat performa belajarnya\n","for epoch in range(15):\n","    model.train()\n","    total_loss = 0\n","    correct = 0\n","    total = 0\n","\n","    for batch_idx, (data, target) in enumerate(train_loader):\n","        data, target = data.to(device), target.to(device)\n","\n","        optimizer.zero_grad()\n","        output = model(data)\n","        loss = criterion(output, target)\n","        loss.backward()\n","        optimizer.step()\n","\n","        total_loss += loss.item()\n","        _, predicted = output.max(1)\n","        total += target.size(0)\n","        correct += predicted.eq(target).sum().item()\n","\n","    acc = 100. * correct / total\n","    print(f\"Epoch {epoch+1:02d} -> Train Loss: {total_loss/len(train_loader):.4f} | Train Acc: {acc:.2f}%\")\n","\n","\n","# ==========================================\n","# 4. 
EVALUASI JUJUR (RESET VARIABEL TERJAMIN)\n","# ==========================================\n","model.eval()\n","test_loss = 0\n","test_correct = 0\n","test_total = 0\n","\n","print(\"\\nπŸ”’ Memulai Pengujian Validasi Jujur (Variables Reseted)...\")\n","\n","with torch.no_grad():\n","    for data, target in val_loader:\n","        data, target = data.to(device), target.to(device)\n","        output = model(data)\n","        loss = criterion(output, target)\n","\n","        test_loss += loss.item()\n","        _, predicted = output.max(1)\n","        test_total += target.size(0)\n","        test_correct += predicted.eq(target).sum().item()\n","\n","final_test_acc = 100. * test_correct / test_total\n","print(\"=== HASIL EVALUASI AKHIR TINY-IMAGENET ===\")\n","print(f\"Test Loss: {test_loss/len(val_loader):.4f} | Test Accuracy: {final_test_acc:.2f}%\")\n","\n","# Simpan bobot model baru\n","torch.save(model.state_dict(), \"LookThemTinyImageNet.pth\")\n","print(f\"πŸ’Ύ Model berhasil disimpan! Ukuran file: {os.path.getsize('LookThemTinyImageNet.pth') / (1024*1024):.2f} MB\")"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"72ril_OM_POr","executionInfo":{"status":"ok","timestamp":1778941914694,"user_tz":-420,"elapsed":939885,"user":{"displayName":"Cici rizky plk","userId":"03714270658772765776"}},"outputId":"663e71af-48de-49c2-e7be-46189449702d"},"execution_count":null,"outputs":[{"output_type":"stream","name":"stdout","text":["πŸ“₯ Mengunduh Tiny-ImageNet (~230MB)... Mohon tunggu...\n","βœ… Download Selesai!\n","πŸ“¦ Mengekstrak dataset...\n","βœ… Ekstrak Selesai!\n","🧹 Merapikan struktur folder Validation asli Tiny-ImageNet...\n","βœ… Folder Validation sekarang sudah rapi dan siap diuji!\n","πŸš€ Memulai pengujian arsitektur LookThem di Tiny-ImageNet menggunakan cuda...\n","Epoch 01 -> Train Loss: 4.6991 | Train Acc: 5.91%\n","Epoch 02 -> Train Loss: 3.9973 | Train Acc: 14.41%\n","Epoch 03 -> Train Loss: 3.7383 | Train Acc: 18.31%\n","Epoch 04 -> Train Loss: 3.5488 | Train Acc: 21.17%\n","Epoch 05 -> Train Loss: 3.4567 | Train Acc: 22.73%\n","Epoch 06 -> Train Loss: 3.3583 | Train Acc: 24.25%\n","Epoch 07 -> Train Loss: 3.2681 | Train Acc: 25.89%\n","Epoch 08 -> Train Loss: 3.2076 | Train Acc: 26.87%\n","Epoch 09 -> Train Loss: 3.1444 | Train Acc: 27.98%\n","Epoch 10 -> Train Loss: 3.0852 | Train Acc: 28.98%\n","Epoch 11 -> Train Loss: 3.0375 | Train Acc: 29.72%\n","Epoch 12 -> Train Loss: 2.9929 | Train Acc: 30.51%\n","Epoch 13 -> Train Loss: 2.9502 | Train Acc: 31.27%\n","Epoch 14 -> Train Loss: 2.9084 | Train Acc: 32.09%\n","Epoch 15 -> Train Loss: 2.8710 | Train Acc: 32.56%\n","\n","πŸ”’ Memulai Pengujian Validasi Jujur (Variables Reseted)...\n","=== HASIL EVALUASI AKHIR TINY-IMAGENET ===\n","Test Loss: 3.0152 | Test Accuracy: 31.25%\n","πŸ’Ύ Model berhasil disimpan! 
Ukuran file: 5.33 MB\n"]}]},{"cell_type":"code","source":["print(f\"πŸš€ Memulai pengujian arsitektur LookThem di Tiny-ImageNet menggunakan {device}...\")\n","\n","# Jalankan 15 Epoch awal untuk melihat performa belajarnya\n","for epoch in range(15, 30):\n","    model.train()\n","    total_loss = 0\n","    correct = 0\n","    total = 0\n","\n","    for batch_idx, (data, target) in enumerate(train_loader):\n","        data, target = data.to(device), target.to(device)\n","\n","        optimizer.zero_grad()\n","        output = model(data)\n","        loss = criterion(output, target)\n","        loss.backward()\n","        optimizer.step()\n","\n","        total_loss += loss.item()\n","        _, predicted = output.max(1)\n","        total += target.size(0)\n","        correct += predicted.eq(target).sum().item()\n","\n","    acc = 100. * correct / total\n","    print(f\"Epoch {epoch+1:02d} -> Train Loss: {total_loss/len(train_loader):.4f} | Train Acc: {acc:.2f}%\")\n","\n","\n","# ==========================================\n","# 4. EVALUASI JUJUR (RESET VARIABEL TERJAMIN)\n","# ==========================================\n","model.eval()\n","test_loss = 0\n","test_correct = 0\n","test_total = 0\n","\n","print(\"\\nπŸ”’ Memulai Pengujian Validasi Jujur (Variables Reseted)...\")\n","\n","with torch.no_grad():\n","    for data, target in val_loader:\n","        data, target = data.to(device), target.to(device)\n","        output = model(data)\n","        loss = criterion(output, target)\n","\n","        test_loss += loss.item()\n","        _, predicted = output.max(1)\n","        test_total += target.size(0)\n","        test_correct += predicted.eq(target).sum().item()\n","\n","final_test_acc = 100. * test_correct / test_total\n","print(\"=== HASIL EVALUASI AKHIR TINY-IMAGENET ===\")\n","print(f\"Test Loss: {test_loss/len(val_loader):.4f} | Test Accuracy: {final_test_acc:.2f}%\")\n","\n","# Simpan bobot model baru\n","torch.save(model.state_dict(), \"LookThemTinyImageNet.pth\")\n","print(f\"πŸ’Ύ Model berhasil disimpan! 
Ukuran file: {os.path.getsize('LookThemTinyImageNet.pth') / (1024*1024):.2f} MB\")"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"EFr244VSNS1K","executionInfo":{"status":"ok","timestamp":1778942999059,"user_tz":-420,"elapsed":369037,"user":{"displayName":"Cici rizky plk","userId":"03714270658772765776"}},"outputId":"a91355db-a6da-40a8-82e6-5608d9b8dba0"},"execution_count":null,"outputs":[{"output_type":"stream","name":"stdout","text":["πŸš€ Memulai pengujian arsitektur LookThem di Tiny-ImageNet menggunakan cuda...\n","Epoch 16 -> Train Loss: 2.8393 | Train Acc: 33.47%\n","Epoch 17 -> Train Loss: 2.8121 | Train Acc: 33.71%\n","Epoch 18 -> Train Loss: 2.7853 | Train Acc: 34.12%\n","Epoch 19 -> Train Loss: 2.7684 | Train Acc: 34.62%\n","Epoch 20 -> Train Loss: 2.7385 | Train Acc: 34.97%\n","Epoch 21 -> Train Loss: 2.7159 | Train Acc: 35.45%\n","Epoch 22 -> Train Loss: 2.6975 | Train Acc: 35.88%\n","Epoch 23 -> Train Loss: 2.6735 | Train Acc: 36.17%\n","Epoch 24 -> Train Loss: 2.6523 | Train Acc: 36.49%\n","Epoch 25 -> Train Loss: 2.6296 | Train Acc: 36.91%\n","Epoch 26 -> Train Loss: 2.6086 | Train Acc: 37.34%\n","Epoch 27 -> Train Loss: 2.5844 | Train Acc: 37.78%\n","Epoch 28 -> Train Loss: 2.5721 | Train Acc: 38.00%\n","Epoch 29 -> Train Loss: 2.5451 | Train Acc: 38.29%\n","Epoch 30 -> Train Loss: 2.5247 | Train Acc: 38.80%\n","\n","πŸ”’ Memulai Pengujian Validasi Jujur (Variables Reseted)...\n","=== HASIL EVALUASI AKHIR TINY-IMAGENET ===\n","Test Loss: 3.0226 | Test Accuracy: 32.00%\n","πŸ’Ύ Model berhasil disimpan! Ukuran file: 5.33 MB\n"]}]},{"cell_type":"code","source":[],"metadata":{"id":"M_lWRluDRQ9U"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["print(f\"πŸš€ Memulai pengujian arsitektur LookThem di Tiny-ImageNet menggunakan {device}...\")\n","\n","# Jalankan 15 Epoch awal untuk melihat performa belajarnya\n","for epoch in range(30, 45):\n","    model.train()\n","    total_loss = 0\n","    correct = 0\n","    total = 0\n","\n","    for batch_idx, (data, target) in enumerate(train_loader):\n","        data, target = data.to(device), target.to(device)\n","\n","        optimizer.zero_grad()\n","        output = model(data)\n","        loss = criterion(output, target)\n","        loss.backward()\n","        optimizer.step()\n","\n","        total_loss += loss.item()\n","        _, predicted = output.max(1)\n","        total += target.size(0)\n","        correct += predicted.eq(target).sum().item()\n","\n","    acc = 100. * correct / total\n","    print(f\"Epoch {epoch+1:02d} -> Train Loss: {total_loss/len(train_loader):.4f} | Train Acc: {acc:.2f}%\")\n","\n","\n","# ==========================================\n","# 4. EVALUASI JUJUR (RESET VARIABEL TERJAMIN)\n","# ==========================================\n","model.eval()\n","test_loss = 0\n","test_correct = 0\n","test_total = 0\n","\n","print(\"\\nπŸ”’ Memulai Pengujian Validasi Jujur (Variables Reseted)...\")\n","\n","with torch.no_grad():\n","    for data, target in val_loader:\n","        data, target = data.to(device), target.to(device)\n","        output = model(data)\n","        loss = criterion(output, target)\n","\n","        test_loss += loss.item()\n","        _, predicted = output.max(1)\n","        test_total += target.size(0)\n","        test_correct += predicted.eq(target).sum().item()\n","\n","final_test_acc = 100. 
* test_correct / test_total\n","print(\"=== HASIL EVALUASI AKHIR TINY-IMAGENET ===\")\n","print(f\"Test Loss: {test_loss/len(val_loader):.4f} | Test Accuracy: {final_test_acc:.2f}%\")\n","\n","# Simpan bobot model baru\n","torch.save(model.state_dict(), \"LookThemTinyImageNet.pth\")\n","print(f\"πŸ’Ύ Model berhasil disimpan! Ukuran file: {os.path.getsize('LookThemTinyImageNet.pth') / (1024*1024):.2f} MB\")"],"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":793},"executionInfo":{"status":"error","timestamp":1778943411483,"user_tz":-420,"elapsed":64428,"user":{"displayName":"Cici rizky plk","userId":"03714270658772765776"}},"outputId":"37d80db0-f6bf-4dbf-d204-e222e9b774a7","id":"z5U5k995RR6X"},"execution_count":null,"outputs":[{"output_type":"stream","name":"stdout","text":["πŸš€ Memulai pengujian arsitektur LookThem di Tiny-ImageNet menggunakan cuda...\n","Epoch 31 -> Train Loss: 2.5227 | Train Acc: 38.78%\n","Epoch 32 -> Train Loss: 2.5018 | Train Acc: 39.10%\n","Epoch 33 -> Train Loss: 2.4789 | Train Acc: 39.62%\n","Epoch 34 -> Train Loss: 2.4680 | Train Acc: 40.01%\n","Epoch 35 -> Train Loss: 2.4601 | Train Acc: 40.04%\n"]},{"output_type":"error","ename":"KeyboardInterrupt","evalue":"","traceback":["\u001b[0;31m---------------------------------------------------------------------------\u001b[0m","\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)","\u001b[0;32m/tmp/ipykernel_3524/1684637601.py\u001b[0m in \u001b[0;36m<cell line: 0>\u001b[0;34m()\u001b[0m\n\u001b[1;32m      8\u001b[0m     \u001b[0mtotal\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      9\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 10\u001b[0;31m     \u001b[0;32mfor\u001b[0m \u001b[0mbatch_idx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtarget\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32min\u001b[0m \u001b[0menumerate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtrain_loader\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     11\u001b[0m         \u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtarget\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mto\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdevice\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtarget\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mto\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdevice\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     12\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/torch/utils/data/dataloader.py\u001b[0m in \u001b[0;36m__next__\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m    739\u001b[0m                 \u001b[0;31m# TODO(https://github.com/pytorch/pytorch/issues/76750)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    740\u001b[0m                 \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_reset\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m  \u001b[0;31m# type: ignore[call-arg]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 741\u001b[0;31m             \u001b[0mdata\u001b[0m \u001b[0;34m=\u001b[0m 
\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_next_data\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    742\u001b[0m             \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_num_yielded\u001b[0m \u001b[0;34m+=\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    743\u001b[0m             if (\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/torch/utils/data/dataloader.py\u001b[0m in \u001b[0;36m_next_data\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m   1522\u001b[0m                     \u001b[0;34m\"Invalid iterator state: shutdown or no outstanding tasks when fetching next data\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1523\u001b[0m                 )\n\u001b[0;32m-> 1524\u001b[0;31m             \u001b[0midx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdata\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_get_data\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m   1525\u001b[0m             \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_tasks_outstanding\u001b[0m \u001b[0;34m-=\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1526\u001b[0m             \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_dataset_kind\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0m_DatasetKind\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mIterable\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/torch/utils/data/dataloader.py\u001b[0m in \u001b[0;36m_get_data\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m   1471\u001b[0m         \u001b[0;32melif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_pin_memory\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1472\u001b[0m             \u001b[0;32mwhile\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_pin_memory_thread\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mis_alive\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1473\u001b[0;31m                 \u001b[0msuccess\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdata\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_try_get_data\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m   1474\u001b[0m                 \u001b[0;32mif\u001b[0m \u001b[0msuccess\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1475\u001b[0m                     \u001b[0;32mreturn\u001b[0m \u001b[0mdata\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/torch/utils/data/dataloader.py\u001b[0m in \u001b[0;36m_try_get_data\u001b[0;34m(self, timeout)\u001b[0m\n\u001b[1;32m   1308\u001b[0m         \u001b[0;31m#   (bool: whether successfully get data, any: data if successful else None)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1309\u001b[0m         \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1310\u001b[0;31m             
\u001b[0mdata\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_data_queue\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtimeout\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtimeout\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m   1311\u001b[0m             \u001b[0;32mreturn\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdata\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m   1312\u001b[0m         \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/lib/python3.12/queue.py\u001b[0m in \u001b[0;36mget\u001b[0;34m(self, block, timeout)\u001b[0m\n\u001b[1;32m    178\u001b[0m                     \u001b[0;32mif\u001b[0m \u001b[0mremaining\u001b[0m \u001b[0;34m<=\u001b[0m \u001b[0;36m0.0\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    179\u001b[0m                         \u001b[0;32mraise\u001b[0m \u001b[0mEmpty\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 180\u001b[0;31m                     \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnot_empty\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mwait\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mremaining\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    181\u001b[0m             \u001b[0mitem\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_get\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    182\u001b[0m             \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnot_full\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnotify\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/lib/python3.12/threading.py\u001b[0m in \u001b[0;36mwait\u001b[0;34m(self, timeout)\u001b[0m\n\u001b[1;32m    357\u001b[0m             \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    358\u001b[0m                 \u001b[0;32mif\u001b[0m \u001b[0mtimeout\u001b[0m \u001b[0;34m>\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 359\u001b[0;31m                     \u001b[0mgotit\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mwaiter\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0macquire\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtimeout\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    360\u001b[0m                 \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    361\u001b[0m                     \u001b[0mgotit\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mwaiter\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0macquire\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;31mKeyboardInterrupt\u001b[0m: "]}]},{"cell_type":"code","source":["model.eval()\n","test_loss = 0\n","test_correct = 0\n","test_total = 0\n","with torch.no_grad():\n","    for data, target in 
val_loader:\n","        data, target = data.to(device), target.to(device)\n","        output = model(data)\n","        loss = criterion(output, target)\n","\n","        test_loss += loss.item()\n","        _, predicted = output.max(1)\n","        test_total += target.size(0)\n","        test_correct += predicted.eq(target).sum().item()\n","\n","final_test_acc = 100. * test_correct / test_total\n","print(\"=== HASIL EVALUASI AKHIR TINY-IMAGENET ===\")\n","print(f\"Test Loss: {test_loss/len(val_loader):.4f} | Test Accuracy: {final_test_acc:.2f}%\")\n","\n","# Simpan bobot model baru"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"fyhtzSeOSlmd","executionInfo":{"status":"ok","timestamp":1778943445288,"user_tz":-420,"elapsed":5378,"user":{"displayName":"Cici rizky plk","userId":"03714270658772765776"}},"outputId":"ff810e71-e18d-4c3f-83bc-c514dca65897"},"execution_count":null,"outputs":[{"output_type":"stream","name":"stdout","text":["=== HASIL EVALUASI AKHIR TINY-IMAGENET ===\n","Test Loss: 3.0402 | Test Accuracy: 32.05%\n"]}]},{"cell_type":"markdown","source":["LookThem Tiny ImageNet 2"],"metadata":{"id":"hAk3-mYgSxbH"}},{"cell_type":"code","source":["import os\n","import zipfile\n","import urllib.request\n","import math\n","import torch\n","import torch.nn as nn\n","import torch.nn.functional as F\n","import torch.optim as optim\n","from torch.utils.data import DataLoader\n","from torchvision import datasets, transforms\n","import torch.optim.lr_scheduler as lr_scheduler\n","\n","# ==========================================\n","# 1. OTOMATISASI DOWNLOAD & PREPROCESS DATASET\n","# ==========================================\n","def prepare_tiny_imagenet():\n","    dataset_url = \"http://cs231n.stanford.edu/tiny-imagenet-200.zip\"\n","    zip_path = \"./tiny-imagenet-200.zip\"\n","    extract_path = \"./tiny-imagenet-200\"\n","\n","    if not os.path.exists(zip_path):\n","        print(\"πŸ“₯ Mengunduh Tiny-ImageNet (~230MB)... 
Mohon tunggu...\")\n","        urllib.request.urlretrieve(dataset_url, zip_path)\n","        print(\"βœ… Download Selesai!\")\n","\n","    if not os.path.exists(extract_path):\n","        print(\"πŸ“¦ Mengekstrak dataset...\")\n","        with zipfile.ZipFile(zip_path, 'r') as zip_ref:\n","            zip_ref.extractall(\"./\")\n","        print(\"βœ… Ekstrak Selesai!\")\n","\n","    return os.path.join(extract_path, \"train\"), os.path.join(extract_path, \"val\")\n","\n","train_dir, val_dir = prepare_tiny_imagenet()\n","\n","transform_train = transforms.Compose([\n","    transforms.RandomHorizontalFlip(),\n","    transforms.RandomRotation(15),\n","    transforms.ToTensor(),\n","    transforms.Normalize((0.4802, 0.4481, 0.3975), (0.2302, 0.2265, 0.2262))\n","])\n","\n","transform_val = transforms.Compose([\n","    transforms.ToTensor(),\n","    transforms.Normalize((0.4802, 0.4481, 0.3975), (0.2302, 0.2265, 0.2262))\n","])\n","\n","train_dataset = datasets.ImageFolder(root=train_dir, transform=transform_train)\n","\n","# MERAPIKAN FOLDER VAL ASLI TINY-IMAGENET\n","val_img_dir = \"./tiny-imagenet-200/val/images\"\n","val_annotations = \"./tiny-imagenet-200/val/val_annotations.txt\"\n","\n","if os.path.exists(val_img_dir):\n","    print(\"🧹 Merapikan struktur folder Validation asli Tiny-ImageNet...\")\n","    with open(val_annotations, \"r\") as f:\n","        lines = f.readlines()\n","\n","    for line in lines:\n","        parts = line.strip().split(\"\\t\")\n","        img_name = parts[0]\n","        class_name = parts[1]\n","\n","        class_dir = os.path.join(\"./tiny-imagenet-200/val\", class_name)\n","        os.makedirs(class_dir, exist_ok=True)\n","\n","        src_path = os.path.join(val_img_dir, img_name)\n","        dst_path = os.path.join(class_dir, img_name)\n","        if os.path.exists(src_path):\n","            os.rename(src_path, dst_path)\n","\n","    os.rmdir(val_img_dir)\n","    print(\"βœ… Folder Validation sekarang sudah rapi dan siap diuji!\")\n","\n","val_dataset = datasets.ImageFolder(root=val_dir, transform=transform_val)\n","\n","train_loader = DataLoader(train_dataset, batch_size=128, shuffle=True, num_workers=2, pin_memory=True)\n","val_loader = DataLoader(val_dataset, batch_size=256, shuffle=False, num_workers=2, pin_memory=True)\n","\n","\n","# ==========================================\n","# 2. 
DEFINISI ARSITEKTUR LOOKTHEM (256 TOKENS VERSION)\n","# ==========================================\n","class LookThemLayer(nn.Module):\n","    def __init__(self, num_tokens, in_features, hidden_dim):\n","        super(LookThemLayer, self).__init__()\n","        self.num_tokens = num_tokens\n","        self.in_features = in_features\n","\n","        self.mod1_w1 = nn.Parameter(torch.randn(num_tokens, in_features, hidden_dim))\n","        self.mod1_b1 = nn.Parameter(torch.zeros(num_tokens, hidden_dim))\n","        self.mod1_w2 = nn.Parameter(torch.randn(num_tokens, hidden_dim, 1))\n","        self.mod1_b2 = nn.Parameter(torch.zeros(num_tokens, 1))\n","\n","        self.mod2_w1 = nn.Parameter(torch.randn(num_tokens, in_features, hidden_dim))\n","        self.mod2_b1 = nn.Parameter(torch.zeros(num_tokens, hidden_dim))\n","        self.mod2_w2 = nn.Parameter(torch.randn(num_tokens, hidden_dim, 1))\n","        self.mod2_b2 = nn.Parameter(torch.zeros(num_tokens, 1))\n","\n","        self.trans_w = nn.Parameter(torch.randn(num_tokens, 1, 1))\n","        self.trans_b = nn.Parameter(torch.zeros(num_tokens, 1))\n","        self._init_weights()\n","\n","    def _init_weights(self):\n","        for w in [self.mod1_w1, self.mod2_w1, self.mod1_w2, self.mod2_w2, self.trans_w]:\n","            nn.init.kaiming_uniform_(w, a=math.sqrt(5))\n","\n","    def forward(self, x):\n","        N = self.num_tokens\n","\n","        h1 = torch.einsum('bti,tij->btj', x, self.mod1_w1) + self.mod1_b1\n","        out_m1 = torch.einsum('btj,tjk->btk', F.gelu(h1), self.mod1_w2) + self.mod1_b2\n","\n","        h2 = torch.einsum('bti,tij->btj', x, self.mod2_w1) + self.mod2_b1\n","        out_m2 = torch.einsum('btj,tjk->btk', F.gelu(h2), self.mod2_w2) + self.mod2_b2\n","\n","        out_m2_safe = out_m2 + 1e-5\n","        compare = torch.tanh(out_m1.unsqueeze(2) / out_m2_safe.unsqueeze(1))\n","        compare2 = torch.tanh(out_m1.unsqueeze(1) / out_m2_safe.unsqueeze(2))\n","\n","        bias_reshaped = self.trans_b.view(1, 1, N, 1)\n","        trans_compare = torch.einsum('bije,jef->bijf', compare, self.trans_w) + bias_reshaped\n","        trans_compare2 = torch.einsum('bije,jef->bijf', compare2, self.trans_w) + bias_reshaped\n","\n","        interaksi = (trans_compare * x.unsqueeze(2) + trans_compare2 * x.unsqueeze(1)) / 2\n","\n","        mask = 1.0 - torch.eye(N, device=x.device)\n","        interaksi_masked = interaksi * mask.view(1, N, N, 1)\n","\n","        return interaksi_masked.sum(dim=2) / (N - 1.0)\n","\n","\n","class LookThemTinyImageNet(nn.Module):\n","    def __init__(self):\n","        super(LookThemTinyImageNet, self).__init__()\n","\n","        # CNN dirancang stabil untuk menghasilkan keluaran spasial 16x16 (256 Token)\n","        self.conv_block = nn.Sequential(\n","            nn.Conv2d(3, 16, kernel_size=3, stride=2, padding=1),   # -> [B, 16, 32, 32]\n","            nn.BatchNorm2d(16),\n","            nn.GELU(),\n","\n","            nn.Conv2d(16, 32, kernel_size=3, stride=2, padding=1),  # -> [B, 32, 16, 16]\n","            nn.BatchNorm2d(32),\n","            nn.GELU(),\n","\n","            nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),  # -> [B, 64, 16, 16]\n","            nn.BatchNorm2d(64),\n","            nn.GELU(),\n","        )\n","\n","        # Matriks spasial 16x16 = 256 Token. 
Setiap token membawa 64 fitur.\n","        self.lookthem = LookThemLayer(num_tokens=256, in_features=64, hidden_dim=32)\n","\n","        # Pengaman Dropout 0.4 dan aktivasi GELU untuk menjaga kehalusan gradien\n","        self.classifier = nn.Sequential(\n","            nn.Flatten(),\n","            nn.Linear(256 * 64, 256),\n","            nn.GELU(),\n","            nn.Dropout(0.4),\n","            nn.Linear(256, 200)\n","        )\n","\n","    def forward(self, x):\n","        batch_size = x.size(0)\n","        x = self.conv_block(x) # Output awal: [Batch, 64, 16, 16]\n","\n","        # Penyesuaian dimensi token: meratakan spasial 16x16 menjadi 256 token\n","        x = x.view(batch_size, 64, 256).transpose(1, 2) # Hasil: [Batch, 256, 64]\n","\n","        x = self.lookthem(x)\n","        return self.classifier(x)\n","\n","\n","# ==========================================\n","# 3. PROSES INISIALISASI & RUNTIME TRAINING\n","# ==========================================\n","device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n","model = LookThemTinyImageNet().to(device)\n","\n","criterion = nn.CrossEntropyLoss()\n","\n","# Perubahan strategis: Weight decay diperketat ke 1e-4 untuk meredam overfitting\n","optimizer = optim.Adam(model.parameters(), lr=0.001, weight_decay=1e-4)\n","\n","# Total eksperimen disetel untuk 45 Epoch mengikuti skema Scaling Law\n","TOTAL_EPOCHS = 45\n","scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=TOTAL_EPOCHS)\n","\n","print(f\"πŸš€ Memulai pengujian arsitektur LookThem di Tiny-ImageNet menggunakan {device}...\")\n","\n","for epoch in range(TOTAL_EPOCHS):\n","    model.train()\n","    total_loss = 0\n","    correct = 0\n","    total = 0\n","\n","    for batch_idx, (data, target) in enumerate(train_loader):\n","        data, target = data.to(device), target.to(device)\n","\n","        optimizer.zero_grad()\n","        output = model(data)\n","        loss = criterion(output, target)\n","        loss.backward()\n","        optimizer.step()\n","\n","        total_loss += loss.item()\n","        _, predicted = output.max(1)\n","        total += target.size(0)\n","        correct += predicted.eq(target).sum().item()\n","\n","    acc = 100. * correct / total\n","    current_lr = optimizer.param_groups[0]['lr']\n","    print(f\"Epoch {epoch+1:02d}/{TOTAL_EPOCHS} -> Train Loss: {total_loss/len(train_loader):.4f} | Train Acc: {acc:.2f}% | LR: {current_lr:.6f}\")\n","\n","    # EKSEKUSI SCHEDULER: menurunkan pedal gas secara halus di setiap akhir epoch\n","    scheduler.step()\n","\n","\n","# ==========================================\n","# 4. EVALUASI JUJUR (RESET VARIABEL TERJAMIN)\n","# ==========================================\n","model.eval()\n","test_loss = 0\n","test_correct = 0\n","test_total = 0\n","\n","print(\"\\nπŸ”’ Memulai Pengujian Validasi Jujur (Variables Reseted)...\")\n","\n","with torch.no_grad():\n","    for data, target in val_loader:\n","        data, target = data.to(device), target.to(device)\n","        output = model(data)\n","        loss = criterion(output, target)\n","\n","        test_loss += loss.item()\n","        _, predicted = output.max(1)\n","        test_total += target.size(0)\n","        test_correct += predicted.eq(target).sum().item()\n","\n","final_test_acc = 100. 
* test_correct / test_total\n","print(\"=== HASIL EVALUASI AKHIR TINY-IMAGENET ===\")\n","print(f\"Test Loss: {test_loss/len(val_loader):.4f} | Test Accuracy: {final_test_acc:.2f}%\")\n","\n","# Simpan bobot model baru\n","torch.save(model.state_dict(), \"LookThemTinyImageNet_V3.pth\")\n","print(f\"πŸ’Ύ Model berhasil disimpan! Ukuran file: {os.path.getsize('LookThemTinyImageNet_V3.pth') / (1024*1024):.2f} MB\")"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"azW3gCuJS2wa","outputId":"cb94b826-ddd0-41ec-c49a-43bf93ad20c3"},"execution_count":null,"outputs":[{"output_type":"stream","name":"stdout","text":["πŸ“₯ Mengunduh Tiny-ImageNet (~230MB)... Mohon tunggu...\n","βœ… Download Selesai!\n","πŸ“¦ Mengekstrak dataset...\n","βœ… Ekstrak Selesai!\n","🧹 Merapikan struktur folder Validation asli Tiny-ImageNet...\n","βœ… Folder Validation sekarang sudah rapi dan siap diuji!\n","πŸš€ Memulai pengujian arsitektur LookThem di Tiny-ImageNet menggunakan cuda...\n","Epoch 01/45 -> Train Loss: 4.6137 | Train Acc: 6.98% | LR: 0.001000\n","Epoch 02/45 -> Train Loss: 3.9675 | Train Acc: 14.89% | LR: 0.000999\n","Epoch 03/45 -> Train Loss: 3.6471 | Train Acc: 19.76% | LR: 0.000995\n","Epoch 04/45 -> Train Loss: 3.4594 | Train Acc: 22.86% | LR: 0.000989\n","Epoch 05/45 -> Train Loss: 3.3236 | Train Acc: 25.10% | LR: 0.000981\n","Epoch 06/45 -> Train Loss: 3.2133 | Train Acc: 26.78% | LR: 0.000970\n","Epoch 07/45 -> Train Loss: 3.1186 | Train Acc: 28.46% | LR: 0.000957\n"]}]},{"cell_type":"markdown","source":["LookThem V4"],"metadata":{"id":"G0gYldb-v8V5"}},{"cell_type":"code","source":["import os\n","import zipfile\n","import urllib.request\n","import math\n","import torch\n","import torch.nn as nn\n","import torch.nn.functional as F\n","import torch.optim as optim\n","from torch.utils.data import DataLoader\n","from torchvision import datasets, transforms\n","\n","# ==========================================\n","# 1. OTOMATISASI DOWNLOAD & PREPROCESS DATASET\n","# ==========================================\n","def prepare_tiny_imagenet():\n","    dataset_url = \"http://cs231n.stanford.edu/tiny-imagenet-200.zip\"\n","    zip_path = \"./tiny-imagenet-200.zip\"\n","    extract_path = \"./tiny-imagenet-200\"\n","\n","    if not os.path.exists(zip_path):\n","        print(\"πŸ“₯ Mengunduh Tiny-ImageNet (~230MB)... 
Mohon tunggu...\")\n","        urllib.request.urlretrieve(dataset_url, zip_path)\n","        print(\"βœ… Download Selesai!\")\n","\n","    if not os.path.exists(extract_path):\n","        print(\"πŸ“¦ Mengekstrak dataset...\")\n","        with zipfile.ZipFile(zip_path, 'r') as zip_ref:\n","            zip_ref.extractall(\"./\")\n","        print(\"βœ… Ekstrak Selesai!\")\n","\n","    return os.path.join(extract_path, \"train\"), os.path.join(extract_path, \"val\")\n","\n","train_dir, val_dir = prepare_tiny_imagenet()\n","\n","# Merapikan folder validasi bawaan Tiny-ImageNet\n","val_img_dir = \"./tiny-imagenet-200/val/images\"\n","val_annotations = \"./tiny-imagenet-200/val/val_annotations.txt\"\n","\n","if os.path.exists(val_img_dir):\n","    print(\"🧹 Merapikan struktur folder Validation asli Tiny-ImageNet...\")\n","    with open(val_annotations, \"r\") as f:\n","        lines = f.readlines()\n","\n","    for line in lines:\n","        parts = line.strip().split(\"\\t\")\n","        img_name, class_name = parts[0], parts[1]\n","        class_dir = os.path.join(\"./tiny-imagenet-200/val\", class_name)\n","        os.makedirs(class_dir, exist_ok=True)\n","        src_path = os.path.join(val_img_dir, img_name)\n","        dst_path = os.path.join(class_dir, img_name)\n","        if os.path.exists(src_path):\n","            os.rename(src_path, dst_path)\n","    os.rmdir(val_img_dir)\n","    print(\"βœ… Folder Validation sekarang sudah rapi!\")\n","\n","# Augmentasi standar (Input asli tetap 3 channel RGB, pemisahan dilakukan di dalam Model)\n","transform_train = transforms.Compose([\n","    transforms.RandomHorizontalFlip(),\n","    transforms.RandomRotation(15),\n","    transforms.ToTensor(),\n","    transforms.Normalize((0.4802, 0.4481, 0.3975), (0.2302, 0.2265, 0.2262))\n","])\n","\n","transform_val = transforms.Compose([\n","    transforms.ToTensor(),\n","    transforms.Normalize((0.4802, 0.4481, 0.3975), (0.2302, 0.2265, 0.2262))\n","])\n","\n","train_dataset = datasets.ImageFolder(root=train_dir, transform=transform_train)\n","val_dataset = datasets.ImageFolder(root=val_dir, transform=transform_val)\n","\n","train_loader = DataLoader(train_dataset, batch_size=128, shuffle=True, num_workers=2, pin_memory=True)\n","val_loader = DataLoader(val_dataset, batch_size=256, shuffle=False, num_workers=2, pin_memory=True)\n","\n","# ==========================================\n","# 2. 
CORE LAYER: LOOKTHEM CORE LAYER\n","# ==========================================\n","class LookThemLayer(nn.Module):\n","    def __init__(self, num_tokens, in_features, hidden_dim):\n","        super(LookThemLayer, self).__init__()\n","        self.num_tokens = num_tokens\n","        self.in_features = in_features\n","\n","        self.mod1_w1 = nn.Parameter(torch.randn(num_tokens, in_features, hidden_dim))\n","        self.mod1_b1 = nn.Parameter(torch.zeros(num_tokens, hidden_dim))\n","        self.mod1_w2 = nn.Parameter(torch.randn(num_tokens, hidden_dim, 1))\n","        self.mod1_b2 = nn.Parameter(torch.zeros(num_tokens, 1))\n","\n","        self.mod2_w1 = nn.Parameter(torch.randn(num_tokens, in_features, hidden_dim))\n","        self.mod2_b1 = nn.Parameter(torch.zeros(num_tokens, hidden_dim))\n","        self.mod2_w2 = nn.Parameter(torch.randn(num_tokens, hidden_dim, 1))\n","        self.mod2_b2 = nn.Parameter(torch.zeros(num_tokens, 1))\n","\n","        self.trans_w = nn.Parameter(torch.randn(num_tokens, 1, 1))\n","        self.trans_b = nn.Parameter(torch.zeros(num_tokens, 1))\n","        self._init_weights()\n","\n","    def _init_weights(self):\n","        for w in [self.mod1_w1, self.mod2_w1, self.mod1_w2, self.mod2_w2, self.trans_w]:\n","            nn.init.kaiming_uniform_(w, a=math.sqrt(5))\n","\n","    def forward(self, x):\n","        N = self.num_tokens\n","        h1 = torch.einsum('bti,tij->btj', x, self.mod1_w1) + self.mod1_b1\n","        out_m1 = torch.einsum('btj,tjk->btk', F.gelu(h1), self.mod1_w2) + self.mod1_b2\n","\n","        h2 = torch.einsum('bti,tij->btj', x, self.mod2_w1) + self.mod2_b1\n","        out_m2 = torch.einsum('btj,tjk->btk', F.gelu(h2), self.mod2_w2) + self.mod2_b2\n","\n","        out_m2_safe = out_m2 + 1e-5\n","        compare = torch.tanh(out_m1.unsqueeze(2) / out_m2_safe.unsqueeze(1))\n","        compare2 = torch.tanh(out_m1.unsqueeze(1) / out_m2_safe.unsqueeze(2))\n","\n","        bias_reshaped = self.trans_b.view(1, 1, N, 1)\n","        trans_compare = torch.einsum('bije,jef->bijf', compare, self.trans_w) + bias_reshaped\n","        trans_compare2 = torch.einsum('bije,jef->bijf', compare2, self.trans_w) + bias_reshaped\n","\n","        interaksi = (trans_compare * x.unsqueeze(2) + trans_compare2 * x.unsqueeze(1)) / 2\n","        mask = 1.0 - torch.eye(N, device=x.device)\n","        interaksi_masked = interaksi * mask.view(1, N, N, 1)\n","\n","        return interaksi_masked.sum(dim=2) / (N - 1.0)\n","\n","# ==========================================\n","# 3. 
INDUK ARSITEKTUR: LOOKTHEM V4 (DUAL-STREAM)\n","# ==========================================\n","class LookThemV4(nn.Module):\n","    def __init__(self):\n","        super(LookThemV4, self).__init__()\n","\n","        # --- STREAM A: STRUKTUR/BENTUK (64x64 Grayscale + Hybrid Dilated Conv) ---\n","        # Menggunakan koefisien ITU-R BT.601 untuk konversi internal ke grayscale [B, 1, 64, 64]\n","        self.register_buffer('grayscale_weights', torch.tensor([0.299, 0.587, 0.114]).view(1, 3, 1, 1))\n","\n","        self.stream_a = nn.Sequential(\n","            nn.Conv2d(1, 16, kernel_size=3, padding=1, dilation=1), # Dilation=1 (Rapat)\n","            nn.BatchNorm2d(16), nn.GELU(),\n","            nn.Conv2d(16, 32, kernel_size=3, padding=2, dilation=2), # Dilation=2 (Mulai merongga)\n","            nn.BatchNorm2d(32), nn.GELU(),\n","            nn.Conv2d(32, 32, kernel_size=3, padding=5, dilation=5), # Dilation=5 (Global Receptive Field)\n","            nn.BatchNorm2d(32), nn.GELU(),\n","            nn.AdaptiveAvgPool2d((8, 8)) # Samakan output spasial ke 8x8 (64 token)\n","        )\n","\n","        # --- STREAM B: ESENSI WARNA (8x8 RGB + Depthwise Separable Conv) ---\n","        self.downsample_b = nn.AdaptiveAvgPool2d((8, 8)) # Kecilkan paksa gambar ke 8x8 sejak awal\n","        self.stream_b = nn.Sequential(\n","            # Depthwise Stage (groups = channel input)\n","            nn.Conv2d(3, 3, kernel_size=3, padding=1, groups=3),\n","            nn.BatchNorm2d(3), nn.GELU(),\n","            # Pointwise Stage (kernel 1x1 untuk mix channel secara super ringan)\n","            nn.Conv2d(3, 32, kernel_size=1),\n","            nn.BatchNorm2d(32), nn.GELU()\n","        )\n","\n","        # --- CORE COGNITION LAYER ---\n","        # Total in_features = 32 (Stream A) + 32 (Stream B) = 64 Fitur per token. Total token = 64.\n","        self.lookthem = LookThemLayer(num_tokens=64, in_features=64, hidden_dim=32)\n","\n","        # --- CLASSIFIER RAMPING DENGAN MAXPOOL1D UNTUK EFISIENSI EKSTREM ---\n","        self.pool1d = nn.AdaptiveMaxPool1d(16) # Meringkas representasi token sebelum masuk Linear\n","        self.classifier = nn.Sequential(\n","            nn.Linear(64 * 16, 128),\n","            nn.ReLU(),\n","            nn.Dropout(0.4), # Rem ketat biar gak gampang overfit\n","            nn.Linear(128, 200)\n","        )\n","\n","    def forward(self, x):\n","        batch_size = x.size(0)\n","\n","        # Eksekusi Stream A (Ubah ke Grayscale dulu)\n","        x_gray = torch.sum(x * self.grayscale_weights, dim=1, keepdim=True)\n","        feat_a = self.stream_a(x_gray) # Output: [B, 32, 8, 8]\n","\n","        # Eksekusi Stream B\n","        x_small = self.downsample_b(x)\n","        feat_b = self.stream_b(x_small) # Output: [B, 32, 8, 8]\n","\n","        # Gabungkan Fitur Bentuk + Fitur Warna (Concatenate di dimensi Channel)\n","        feat_combined = torch.cat([feat_a, feat_b], dim=1) # Output: [B, 64, 8, 8]\n","\n","        # Flatten spasial 8x8 menjadi 64 Token\n","        feat_tokens = feat_combined.view(batch_size, 64, 64).transpose(1, 2) # [B, 64 Token, 64 Features]\n","\n","        # Masuk ke LookThem Core\n","        out_lookthem = self.lookthem(feat_tokens) # [B, 64, 64]\n","\n","        # Kompresi Fitur & Klasifikasi\n","        out_pooled = self.pool1d(out_lookthem) # [B, 64, 16]\n","        out_flat = out_pooled.view(batch_size, -1)\n","        return self.classifier(out_flat)\n","\n","# ==========================================\n","# 4. 
RUNTIME TRAINING + ANTI-DISCONNECT CHECKPOINT\n","# ==========================================\n","device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n","model = LookThemV4().to(device)\n","\n","criterion = nn.CrossEntropyLoss()\n","optimizer = optim.Adam(model.parameters(), lr=0.001, weight_decay=1e-4) # Weight decay 1e-4 sebagai rem tambahan\n","scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=45)\n","\n","start_epoch = 0\n","checkpoint_path = \"lookthem_v4_checkpoint.pth\"\n","\n","# Fitur Auto-Resume: Jika runtime terputus, dia otomatis membaca berkas sisa semalam\n","if os.path.exists(checkpoint_path):\n","    print(\"πŸ’Ύ Menemukan berkas checkpoint! Melanjutkan eksperimen yang sempat terhenti...\")\n","    checkpoint = torch.load(checkpoint_path)\n","    model.load_state_dict(checkpoint['model_state_dict'])\n","    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n","    scheduler.load_state_dict(checkpoint['scheduler_state_dict'])\n","    start_epoch = checkpoint['epoch']\n","    print(f\"▢️ Berhasil resume dari Epoch ke-{start_epoch+1}\")\n","\n","print(f\"πŸš€ Memulai pengujian LookThem V4 (Dual-Stream) menggunakan {device}...\")\n","\n","for epoch in range(start_epoch, 45):\n","    model.train()\n","    total_loss, correct, total = 0, 0, 0\n","\n","    for data, target in train_loader:\n","        data, target = data.to(device), target.to(device)\n","\n","        optimizer.zero_grad()\n","        output = model(data)\n","        loss = criterion(output, target)\n","        loss.backward()\n","        optimizer.step()\n","\n","        total_loss += loss.item()\n","        _, predicted = output.max(1)\n","        total += target.size(0)\n","        correct += predicted.eq(target).sum().item()\n","\n","    scheduler.step()\n","    acc = 100. * correct / total\n","    current_lr = optimizer.param_groups[0]['lr']\n","    print(f\"Epoch {epoch+1:02d}/45 -> Train Loss: {total_loss/len(train_loader):.4f} | Train Acc: {acc:.2f}% | LR: {current_lr:.6f}\")\n","\n","    # Mengamankan data setiap 5 epoch sekali (Sistem Penyelamat dari interupsi luar)\n","    if (epoch + 1) % 5 == 0:\n","        torch.save({\n","            'epoch': epoch + 1,\n","            'model_state_dict': model.state_dict(),\n","            'optimizer_state_dict': optimizer.state_dict(),\n","            'scheduler_state_dict': scheduler.state_dict(),\n","        }, checkpoint_path)\n","        print(f\"πŸ”’ [SYSTEM-SAVER] Progres Epoch {epoch+1} berhasil dikunci ke disk!\")\n","\n","# ==========================================\n","# 5. VALIDASI JUJUR\n","# ==========================================\n","model.eval()\n","test_loss, test_correct, test_total = 0, 0, 0\n","\n","print(\"\\nπŸ”’ Memulai Pengujian Validasi Akhir...\")\n","with torch.no_grad():\n","    for data, target in val_loader:\n","        data, target = data.to(device), target.to(device)\n","        output = model(data)\n","        loss = criterion(output, target)\n","\n","        test_loss += loss.item()\n","        _, predicted = output.max(1)\n","        test_total += target.size(0)\n","        test_correct += predicted.eq(target).sum().item()\n","\n","final_test_acc = 100. * test_correct / test_total\n","print(\"=== HASIL EVALUASI AKHIR LOOKTHEM V4 ===\")\n","print(f\"Test Loss: {test_loss/len(val_loader):.4f} | Test Accuracy: {final_test_acc:.2f}%\")\n","\n","# Simpan model final\n","torch.save(model.state_dict(), \"LookThem_V4_Final.pth\")\n","print(f\"🏁 Selesai! Model V4 berhasil diamankan. 
Ukuran berkas: {os.path.getsize('LookThem_V4_Final.pth') / (1024*1024):.2f} MB\")"],"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":699},"id":"FGmJ6a5Jv-wh","executionInfo":{"status":"error","timestamp":1778985985152,"user_tz":-420,"elapsed":752153,"user":{"displayName":"Cici rizky plk","userId":"03714270658772765776"}},"outputId":"4c64f544-5feb-434f-80cb-41633fe8c874"},"execution_count":1,"outputs":[{"output_type":"stream","name":"stdout","text":["πŸ“₯ Mengunduh Tiny-ImageNet (~230MB)... Mohon tunggu...\n","βœ… Download Selesai!\n","πŸ“¦ Mengekstrak dataset...\n","βœ… Ekstrak Selesai!\n","🧹 Merapikan struktur folder Validation asli Tiny-ImageNet...\n","βœ… Folder Validation sekarang sudah rapi!\n","πŸš€ Memulai pengujian LookThem V4 (Dual-Stream) menggunakan cuda...\n","Epoch 01/45 -> Train Loss: 5.0467 | Train Acc: 2.27% | LR: 0.000999\n","Epoch 02/45 -> Train Loss: 4.5716 | Train Acc: 6.25% | LR: 0.000995\n","Epoch 03/45 -> Train Loss: 4.3036 | Train Acc: 9.24% | LR: 0.000989\n","Epoch 04/45 -> Train Loss: 4.1669 | Train Acc: 10.84% | LR: 0.000981\n","Epoch 05/45 -> Train Loss: 4.0900 | Train Acc: 11.91% | LR: 0.000970\n","πŸ”’ [SYSTEM-SAVER] Progres Epoch 5 berhasil dikunci ke disk!\n","Epoch 06/45 -> Train Loss: 4.0300 | Train Acc: 12.73% | LR: 0.000957\n","Epoch 07/45 -> Train Loss: 3.9907 | Train Acc: 13.22% | LR: 0.000941\n","Epoch 08/45 -> Train Loss: 3.9669 | Train Acc: 13.64% | LR: 0.000924\n","Epoch 09/45 -> Train Loss: 3.9378 | Train Acc: 13.97% | LR: 0.000905\n","Epoch 10/45 -> Train Loss: 3.9139 | Train Acc: 14.40% | LR: 0.000883\n","πŸ”’ [SYSTEM-SAVER] Progres Epoch 10 berhasil dikunci ke disk!\n"]},{"output_type":"error","ename":"KeyboardInterrupt","evalue":"","traceback":["\u001b[0;31m---------------------------------------------------------------------------\u001b[0m","\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)","\u001b[0;32m/tmp/ipykernel_3667/1115099348.py\u001b[0m in \u001b[0;36m<cell line: 0>\u001b[0;34m()\u001b[0m\n\u001b[1;32m    230\u001b[0m         \u001b[0moptimizer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstep\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    231\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 232\u001b[0;31m         \u001b[0mtotal_loss\u001b[0m \u001b[0;34m+=\u001b[0m \u001b[0mloss\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mitem\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    233\u001b[0m         \u001b[0m_\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpredicted\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0moutput\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmax\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    234\u001b[0m         \u001b[0mtotal\u001b[0m \u001b[0;34m+=\u001b[0m \u001b[0mtarget\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msize\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;31mKeyboardInterrupt\u001b[0m: "]}]},{"cell_type":"code","source":["model.eval()\n","test_loss = 0\n","test_correct = 0\n","test_total = 0\n","with torch.no_grad():\n","    for data, target in val_loader:\n","        data, target = data.to(device), target.to(device)\n","        output = model(data)\n","        loss = criterion(output, target)\n","\n","        test_loss += 
loss.item()\n","        _, predicted = output.max(1)\n","        test_total += target.size(0)\n","        test_correct += predicted.eq(target).sum().item()\n","\n","final_test_acc = 100. * test_correct / test_total\n","print(\"=== HASIL EVALUASI AKHIR TINY-IMAGENET ===\")\n","print(f\"Test Loss: {test_loss/len(val_loader):.4f} | Test Accuracy: {final_test_acc:.2f}%\")\n","\n","# Simpan bobot model baru"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"SUhpWOxq0m7y","executionInfo":{"status":"ok","timestamp":1778985995833,"user_tz":-420,"elapsed":4493,"user":{"displayName":"Cici rizky plk","userId":"03714270658772765776"}},"outputId":"b22711b1-52ef-4e8a-9133-d8ba65272374"},"execution_count":2,"outputs":[{"output_type":"stream","name":"stdout","text":["=== HASIL EVALUASI AKHIR TINY-IMAGENET ===\n","Test Loss: 3.5995 | Test Accuracy: 20.63%\n"]}]},{"cell_type":"markdown","source":["V4.1 (worst I think)"],"metadata":{"id":"E4qMV5PZ1C_U"}},{"cell_type":"code","source":["import os\n","import zipfile\n","import urllib.request\n","import math\n","import torch\n","import torch.nn as nn\n","import torch.nn.functional as F\n","import torch.optim as optim\n","from torch.utils.data import DataLoader\n","from torchvision import datasets, transforms\n","\n","# ==========================================\n","# 1. OTOMATISASI DOWNLOAD & PREPROCESS DATASET\n","# ==========================================\n","def prepare_tiny_imagenet():\n","    dataset_url = \"http://cs231n.stanford.edu/tiny-imagenet-200.zip\"\n","    zip_path = \"./tiny-imagenet-200.zip\"\n","    extract_path = \"./tiny-imagenet-200\"\n","\n","    if not os.path.exists(zip_path):\n","        print(\"πŸ“₯ Mengunduh Tiny-ImageNet (~230MB)... Mohon tunggu...\")\n","        urllib.request.urlretrieve(dataset_url, zip_path)\n","        print(\"βœ… Download Selesai!\")\n","\n","    if not os.path.exists(extract_path):\n","        print(\"πŸ“¦ Mengekstrak dataset...\")\n","        with zipfile.ZipFile(zip_path, 'r') as zip_ref:\n","            zip_ref.extractall(\"./\")\n","        print(\"βœ… Ekstrak Selesai!\")\n","\n","    return os.path.join(extract_path, \"train\"), os.path.join(extract_path, \"val\")\n","\n","train_dir, val_dir = prepare_tiny_imagenet()\n","\n","# Merapikan folder validasi bawaan Tiny-ImageNet\n","val_img_dir = \"./tiny-imagenet-200/val/images\"\n","val_annotations = \"./tiny-imagenet-200/val/val_annotations.txt\"\n","\n","if os.path.exists(val_img_dir):\n","    print(\"🧹 Merapikan struktur folder Validation asli Tiny-ImageNet...\")\n","    with open(val_annotations, \"r\") as f:\n","        lines = f.readlines()\n","\n","    for line in lines:\n","        parts = line.strip().split(\"\\t\")\n","        img_name, class_name = parts[0], parts[1]\n","        class_dir = os.path.join(\"./tiny-imagenet-200/val\", class_name)\n","        os.makedirs(class_dir, exist_ok=True)\n","        src_path = os.path.join(val_img_dir, img_name)\n","        dst_path = os.path.join(class_dir, img_name)\n","        if os.path.exists(src_path):\n","            os.rename(src_path, dst_path)\n","    os.rmdir(val_img_dir)\n","    print(\"βœ… Folder Validation sekarang sudah rapi!\")\n","\n","transform_train = transforms.Compose([\n","    transforms.RandomHorizontalFlip(),\n","    transforms.RandomRotation(15),\n","    transforms.ToTensor(),\n","    transforms.Normalize((0.4802, 0.4481, 0.3975), (0.2302, 0.2265, 0.2262))\n","])\n","\n","transform_val = transforms.Compose([\n","    transforms.ToTensor(),\n","    
transforms.Normalize((0.4802, 0.4481, 0.3975), (0.2302, 0.2265, 0.2262))\n","])\n","\n","train_dataset = datasets.ImageFolder(root=train_dir, transform=transform_train)\n","val_dataset = datasets.ImageFolder(root=val_dir, transform=transform_val)\n","\n","train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True, num_workers=2, pin_memory=True)\n","val_loader = DataLoader(val_dataset, batch_size=64, shuffle=False, num_workers=2, pin_memory=True,)\n","\n","# ==========================================\n","# 2. CORE LAYER: LOOKTHEM CORE LAYER\n","# ==========================================\n","class LookThemLayer(nn.Module):\n","    def __init__(self, num_tokens, in_features, hidden_dim):\n","        super(LookThemLayer, self).__init__()\n","        self.num_tokens = num_tokens\n","        self.in_features = in_features\n","\n","        self.mod1_w1 = nn.Parameter(torch.randn(num_tokens, in_features, hidden_dim))\n","        self.mod1_b1 = nn.Parameter(torch.zeros(num_tokens, hidden_dim))\n","        self.mod1_w2 = nn.Parameter(torch.randn(num_tokens, hidden_dim, 1))\n","        self.mod1_b2 = nn.Parameter(torch.zeros(num_tokens, 1))\n","\n","        self.mod2_w1 = nn.Parameter(torch.randn(num_tokens, in_features, hidden_dim))\n","        self.mod2_b1 = nn.Parameter(torch.zeros(num_tokens, hidden_dim))\n","        self.mod2_w2 = nn.Parameter(torch.randn(num_tokens, hidden_dim, 1))\n","        self.mod2_b2 = nn.Parameter(torch.zeros(num_tokens, 1))\n","\n","        self.trans_w = nn.Parameter(torch.randn(num_tokens, 1, 1))\n","        self.trans_b = nn.Parameter(torch.zeros(num_tokens, 1))\n","        self._init_weights()\n","\n","    def _init_weights(self):\n","        for w in [self.mod1_w1, self.mod2_w1, self.mod1_w2, self.mod2_w2, self.trans_w]:\n","            nn.init.kaiming_uniform_(w, a=math.sqrt(5))\n","\n","    def forward(self, x):\n","        N = self.num_tokens\n","        h1 = torch.einsum('bti,tij->btj', x, self.mod1_w1) + self.mod1_b1\n","        out_m1 = torch.einsum('btj,tjk->btk', F.gelu(h1), self.mod1_w2) + self.mod1_b2\n","\n","        h2 = torch.einsum('bti,tij->btj', x, self.mod2_w1) + self.mod2_b1\n","        out_m2 = torch.einsum('btj,tjk->btk', F.gelu(h2), self.mod2_w2) + self.mod2_b2\n","\n","        out_m2_safe = out_m2 + 1e-5\n","        compare = torch.tanh(out_m1.unsqueeze(2) / out_m2_safe.unsqueeze(1))\n","        compare2 = torch.tanh(out_m1.unsqueeze(1) / out_m2_safe.unsqueeze(2))\n","\n","        bias_reshaped = self.trans_b.view(1, 1, N, 1)\n","        trans_compare = torch.einsum('bije,jef->bijf', compare, self.trans_w) + bias_reshaped\n","        trans_compare2 = torch.einsum('bije,jef->bijf', compare2, self.trans_w) + bias_reshaped\n","\n","        interaksi = (trans_compare * x.unsqueeze(2) + trans_compare2 * x.unsqueeze(1)) / 2\n","        mask = 1.0 - torch.eye(N, device=x.device)\n","        interaksi_masked = interaksi * mask.view(1, N, N, 1)\n","\n","        return interaksi_masked.sum(dim=2) / (N - 1.0)\n","\n","# ==========================================\n","# 3. 
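(short note before the architecture below)\n","#    LookThemLayer keeps the token shape: input [B, T, F] -> output [B, T, F],\n","#    built from masked pairwise interactions between tokens.\n","#    Hedged, optional shape check (hypothetical names, commented out so the\n","#    original experiment is left untouched):\n","# _probe = LookThemLayer(num_tokens=4, in_features=8, hidden_dim=4)\n","# assert _probe(torch.randn(2, 4, 8)).shape == (2, 4, 8)\n","\n","# ==========================================\n","# 3. 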
INDUK ARSITEKTUR: LOOKTHEM V4.1 (THE STRIDE UPDATE)\n","# ==========================================\n","class LookThemV4_1(nn.Module):\n","    def __init__(self):\n","        super(LookThemV4_1, self).__init__()\n","\n","        # Menggunakan koefisien ITU-R BT.601 untuk konversi internal ke grayscale\n","        self.register_buffer('grayscale_weights', torch.tensor([0.299, 0.587, 0.114]).view(1, 3, 1, 1))\n","\n","        # --- STREAM A: STRUKTUR/BENTUK (64x64 Grayscale -> 16x16 lewat Stride) ---\n","        self.stream_a = nn.Sequential(\n","            nn.Conv2d(1, 16, kernel_size=3, stride=2, padding=1),  # [B, 16, 32, 32]\n","            nn.BatchNorm2d(16), nn.GELU(),\n","            nn.Conv2d(16, 32, kernel_size=3, stride=2, padding=1), # [B, 32, 16, 16] - Ditahan di 16x16\n","            nn.BatchNorm2d(32), nn.GELU()\n","        )\n","\n","        # --- STREAM B: ESENSI WARNA (64x64 RGB -> 16x16 lewat Depthwise Separable Stride) ---\n","        self.stream_b = nn.Sequential(\n","            # Tahap 1: Turun ke 32x32\n","            nn.Conv2d(3, 3, kernel_size=3, stride=2, padding=1, groups=3),\n","            nn.BatchNorm2d(3), nn.GELU(),\n","            nn.Conv2d(3, 16, kernel_size=1),\n","            nn.BatchNorm2d(16), nn.GELU(),\n","\n","            # Tahap 2: Turun ke 16x16 (Ditahan biar fitur warna tidak hancur lebur)\n","            nn.Conv2d(16, 16, kernel_size=3, stride=2, padding=1, groups=16),\n","            nn.BatchNorm2d(16), nn.GELU(),\n","            nn.Conv2d(16, 32, kernel_size=1),\n","            nn.BatchNorm2d(32), nn.GELU()\n","        )\n","\n","        # --- JALUR PENYATUAN DAN AKHIR DOWNSTREAM (16x16 -> 8x8 lewat Stride) ---\n","        # Menggabungkan Channel Stream A (32) + Stream B (32) = 64 Channel\n","        self.merge_downsample = nn.Sequential(\n","            nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1, groups=64), # Depthwise Stride\n","            nn.BatchNorm2d(64), nn.GELU(),\n","            nn.Conv2d(64, 64, kernel_size=1),                                # Pointwise Mix\n","            nn.BatchNorm2d(64), nn.GELU()                                    # Output: [B, 64, 8, 8]\n","        )\n","\n","        # --- CORE COGNITION LAYER (8x8 Spasial = 64 Token, 64 Features) ---\n","        self.lookthem = LookThemLayer(num_tokens=64, in_features=64, hidden_dim=32)\n","\n","        # --- CLASSIFIER RAMPING ---\n","        self.pool1d = nn.AdaptiveMaxPool1d(16)\n","        self.classifier = nn.Sequential(\n","            nn.Linear(64 * 16, 128),\n","            nn.ReLU(),\n","            nn.Dropout(0.4),\n","            nn.Linear(128, 200)\n","        )\n","\n","    def forward(self, x):\n","        batch_size = x.size(0)\n","\n","        # 1. Jalankan Stream A (Bentuk - Grayscale)\n","        x_gray = torch.sum(x * self.grayscale_weights, dim=1, keepdim=True)\n","        feat_a = self.stream_a(x_gray) # [B, 32, 16, 16]\n","\n","        # 2. Jalankan Stream B (Warna - RGB)\n","        feat_b = self.stream_b(x) # [B, 32, 16, 16]\n","\n","        # 3. Concat di resolusi 16x16, lalu Stride turun ke 8x8\n","        feat_combined = torch.cat([feat_a, feat_b], dim=1) # [B, 64, 16, 16]\n","        feat_downsampled = self.merge_downsample(feat_combined) # [B, 64, 8, 8]\n","\n","        # 4. Flatten menjadi format Token [Batch, 64 Token, 64 Features]\n","        feat_tokens = feat_downsampled.view(batch_size, 64, 64).transpose(1, 2)\n","\n","        # 5. 
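(note) self.pool1d = AdaptiveMaxPool1d(16) below compresses each token's 64\n","        #    features down to 16, so the flattened classifier input is 64 * 16 = 1024.\n","        # 5. 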
Masuk ke LookThem Core & Klasifikasi\n","        out_lookthem = self.lookthem(feat_tokens)\n","        out_pooled = self.pool1d(out_lookthem)\n","        out_flat = out_pooled.view(batch_size, -1)\n","        return self.classifier(out_flat)\n","\n","# ==========================================\n","# 4. RUNTIME TRAINING + AUTO-CHECKPOINT\n","# ==========================================\n","device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n","model = LookThemV4_1().to(device)\n","\n","criterion = nn.CrossEntropyLoss()\n","optimizer = optim.Adam(model.parameters(), lr=0.001, weight_decay=1e-4)\n","scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=45)\n","\n","start_epoch = 0\n","checkpoint_path = \"lookthem_v4_1_checkpoint.pth\"\n","\n","if os.path.exists(checkpoint_path):\n","    print(\"πŸ’Ύ Checkpoint V4.1 ditemukan! Melanjutkan progres eksperimen...\")\n","    checkpoint = torch.load(checkpoint_path)\n","    model.load_state_dict(checkpoint['model_state_dict'])\n","    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n","    scheduler.load_state_dict(checkpoint['scheduler_state_dict'])\n","    start_epoch = checkpoint['epoch']\n","    print(f\"▢️ Berhasil resume dari Epoch ke-{start_epoch+1}\")\n","\n","print(f\"πŸš€ Memulai pengujian LookThem V4.1 (The Stride Update) menggunakan {device}...\")\n","\n","for epoch in range(start_epoch, 45):\n","    model.train()\n","    total_loss, correct, total = 0, 0, 0\n","\n","    for data, target in train_loader:\n","        data, target = data.to(device), target.to(device)\n","\n","        optimizer.zero_grad()\n","        output = model(data)\n","        loss = criterion(output, target)\n","        loss.backward()\n","        optimizer.step()\n","\n","        total_loss += loss.item()\n","        _, predicted = output.max(1)\n","        total += target.size(0)\n","        correct += predicted.eq(target).sum().item()\n","\n","    scheduler.step()\n","    acc = 100. * correct / total\n","    current_lr = optimizer.param_groups[0]['lr']\n","    print(f\"Epoch {epoch+1:02d}/45 -> Train Loss: {total_loss/len(train_loader):.4f} | Train Acc: {acc:.2f}% | LR: {current_lr:.6f}\")\n","\n","    if (epoch + 1) % 5 == 0:\n","        torch.save({\n","            'epoch': epoch + 1,\n","            'model_state_dict': model.state_dict(),\n","            'optimizer_state_dict': optimizer.state_dict(),\n","            'scheduler_state_dict': scheduler.state_dict(),\n","        }, checkpoint_path)\n","        print(f\"πŸ”’ [SYSTEM-SAVER] Progres Epoch {epoch+1} berhasil dikunci ke disk!\")\n","\n","# ==========================================\n","# 5. VALIDASI JUJUR\n","# ==========================================\n","model.eval()\n","test_loss, test_correct, test_total = 0, 0, 0\n","\n","print(\"\\nπŸ”’ Memulai Pengujian Validasi Akhir...\")\n","with torch.no_grad():\n","    for data, target in val_loader:\n","        data, target = data.to(device), target.to(device)\n","        output = model(data)\n","        loss = criterion(output, target)\n","\n","        test_loss += loss.item()\n","        _, predicted = output.max(1)\n","        test_total += target.size(0)\n","        test_correct += predicted.eq(target).sum().item()\n","\n","final_test_acc = 100. 
* test_correct / test_total\n","print(\"=== HASIL EVALUASI AKHIR LOOKTHEM V4.1 ===\")\n","print(f\"Test Loss: {test_loss/len(val_loader):.4f} | Test Accuracy: {final_test_acc:.2f}%\")\n","\n","torch.save(model.state_dict(), \"LookThem_V4_1_Final.pth\")\n","print(f\"🏁 Selesai! Ukuran berkas final: {os.path.getsize('LookThem_V4_1_Final.pth') / (1024*1024):.2f} MB\")"],"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":862},"id":"wrjhC8Wl193U","executionInfo":{"status":"error","timestamp":1778987739653,"user_tz":-420,"elapsed":248124,"user":{"displayName":"Cici rizky plk","userId":"03714270658772765776"}},"outputId":"d08504f1-eb7d-4a99-940b-4f8c458a6469"},"execution_count":1,"outputs":[{"output_type":"stream","name":"stdout","text":["πŸ“₯ Mengunduh Tiny-ImageNet (~230MB)... Mohon tunggu...\n","βœ… Download Selesai!\n","πŸ“¦ Mengekstrak dataset...\n","βœ… Ekstrak Selesai!\n","🧹 Merapikan struktur folder Validation asli Tiny-ImageNet...\n","βœ… Folder Validation sekarang sudah rapi!\n","πŸš€ Memulai pengujian LookThem V4.1 (The Stride Update) menggunakan cuda...\n","Epoch 01/45 -> Train Loss: 4.9737 | Train Acc: 2.56% | LR: 0.000999\n","Epoch 02/45 -> Train Loss: 4.5057 | Train Acc: 6.47% | LR: 0.000995\n"]},{"output_type":"error","ename":"KeyboardInterrupt","evalue":"","traceback":["\u001b[0;31m---------------------------------------------------------------------------\u001b[0m","\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)","\u001b[0;32m/tmp/ipykernel_9564/2449302467.py\u001b[0m in \u001b[0;36m<cell line: 0>\u001b[0;34m()\u001b[0m\n\u001b[1;32m    232\u001b[0m         \u001b[0moutput\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmodel\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    233\u001b[0m         \u001b[0mloss\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcriterion\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moutput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtarget\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 234\u001b[0;31m         \u001b[0mloss\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbackward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    235\u001b[0m         \u001b[0moptimizer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstep\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    236\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/torch/_tensor.py\u001b[0m in \u001b[0;36mbackward\u001b[0;34m(self, gradient, retain_graph, create_graph, inputs)\u001b[0m\n\u001b[1;32m    628\u001b[0m                 \u001b[0minputs\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0minputs\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    629\u001b[0m             )\n\u001b[0;32m--> 630\u001b[0;31m         torch.autograd.backward(\n\u001b[0m\u001b[1;32m    631\u001b[0m             \u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mgradient\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mretain_graph\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcreate_graph\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minputs\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0minputs\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    632\u001b[0m         
)\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/torch/autograd/__init__.py\u001b[0m in \u001b[0;36mbackward\u001b[0;34m(tensors, grad_tensors, retain_graph, create_graph, grad_variables, inputs)\u001b[0m\n\u001b[1;32m    362\u001b[0m     \u001b[0;31m# some Python versions print out the first line of a multi-line function\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    363\u001b[0m     \u001b[0;31m# calls in the traceback and some print out the last line\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 364\u001b[0;31m     _engine_run_backward(\n\u001b[0m\u001b[1;32m    365\u001b[0m         \u001b[0mtensors\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    366\u001b[0m         \u001b[0mgrad_tensors_\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/torch/autograd/graph.py\u001b[0m in \u001b[0;36m_engine_run_backward\u001b[0;34m(t_outputs, *args, **kwargs)\u001b[0m\n\u001b[1;32m    863\u001b[0m         \u001b[0munregister_hooks\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_register_logging_hooks_on_whole_graph\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mt_outputs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    864\u001b[0m     \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 865\u001b[0;31m         return Variable._execution_engine.run_backward(  # Calls into the C++ engine to run the backward pass\n\u001b[0m\u001b[1;32m    866\u001b[0m             \u001b[0mt_outputs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    867\u001b[0m         )  # Calls into the C++ engine to run the backward pass\n","\u001b[0;31mKeyboardInterrupt\u001b[0m: "]}]},{"cell_type":"markdown","source":["V5"],"metadata":{"id":"_qrb872rAYLd"}},{"cell_type":"code","source":["import os\n","import zipfile\n","import urllib.request\n","import math\n","import torch\n","import torch.nn as nn\n","import torch.nn.functional as F\n","import torch.optim as optim\n","from torch.utils.data import DataLoader\n","from torchvision import datasets, transforms\n","\n","# ==========================================\n","# 1. DOWNLOAD & PREPROCESS DATASET (TINY-IMAGENET)\n","# ==========================================\n","def prepare_tiny_imagenet():\n","    dataset_url = \"http://cs231n.stanford.edu/tiny-imagenet-200.zip\"\n","    zip_path = \"./tiny-imagenet-200.zip\"\n","    extract_path = \"./tiny-imagenet-200\"\n","\n","    if not os.path.exists(zip_path):\n","        print(\"πŸ“₯ Mengunduh Tiny-ImageNet (~230MB)... 
Mohon tunggu...\")\n","        urllib.request.urlretrieve(dataset_url, zip_path)\n","        print(\"βœ… Download Selesai!\")\n","\n","    if not os.path.exists(extract_path):\n","        print(\"πŸ“¦ Mengekstrak dataset...\")\n","        with zipfile.ZipFile(zip_path, 'r') as zip_ref:\n","            zip_ref.extractall(\"./\")\n","        print(\"βœ… Ekstrak Selesai!\")\n","\n","    return os.path.join(extract_path, \"train\"), os.path.join(extract_path, \"val\")\n","\n","train_dir, val_dir = prepare_tiny_imagenet()\n","\n","# Merapikan folder validasi bawaan Tiny-ImageNet\n","val_img_dir = \"./tiny-imagenet-200/val/images\"\n","val_annotations = \"./tiny-imagenet-200/val/val_annotations.txt\"\n","\n","if os.path.exists(val_img_dir):\n","    print(\"🧹 Merapikan struktur folder Validation asli Tiny-ImageNet...\")\n","    with open(val_annotations, \"r\") as f:\n","        lines = f.readlines()\n","\n","    for line in lines:\n","        parts = line.strip().split(\"\\t\")\n","        img_name, class_name = parts[0], parts[1]\n","        class_dir = os.path.join(\"./tiny-imagenet-200/val\", class_name)\n","        os.makedirs(class_dir, exist_ok=True)\n","        src_path = os.path.join(val_img_dir, img_name)\n","        dst_path = os.path.join(class_dir, img_name)\n","        if os.path.exists(src_path):\n","            os.rename(src_path, dst_path)\n","    os.rmdir(val_img_dir)\n","    print(\"βœ… Folder Validation sekarang sudah rapi!\")\n","\n","transform_train = transforms.Compose([\n","    transforms.RandomHorizontalFlip(),\n","    transforms.RandomRotation(15),\n","    transforms.ToTensor(),\n","    transforms.Normalize((0.4802, 0.4481, 0.3975), (0.2302, 0.2265, 0.2262))\n","])\n","\n","transform_val = transforms.Compose([\n","    transforms.ToTensor(),\n","    transforms.Normalize((0.4802, 0.4481, 0.3975), (0.2302, 0.2265, 0.2262))\n","])\n","\n","train_dataset = datasets.ImageFolder(root=train_dir, transform=transform_train)\n","val_dataset = datasets.ImageFolder(root=val_dir, transform=transform_val)\n","\n","train_loader = DataLoader(train_dataset, batch_size=128, shuffle=True, num_workers=2, pin_memory=True)\n","val_loader = DataLoader(val_dataset, batch_size=256, shuffle=False, num_workers=2, pin_memory=True)\n","\n","# ==========================================\n","# 2. 
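(note) this V5 run uses larger batches (train 128 / val 256) than the V4.1 run\n","#    above (32 / 64). Optional sanity check, commented out; Tiny-ImageNet should\n","#    yield 100,000 training and 10,000 validation images:\n","# print(len(train_dataset), len(val_dataset))\n","\n","# ==========================================\n","# 2. 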
CORE LAYER: LOOKTHEM CORE LAYER\n","# ==========================================\n","class LookThemLayer(nn.Module):\n","    def __init__(self, num_tokens, in_features, hidden_dim):\n","        super(LookThemLayer, self).__init__()\n","        self.num_tokens = num_tokens\n","        self.in_features = in_features\n","\n","        self.mod1_w1 = nn.Parameter(torch.randn(num_tokens, in_features, hidden_dim))\n","        self.mod1_b1 = nn.Parameter(torch.zeros(num_tokens, hidden_dim))\n","        self.mod1_w2 = nn.Parameter(torch.randn(num_tokens, hidden_dim, 1))\n","        self.mod1_b2 = nn.Parameter(torch.zeros(num_tokens, 1))\n","\n","        self.mod2_w1 = nn.Parameter(torch.randn(num_tokens, in_features, hidden_dim))\n","        self.mod2_b1 = nn.Parameter(torch.zeros(num_tokens, hidden_dim))\n","        self.mod2_w2 = nn.Parameter(torch.randn(num_tokens, hidden_dim, 1))\n","        self.mod2_b2 = nn.Parameter(torch.zeros(num_tokens, 1))\n","\n","        self.trans_w = nn.Parameter(torch.randn(num_tokens, 1, 1))\n","        self.trans_b = nn.Parameter(torch.zeros(num_tokens, 1))\n","        self._init_weights()\n","\n","    def _init_weights(self):\n","        for w in [self.mod1_w1, self.mod2_w1, self.mod1_w2, self.mod2_w2, self.trans_w]:\n","            nn.init.kaiming_uniform_(w, a=math.sqrt(5))\n","\n","    def forward(self, x):\n","        N = self.num_tokens\n","        h1 = torch.einsum('bti,tij->btj', x, self.mod1_w1) + self.mod1_b1\n","        out_m1 = torch.einsum('btj,tjk->btk', F.gelu(h1), self.mod1_w2) + self.mod1_b2\n","\n","        h2 = torch.einsum('bti,tij->btj', x, self.mod2_w1) + self.mod2_b1\n","        out_m2 = torch.einsum('btj,tjk->btk', F.gelu(h2), self.mod2_w2) + self.mod2_b2\n","\n","        out_m2_safe = out_m2 + 1e-5\n","        compare = torch.tanh(out_m1.unsqueeze(2) / out_m2_safe.unsqueeze(1))\n","        compare2 = torch.tanh(out_m1.unsqueeze(1) / out_m2_safe.unsqueeze(2))\n","\n","        bias_reshaped = self.trans_b.view(1, 1, N, 1)\n","        trans_compare = torch.einsum('bije,jef->bijf', compare, self.trans_w) + bias_reshaped\n","        trans_compare2 = torch.einsum('bije,jef->bijf', compare2, self.trans_w) + bias_reshaped\n","\n","        interaksi = (trans_compare * x.unsqueeze(2) + trans_compare2 * x.unsqueeze(1)) / 2\n","        mask = 1.0 - torch.eye(N, device=x.device)\n","        interaksi_masked = interaksi * mask.view(1, N, N, 1)\n","\n","        return interaksi_masked.sum(dim=2) / (N - 1.0)\n","\n","# ==========================================\n","# 3. 
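(note, rough back-of-envelope) the pairwise 'interaksi' tensor is [B, N, N, F];\n","#    with N = 64 tokens and F = 64 features that is 64*64*64 = 262,144 values per\n","#    sample (~1 MB in fp32), so memory grows quadratically with the token count.\n","\n","# ==========================================\n","# 3. 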
INDUK ARSITEKTUR: LOOKTHEM V5 (STANDARD CONV STREAM B)\n","# ==========================================\n","class LookThemV5(nn.Module):\n","    def __init__(self):\n","        super(LookThemV5, self).__init__()\n","\n","        self.register_buffer('grayscale_weights', torch.tensor([0.299, 0.587, 0.114]).view(1, 3, 1, 1))\n","\n","        # --- STREAM A: BENTUK MAKRO (Ditahan di 16x16 - Resolusi Tinggi) ---\n","        self.stream_a = nn.Sequential(\n","            nn.Conv2d(1, 16, kernel_size=3, stride=2, padding=1),  # [B, 16, 32, 32]\n","            nn.BatchNorm2d(16), nn.GELU(),\n","            nn.Conv2d(16, 32, kernel_size=3, stride=2, padding=1), # [B, 32, 16, 16] (256 Luas Spasial)\n","            nn.BatchNorm2d(32), nn.GELU()\n","        )\n","\n","        # Jembatan Progresif Token: Mengompres spasial 256 -> 64 token secara cerdas\n","        self.token_bridge = nn.Linear(256, 64)\n","\n","        # --- STREAM B: ESENSI WARNA (Mengecil ke 8x8 Mengikuti Pola V2 - Standard Conv) ---\n","        self.stream_b = nn.Sequential(\n","            nn.Conv2d(3, 16, kernel_size=3, stride=2, padding=1),  # [B, 16, 32, 32]\n","            nn.BatchNorm2d(16), nn.GELU(),\n","            nn.Conv2d(16, 32, kernel_size=3, stride=2, padding=1), # [B, 32, 16, 16]\n","            nn.BatchNorm2d(32), nn.GELU(),\n","\n","            # UPDATE V5: Menggunakan Standard Conv Stride murni gaya V2 agar kombinasi warna langsung padat\n","            nn.Conv2d(32, 32, kernel_size=3, stride=2, padding=1), # [B, 32, 8, 8] (64 Luas Spasial)\n","            nn.BatchNorm2d(32), nn.GELU()\n","        )\n","\n","        # --- CORE COGNITION LAYER (64 Token, Gabungan Features 32 + 32 = 64) ---\n","        self.lookthem = LookThemLayer(num_tokens=64, in_features=64, hidden_dim=32)\n","\n","        # --- CLASSIFIER RAMPING ANTI-OVERFIT ---\n","        self.classifier = nn.Sequential(\n","            nn.Flatten(),\n","            nn.Linear(64 * 64, 256),\n","            nn.ReLU(),\n","            nn.Dropout(0.4),\n","            nn.Linear(256, 200)\n","        )\n","\n","    def forward(self, x):\n","        batch_size = x.size(0)\n","\n","        # 1. Ekstrak Stream A (Grayscale 16x16)\n","        x_gray = torch.sum(x * self.grayscale_weights, dim=1, keepdim=True)\n","        feat_a = self.stream_a(x_gray) # [B, 32, 16, 16]\n","        feat_a_flat = feat_a.view(batch_size, 32, 256)\n","        feat_a_compressed = self.token_bridge(feat_a_flat) # [B, 32, 64]\n","        feat_a_tokens = feat_a_compressed.transpose(1, 2) # [B, 64 Token, 32 Features]\n","\n","        # 2. Ekstrak Stream B (RGB 8x8 via Standard Conv)\n","        feat_b = self.stream_b(x) # [B, 32, 8, 8]\n","        feat_b_tokens = feat_b.view(batch_size, 32, 64).transpose(1, 2) # [B, 64 Token, 32 Features]\n","\n","        # 3. Penggabungan Asimetris Tingkat Features (Total tetap 64 Token)\n","        tokens_combined = torch.cat([feat_a_tokens, feat_b_tokens], dim=2) # [B, 64 Token, 64 Features]\n","\n","        # 4. Kognisi Relasional & Klasifikasi\n","        out_lookthem = self.lookthem(tokens_combined)\n","        return self.classifier(out_lookthem)\n","\n","# ==========================================\n","# 4. 
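(note) optional, hedged size check, commented out so the original run is\n","#    unchanged; the recorded final checkpoint (~5.38 MB in fp32) suggests on the\n","#    order of 1.4M parameters, including BatchNorm buffers.\n","# print(sum(p.numel() for p in LookThemV5().parameters()))\n","\n","# ==========================================\n","# 4. 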
RUNTIME TRAINING + CHECKPOINT SYSTEM\n","# ==========================================\n","device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n","model = LookThemV5().to(device)\n","\n","criterion = nn.CrossEntropyLoss()\n","optimizer = optim.Adam(model.parameters(), lr=0.001, weight_decay=1e-4)\n","scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=20)\n","\n","start_epoch = 0\n","checkpoint_path = \"lookthem_v5_checkpoint.pth\"\n","\n","if os.path.exists(checkpoint_path):\n","    print(\"πŸ’Ύ Checkpoint V5 ditemukan! Melanjutkan progres eksperimen...\")\n","    checkpoint = torch.load(checkpoint_path)\n","    model.load_state_dict(checkpoint['model_state_dict'])\n","    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n","    scheduler.load_state_dict(checkpoint['scheduler_state_dict'])\n","    start_epoch = checkpoint['epoch']\n","    print(f\"▢️ Berhasil resume dari Epoch ke-{start_epoch+1}\")\n","\n","print(f\"πŸš€ Memulai pengujian LookThem V5 (Asymmetric Fusion) menggunakan {device}...\")\n","\n","for epoch in range(start_epoch, 20):\n","    model.train()\n","    total_loss, correct, total = 0, 0, 0\n","\n","    for data, target in train_loader:\n","        data, target = data.to(device), target.to(device)\n","\n","        optimizer.zero_grad()\n","        output = model(data)\n","        loss = criterion(output, target)\n","        loss.backward()\n","        optimizer.step()\n","\n","        total_loss += loss.item()\n","        _, predicted = output.max(1)\n","        total += target.size(0)\n","        correct += predicted.eq(target).sum().item()\n","\n","    scheduler.step()\n","    acc = 100. * correct / total\n","    current_lr = optimizer.param_groups[0]['lr']\n","    print(f\"Epoch {epoch+1:02d}/20 -> Train Loss: {total_loss/len(train_loader):.4f} | Train Acc: {acc:.2f}% | LR: {current_lr:.6f}\")\n","\n","    if (epoch + 1) % 5 == 0:\n","        torch.save({\n","            'epoch': epoch + 1,\n","            'model_state_dict': model.state_dict(),\n","            'optimizer_state_dict': optimizer.state_dict(),\n","            'scheduler_state_dict': scheduler.state_dict(),\n","        }, checkpoint_path)\n","        print(f\"πŸ”’ [SYSTEM-SAVER] Progres Epoch {epoch+1} berhasil dikunci ke disk!\")\n","\n","# ==========================================\n","# 5. VALIDASI AKHIR\n","# ==========================================\n","model.eval()\n","test_loss, test_correct, test_total = 0, 0, 0\n","\n","print(\"\\nπŸ”’ Memulai Pengujian Validasi Akhir...\")\n","with torch.no_grad():\n","    for data, target in val_loader:\n","        data, target = data.to(device), target.to(device)\n","        output = model(data)\n","        loss = criterion(output, target)\n","\n","        test_loss += loss.item()\n","        _, predicted = output.max(1)\n","        test_total += target.size(0)\n","        test_correct += predicted.eq(target).sum().item()\n","\n","final_test_acc = 100. * test_correct / test_total\n","print(\"=== HASIL EVALUASI AKHIR LOOKTHEM V5 ===\")\n","print(f\"Test Loss: {test_loss/len(val_loader):.4f} | Test Accuracy: {final_test_acc:.2f}%\")\n","\n","torch.save(model.state_dict(), \"LookThem_V5_Final.pth\")\n","print(f\"🏁 Selesai! 
Ukuran berkas final: {os.path.getsize('LookThem_V5_Final.pth') / (1024*1024):.2f} MB\")"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"tFR4mJB3Aavo","executionInfo":{"status":"ok","timestamp":1778990360196,"user_tz":-420,"elapsed":1266091,"user":{"displayName":"Cici rizky plk","userId":"03714270658772765776"}},"outputId":"1a01a465-c292-45c6-bcb7-67401ad90d97"},"execution_count":1,"outputs":[{"output_type":"stream","name":"stdout","text":["πŸ“₯ Mengunduh Tiny-ImageNet (~230MB)... Mohon tunggu...\n","βœ… Download Selesai!\n","πŸ“¦ Mengekstrak dataset...\n","βœ… Ekstrak Selesai!\n","🧹 Merapikan struktur folder Validation asli Tiny-ImageNet...\n","βœ… Folder Validation sekarang sudah rapi!\n","πŸš€ Memulai pengujian LookThem V5 (Asymmetric Fusion) menggunakan cuda...\n","Epoch 01/45 -> Train Loss: 4.6931 | Train Acc: 6.17% | LR: 0.000994\n","Epoch 02/45 -> Train Loss: 3.9831 | Train Acc: 14.51% | LR: 0.000976\n","Epoch 03/45 -> Train Loss: 3.7392 | Train Acc: 18.13% | LR: 0.000946\n","Epoch 04/45 -> Train Loss: 3.5954 | Train Acc: 20.43% | LR: 0.000905\n","Epoch 05/45 -> Train Loss: 3.4815 | Train Acc: 22.30% | LR: 0.000854\n","πŸ”’ [SYSTEM-SAVER] Progres Epoch 5 berhasil dikunci ke disk!\n","Epoch 06/45 -> Train Loss: 3.4102 | Train Acc: 23.62% | LR: 0.000794\n","Epoch 07/45 -> Train Loss: 3.3107 | Train Acc: 25.34% | LR: 0.000727\n","Epoch 08/45 -> Train Loss: 3.2379 | Train Acc: 26.54% | LR: 0.000655\n","Epoch 09/45 -> Train Loss: 3.1538 | Train Acc: 28.26% | LR: 0.000578\n","Epoch 10/45 -> Train Loss: 3.0881 | Train Acc: 28.99% | LR: 0.000500\n","πŸ”’ [SYSTEM-SAVER] Progres Epoch 10 berhasil dikunci ke disk!\n","Epoch 11/45 -> Train Loss: 3.0161 | Train Acc: 30.28% | LR: 0.000422\n","Epoch 12/45 -> Train Loss: 2.9458 | Train Acc: 31.43% | LR: 0.000345\n","Epoch 13/45 -> Train Loss: 2.8860 | Train Acc: 32.66% | LR: 0.000273\n","Epoch 14/45 -> Train Loss: 2.8269 | Train Acc: 33.79% | LR: 0.000206\n","Epoch 15/45 -> Train Loss: 2.7800 | Train Acc: 34.57% | LR: 0.000146\n","πŸ”’ [SYSTEM-SAVER] Progres Epoch 15 berhasil dikunci ke disk!\n","Epoch 16/45 -> Train Loss: 2.7267 | Train Acc: 35.61% | LR: 0.000095\n","Epoch 17/45 -> Train Loss: 2.7011 | Train Acc: 36.01% | LR: 0.000054\n","Epoch 18/45 -> Train Loss: 2.6688 | Train Acc: 36.62% | LR: 0.000024\n","Epoch 19/45 -> Train Loss: 2.6510 | Train Acc: 36.94% | LR: 0.000006\n","Epoch 20/45 -> Train Loss: 2.6438 | Train Acc: 36.98% | LR: 0.000000\n","πŸ”’ [SYSTEM-SAVER] Progres Epoch 20 berhasil dikunci ke disk!\n","\n","πŸ”’ Memulai Pengujian Validasi Akhir...\n","=== HASIL EVALUASI AKHIR LOOKTHEM V5 ===\n","Test Loss: 2.8460 | Test Accuracy: 34.32%\n","🏁 Selesai! Ukuran berkas final: 5.38 MB\n"]}]}]}