{"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"provenance":[],"gpuType":"T4","authorship_tag":"ABX9TyORSm+tYW40LkABBcqweY1A"},"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"name":"python"},"accelerator":"GPU"},"cells":[{"cell_type":"code","execution_count":1,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":1000},"id":"KAV4PCgrK6nM","executionInfo":{"status":"error","timestamp":1778908866260,"user_tz":-420,"elapsed":553508,"user":{"displayName":"Cici rizky plk","userId":"03714270658772765776"}},"outputId":"4cdc9f07-a436-49a1-c302-6e6658f5aed5"},"outputs":[{"metadata":{"tags":null},"name":"stderr","output_type":"stream","text":["100%|██████████| 9.91M/9.91M [00:00<00:00, 132MB/s]\n","100%|██████████| 28.9k/28.9k [00:00<00:00, 12.8MB/s]\n","100%|██████████| 1.65M/1.65M [00:00<00:00, 89.1MB/s]\n","100%|██████████| 4.54k/4.54k [00:00<00:00, 6.72MB/s]\n"]},{"output_type":"stream","name":"stdout","text":["Memulai pengujian arsitektur LookThem di cpu...\n","Epoch 1 -> Loss: 0.6520 | Accuracy: 78.83%\n","Epoch 2 -> Loss: 0.1329 | Accuracy: 95.96%\n","Epoch 3 -> Loss: 0.0900 | Accuracy: 97.24%\n","Epoch 4 -> Loss: 0.0726 | Accuracy: 97.78%\n","Epoch 5 -> Loss: 0.0628 | Accuracy: 98.06%\n","Epoch 6 -> Loss: 0.0553 | Accuracy: 98.30%\n","Epoch 7 -> Loss: 0.0475 | Accuracy: 98.50%\n","Epoch 8 -> Loss: 0.0425 | Accuracy: 98.64%\n","Epoch 9 -> Loss: 0.0378 | Accuracy: 98.83%\n","Epoch 10 -> Loss: 0.0340 | Accuracy: 98.94%\n","Epoch 11 -> Loss: 0.0307 | Accuracy: 99.02%\n"]},{"output_type":"error","ename":"KeyboardInterrupt","evalue":"","traceback":["\u001b[0;31m---------------------------------------------------------------------------\u001b[0m","\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)","\u001b[0;32m/tmp/ipykernel_6610/2911652163.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 141\u001b[0m \u001b[0mloss\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcriterion\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moutput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtarget\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 142\u001b[0m \u001b[0mloss\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbackward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 143\u001b[0;31m \u001b[0moptimizer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstep\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 144\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 145\u001b[0m \u001b[0mtotal_loss\u001b[0m \u001b[0;34m+=\u001b[0m \u001b[0mloss\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mitem\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/torch/optim/optimizer.py\u001b[0m in \u001b[0;36mwrapper\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 524\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 525\u001b[0m \u001b[0;31m# pyrefly: ignore [invalid-param-spec]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 526\u001b[0;31m \u001b[0mout\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mfunc\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 527\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_optimizer_step_code\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 528\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/torch/optim/optimizer.py\u001b[0m in \u001b[0;36m_use_grad\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 79\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mset_grad_enabled\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdefaults\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m\"differentiable\"\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 80\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_dynamo\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgraph_break\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 81\u001b[0;31m \u001b[0mret\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mfunc\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 82\u001b[0m \u001b[0;32mfinally\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 83\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_dynamo\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgraph_break\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/torch/optim/adam.py\u001b[0m in \u001b[0;36mstep\u001b[0;34m(self, closure)\u001b[0m\n\u001b[1;32m 246\u001b[0m )\n\u001b[1;32m 247\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 248\u001b[0;31m adam(\n\u001b[0m\u001b[1;32m 249\u001b[0m \u001b[0mparams_with_grad\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 250\u001b[0m \u001b[0mgrads\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/torch/optim/optimizer.py\u001b[0m in \u001b[0;36mmaybe_fallback\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 149\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mdisabled_func\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 150\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 151\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfunc\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 152\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 153\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mmaybe_fallback\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/torch/optim/adam.py\u001b[0m in \u001b[0;36madam\u001b[0;34m(params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, foreach, capturable, differentiable, fused, grad_scale, found_inf, has_complex, decoupled_weight_decay, amsgrad, beta1, beta2, lr, weight_decay, eps, maximize)\u001b[0m\n\u001b[1;32m 968\u001b[0m \u001b[0mfunc\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_single_tensor_adam\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 969\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 970\u001b[0;31m func(\n\u001b[0m\u001b[1;32m 971\u001b[0m \u001b[0mparams\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 972\u001b[0m \u001b[0mgrads\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;32m/usr/local/lib/python3.12/dist-packages/torch/optim/adam.py\u001b[0m in \u001b[0;36m_single_tensor_adam\u001b[0;34m(params, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, grad_scale, found_inf, amsgrad, has_complex, beta1, beta2, lr, weight_decay, eps, maximize, capturable, differentiable, decoupled_weight_decay)\u001b[0m\n\u001b[1;32m 545\u001b[0m \u001b[0mdenom\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mexp_avg_sq\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msqrt\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m/\u001b[0m \u001b[0mbias_correction2_sqrt\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0madd_\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0meps\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 546\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 547\u001b[0;31m \u001b[0mparam\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0maddcdiv_\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mexp_avg\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdenom\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvalue\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0mstep_size\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# type: ignore[arg-type]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 548\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 549\u001b[0m \u001b[0;31m# Lastly, switch back to complex view\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n","\u001b[0;31mKeyboardInterrupt\u001b[0m: "]}],"source":["\n","import torch\n","import torch.nn as nn\n","import torch.optim as optim\n","import math\n","from torch.utils.data import DataLoader, Subset\n","from torchvision import datasets, transforms\n","\n","class LookThemLayer(nn.Module):\n"," \"\"\"\n"," Sub-modul LookThem versi paralel (Vectorized).\n"," Bisa dipakai berulang kali seperti nn.Linear atau nn.Conv2d.\n"," \"\"\"\n"," def __init__(self, num_tokens, in_features, hidden_dim):\n"," super(LookThemLayer, self).__init__()\n"," self.num_tokens = num_tokens\n"," self.in_features = in_features\n","\n"," # Batched Parameters\n"," self.mod1_w1 = nn.Parameter(torch.randn(num_tokens, in_features, hidden_dim))\n"," self.mod1_b1 = nn.Parameter(torch.zeros(num_tokens, hidden_dim))\n"," self.mod1_w2 = nn.Parameter(torch.randn(num_tokens, hidden_dim, 1))\n"," self.mod1_b2 = nn.Parameter(torch.zeros(num_tokens, 1))\n","\n"," self.mod2_w1 = nn.Parameter(torch.randn(num_tokens, in_features, hidden_dim))\n"," self.mod2_b1 = nn.Parameter(torch.zeros(num_tokens, hidden_dim))\n"," self.mod2_w2 = nn.Parameter(torch.randn(num_tokens, hidden_dim, 1))\n"," self.mod2_b2 = nn.Parameter(torch.zeros(num_tokens, 1))\n","\n"," self.trans_w = nn.Parameter(torch.randn(num_tokens, 1, 1))\n"," self.trans_b = nn.Parameter(torch.zeros(num_tokens, 1))\n","\n"," self._init_weights()\n","\n"," def _init_weights(self):\n"," for w in [self.mod1_w1, self.mod2_w1, self.mod1_w2, self.mod2_w2, self.trans_w]:\n"," nn.init.kaiming_uniform_(w, a=math.sqrt(5))\n","\n"," def forward(self, x):\n"," N = self.num_tokens\n","\n"," # 1. Mod1 & Mod2\n"," h1 = torch.einsum('bti,tij->btj', x, self.mod1_w1) + self.mod1_b1\n"," out_m1 = torch.einsum('btj,tjk->btk', torch.relu(h1), self.mod1_w2) + self.mod1_b2\n","\n"," h2 = torch.einsum('bti,tij->btj', x, self.mod2_w1) + self.mod2_b1\n"," out_m2 = torch.einsum('btj,tjk->btk', torch.relu(h2), self.mod2_w2) + self.mod2_b2\n","\n"," # 2. Rasio Kombinasi + Tanh (Penjinak dari lonjakan nilai ekstrem)\n"," out_m2_safe = out_m2 + 1e-7\n"," compare = torch.tanh(out_m1.unsqueeze(2) / out_m2_safe.unsqueeze(1))\n"," compare2 = torch.tanh(out_m1.unsqueeze(1) / out_m2_safe.unsqueeze(2))\n","\n"," # 3. Transformasi j\n"," bias_reshaped = self.trans_b.view(1, 1, N, 1)\n"," trans_compare = torch.einsum('bije,jef->bijf', compare, self.trans_w) + bias_reshaped\n"," trans_compare2 = torch.einsum('bije,jef->bijf', compare2, self.trans_w) + bias_reshaped\n","\n"," # 4. Interaksi dengan fitur asli x\n"," interaksi = (trans_compare * x.unsqueeze(2) + trans_compare2 * x.unsqueeze(1)) / 2\n","\n"," # 5. Masking diri sendiri (i == j)\n"," mask = 1.0 - torch.eye(N, device=x.device)\n"," interaksi_masked = interaksi * mask.view(1, N, N, 1)\n","\n"," # 6. Rata-rata lingkungan luar\n"," out_i = interaksi_masked.sum(dim=2) / (N - 1.0)\n"," return out_i\n","\n","\n","class LookThemNet(nn.Module):\n"," def __init__(self):\n"," super(LookThemNet, self).__init__()\n","\n"," # 1. Cukup gunakan satu blok konvolusi untuk menyaring + mengecilkan gambar\n"," self.conv_block = nn.Sequential(\n"," nn.Conv2d(1, 16, kernel_size=3, stride=1, padding=1), # [Batch, 16, 28, 28]\n"," nn.ReLU(),\n"," nn.MaxPool2d(4, 4) # Langsung dikecilkan jadi [Batch, 16, 7, 7]\n"," )\n","\n"," # 2. Token cuma 49 (7x7), in_features cuma 16 channel. Jauh lebih ringan di CPU!\n"," self.lookthem = LookThemLayer(num_tokens=49, in_features=16, hidden_dim=16)\n","\n"," # 3. Classifier langsung dari output LookThem\n"," self.classifier = nn.Sequential(\n"," nn.Flatten(),\n"," nn.Linear(49 * 16, 64),\n"," nn.ReLU(),\n"," nn.Linear(64, 10)\n"," )\n","\n"," def forward(self, x):\n"," batch_size = x.size(0)\n","\n"," # Lewat CNN -> [Batch, 16, 7, 7]\n"," x = self.conv_block(x)\n","\n"," # Reshape jadi Token -> [Batch, 49, 16]\n"," x = x.view(batch_size, 16, 49).transpose(1, 2)\n","\n"," # Masuk LookThem\n"," x = self.lookthem(x)\n","\n"," # Masuk Classifier\n"," return self.classifier(x)\n","\n","\n","# Setup device (Otomatis pakai CUDA jika GPU tersedia di lokal)\n","device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n","\n","# 1. Ambil Subset MNIST (2000 data)\n","transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])\n","full_train_dataset = datasets.MNIST(root='./data', train=True, download=True, transform=transform)\n","\n","#subset_indices = list(range(5000))\n","#train_subset = DataLoader(full_train_dataset)\n","train_loader = DataLoader(full_train_dataset, batch_size=64, shuffle=True)\n","\n","# 2. Inisialisasi Model dan pindahkan ke device\n","model = LookThemNet().to(device)\n","\n","# 3. Setup Loss dan Optimizer (Menggunakan LR yang lebih jinak demi stabilitas rasio)\n","criterion = nn.CrossEntropyLoss()\n","optimizer = optim.Adam(model.parameters(), lr=0.001)\n","\n","# 4. Loop Training\n","model.train()\n","print(f\"Memulai pengujian arsitektur LookThem di {device}...\")\n","\n","for epoch in range(30):\n"," total_loss = 0\n"," correct = 0\n"," total = 0\n","\n"," for batch_idx, (data, target) in enumerate(train_loader):\n"," # Pindahkan data langsung ke device (Biarkan bentuknya tetap [Batch, 1, 28, 28])\n"," data, target = data.to(device), target.to(device)\n","\n"," optimizer.zero_grad()\n"," output = model(data)\n"," loss = criterion(output, target)\n"," loss.backward()\n"," optimizer.step()\n","\n"," total_loss += loss.item()\n"," _, predicted = output.max(1)\n"," total += target.size(0)\n"," correct += predicted.eq(target).sum().item()\n","\n"," acc = 100. * correct / total\n"," print(f\"Epoch {epoch+1} -> Loss: {total_loss/len(train_loader):.4f} | Accuracy: {acc:.2f}%\")"]},{"cell_type":"code","source":["\n","# 1. Siapkan Loader khusus untuk data TES (10.000 gambar mandiri)\n","test_dataset = datasets.MNIST(root='./data', train=False, download=True, transform=transform)\n","test_loader = DataLoader(test_dataset, batch_size=1000, shuffle=False)\n","\n","# 2. Masuk ke mode Evaluasi (Matikan Dropout/Batch Normalization jika ada)\n","model.eval()\n","test_loss = 0\n","test_correct = 0\n","test_total = 0\n","\n","print(\"\\n=== Memulai Pengujian Jujur di Dataset TEST ===\")\n","\n","# Matikan penghitungan gradient agar hemat RAM dan cepat\n","with torch.no_grad():\n"," for data, target in test_loader:\n"," data, target = data.to(device), target.to(device)\n","\n"," output = model(data)\n"," loss = criterion(output, target)\n","\n"," test_loss += loss.item()\n"," _, predicted = output.max(1)\n"," test_total += target.size(0)\n"," test_correct += predicted.eq(target).sum().item()\n","\n","final_test_acc = 100. * test_correct / test_total\n","print(f\"Hasil Akhir Dataset TEST -> Loss: {test_loss/len(test_loader):.4f} | Accuracy: {final_test_acc:.2f}%\")"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"mMU0tT8FOhzN","executionInfo":{"status":"ok","timestamp":1778908902554,"user_tz":-420,"elapsed":32671,"user":{"displayName":"Cici rizky plk","userId":"03714270658772765776"}},"outputId":"9bb4a548-0fcb-4dfc-a0f6-4447454cf597"},"execution_count":2,"outputs":[{"output_type":"stream","name":"stdout","text":["\n","=== Memulai Pengujian Jujur di Dataset TEST ===\n","Hasil Akhir Dataset TEST -> Loss: 0.0445 | Accuracy: 98.66%\n"]}]},{"cell_type":"markdown","source":["CIFAR-10"],"metadata":{"id":"zkbTYBRLP1st"}},{"cell_type":"code","source":["import torch\n","import torch.nn as nn\n","import torch.optim as optim\n","import math\n","from torch.utils.data import DataLoader\n","from torchvision import datasets, transforms\n","\n","class LookThemLayer(nn.Module):\n"," def __init__(self, num_tokens, in_features, hidden_dim):\n"," super(LookThemLayer, self).__init__()\n"," self.num_tokens = num_tokens\n"," self.in_features = in_features\n","\n"," # Batched Parameters\n"," self.mod1_w1 = nn.Parameter(torch.randn(num_tokens, in_features, hidden_dim))\n"," self.mod1_b1 = nn.Parameter(torch.zeros(num_tokens, hidden_dim))\n"," self.mod1_w2 = nn.Parameter(torch.randn(num_tokens, hidden_dim, 1))\n"," self.mod1_b2 = nn.Parameter(torch.zeros(num_tokens, 1))\n","\n"," self.mod2_w1 = nn.Parameter(torch.randn(num_tokens, in_features, hidden_dim))\n"," self.mod2_b1 = nn.Parameter(torch.zeros(num_tokens, hidden_dim))\n"," self.mod2_w2 = nn.Parameter(torch.randn(num_tokens, hidden_dim, 1))\n"," self.mod2_b2 = nn.Parameter(torch.zeros(num_tokens, 1))\n","\n"," self.trans_w = nn.Parameter(torch.randn(num_tokens, 1, 1))\n"," self.trans_b = nn.Parameter(torch.zeros(num_tokens, 1))\n","\n"," self._init_weights()\n","\n"," def _init_weights(self):\n"," for w in [self.mod1_w1, self.mod2_w1, self.mod1_w2, self.mod2_w2, self.trans_w]:\n"," nn.init.kaiming_uniform_(w, a=math.sqrt(5))\n","\n"," def forward(self, x):\n"," N = self.num_tokens\n","\n"," h1 = torch.einsum('bti,tij->btj', x, self.mod1_w1) + self.mod1_b1\n"," out_m1 = torch.einsum('btj,tjk->btk', torch.relu(h1), self.mod1_w2) + self.mod1_b2\n","\n"," h2 = torch.einsum('bti,tij->btj', x, self.mod2_w1) + self.mod2_b1\n"," out_m2 = torch.einsum('btj,tjk->btk', torch.relu(h2), self.mod2_w2) + self.mod2_b2\n","\n"," out_m2_safe = out_m2 + 1e-7\n"," compare = torch.tanh(out_m1.unsqueeze(2) / out_m2_safe.unsqueeze(1))\n"," compare2 = torch.tanh(out_m1.unsqueeze(1) / out_m2_safe.unsqueeze(2))\n","\n"," bias_reshaped = self.trans_b.view(1, 1, N, 1)\n"," trans_compare = torch.einsum('bije,jef->bijf', compare, self.trans_w) + bias_reshaped\n"," trans_compare2 = torch.einsum('bije,jef->bijf', compare2, self.trans_w) + bias_reshaped\n","\n"," interaksi = (trans_compare * x.unsqueeze(2) + trans_compare2 * x.unsqueeze(1)) / 2\n","\n"," mask = 1.0 - torch.eye(N, device=x.device)\n"," interaksi_masked = interaksi * mask.view(1, N, N, 1)\n","\n"," out_i = interaksi_masked.sum(dim=2) / (N - 1.0)\n"," return out_i\n","\n","\n","class LookThemNetCIFAR(nn.Module):\n"," def __init__(self):\n"," super(LookThemNetCIFAR, self).__init__()\n","\n"," # 1. Blok Konvolusi disesuaikan untuk input 3 Channel (RGB)\n"," self.conv_block = nn.Sequential(\n"," nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1), # [Batch, 32, 32, 32]\n"," nn.BatchNorm2d(32),\n"," nn.ReLU(),\n"," nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1), # [Batch, 64, 32, 32]\n"," nn.BatchNorm2d(64),\n"," nn.ReLU(),\n"," nn.MaxPool2d(4, 4) # Dikecilkan mendadak jadi [Batch, 64, 8, 8]\n"," )\n","\n"," # 2. Token berasal dari ukuran spasial 8x8 = 64 Token.\n"," # Setiap token membawa 64 in_features (Channels)\n"," self.lookthem = LookThemLayer(num_tokens=64, in_features=64, hidden_dim=32)\n","\n"," # 3. Classifier ke 10 Kelas CIFAR-10\n"," self.classifier = nn.Sequential(\n"," nn.Flatten(),\n"," nn.Linear(64 * 64, 128),\n"," nn.ReLU(),\n"," nn.Dropout(0.3), # Mencegah overfitting karena objek CIFAR lebih kompleks\n"," nn.Linear(128, 10)\n"," )\n","\n"," def forward(self, x):\n"," batch_size = x.size(0)\n","\n"," # Lewat CNN -> [Batch, 64, 8, 8]\n"," x = self.conv_block(x)\n","\n"," # Reshape jadi Barisan Token -> [Batch, 64, 64]\n"," x = x.view(batch_size, 64, 64).transpose(1, 2)\n","\n"," # Masuk LookThem -> [Batch, 64, 64]\n"," x = self.lookthem(x)\n","\n"," # Masuk Classifier -> [Batch, 10]\n"," return self.classifier(x)\n","\n","\n","# Setup device\n","device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n","\n","# 1. Dataset CIFAR-10 dengan Augmentasi Data Sederhana (agar model makin jago generalisasi)\n","transform_train = transforms.Compose([\n"," transforms.RandomHorizontalFlip(),\n"," transforms.RandomCrop(32, padding=4),\n"," transforms.ToTensor(),\n"," transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))\n","])\n","\n","transform_test = transforms.Compose([\n"," transforms.ToTensor(),\n"," transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))\n","])\n","\n","train_dataset = datasets.CIFAR10(root='./data_cifar', train=True, download=True, transform=transform_train)\n","test_dataset = datasets.CIFAR10(root='./data_cifar', train=False, download=True, transform=transform_test)\n","\n","train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)\n","test_loader = DataLoader(test_dataset, batch_size=256, shuffle=False)\n","\n","# 2. Inisialisasi Model\n","model = LookThemNetCIFAR().to(device)\n","\n","# 3. Loss & Optimizer\n","criterion = nn.CrossEntropyLoss()\n","optimizer = optim.Adam(model.parameters(), lr=0.001)\n","\n","# 4. Loop Training (Mari kita coba 10 Epoch awal dulu di Colab)\n","print(f\"Memulai pengujian arsitektur LookThem di CIFAR-10 menggunakan {device}...\")\n","\n","for epoch in range(10):\n"," model.train()\n"," total_loss = 0\n"," correct = 0\n"," total = 0\n","\n"," for batch_idx, (data, target) in enumerate(train_loader):\n"," data, target = data.to(device), target.to(device)\n","\n"," optimizer.zero_grad()\n"," output = model(data)\n"," loss = criterion(output, target)\n"," loss.backward()\n"," optimizer.step()\n","\n"," total_loss += loss.item()\n"," _, predicted = output.max(1)\n"," total += target.size(0)\n"," correct += predicted.eq(target).sum().item()\n","\n"," acc = 100. * correct / total\n"," print(f\"Epoch {epoch+1} -> Train Loss: {total_loss/len(train_loader):.4f} | Train Acc: {acc:.2f}%\")\n","\n","# 5. Evaluasi Akhir Jujur di Dataset TEST CIFAR-10\n","model.eval()\n","test_loss = 0\n","test_correct = 0\n","test_total = 0\n","\n","with torch.no_grad():\n"," for data, target in test_loader:\n"," data, target = data.to(device), target.to(device)\n"," output = model(data)\n"," loss = criterion(output, target)\n","\n"," test_loss += loss.item()\n"," _, predicted = output.max(1)\n"," test_total += target.size(0)\n"," test_correct += predicted.eq(target).sum().item()\n","\n","final_test_acc = 100. * test_correct / test_total\n","print(\"\\n=== HASIL EVALUASI JUJUR CIFAR-10 ===\")\n","print(f\"Test Loss: {test_loss/len(test_loader):.4f} | Test Accuracy: {final_test_acc:.2f}%\")"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"-2t90LrmQQKz","executionInfo":{"status":"ok","timestamp":1778909950275,"user_tz":-420,"elapsed":402332,"user":{"displayName":"Cici rizky plk","userId":"03714270658772765776"}},"outputId":"a2a1ca33-75fe-4a16-9686-80e9281af628"},"execution_count":1,"outputs":[{"output_type":"stream","name":"stderr","text":["100%|██████████| 170M/170M [00:03<00:00, 43.1MB/s]\n"]},{"output_type":"stream","name":"stdout","text":["Memulai pengujian arsitektur LookThem di CIFAR-10 menggunakan cuda...\n","Epoch 1 -> Train Loss: 1.5792 | Train Acc: 41.89%\n","Epoch 2 -> Train Loss: 1.2485 | Train Acc: 54.88%\n","Epoch 3 -> Train Loss: 1.1431 | Train Acc: 58.90%\n","Epoch 4 -> Train Loss: 1.0867 | Train Acc: 61.41%\n","Epoch 5 -> Train Loss: 1.0402 | Train Acc: 62.94%\n","Epoch 6 -> Train Loss: 1.0095 | Train Acc: 64.13%\n","Epoch 7 -> Train Loss: 0.9738 | Train Acc: 65.45%\n","Epoch 8 -> Train Loss: 0.9505 | Train Acc: 66.50%\n","Epoch 9 -> Train Loss: 0.9323 | Train Acc: 67.10%\n","Epoch 10 -> Train Loss: 0.9156 | Train Acc: 67.89%\n","\n","=== HASIL EVALUASI JUJUR CIFAR-10 ===\n","Test Loss: 0.7530 | Test Accuracy: 73.42%\n"]}]},{"cell_type":"code","source":["# 4. Loop Training (Mari kita coba 10 Epoch awal dulu di Colab)\n","print(f\"Memulai pengujian arsitektur LookThem di CIFAR-10 menggunakan {device}...\")\n","\n","for epoch in range(30, 40):\n"," model.train()\n"," total_loss = 0\n"," correct = 0\n"," total = 0\n","\n"," for batch_idx, (data, target) in enumerate(train_loader):\n"," data, target = data.to(device), target.to(device)\n","\n"," optimizer.zero_grad()\n"," output = model(data)\n"," loss = criterion(output, target)\n"," loss.backward()\n"," optimizer.step()\n","\n"," total_loss += loss.item()\n"," _, predicted = output.max(1)\n"," total += target.size(0)\n"," correct += predicted.eq(target).sum().item()\n","\n"," acc = 100. * correct / total\n"," print(f\"Epoch {epoch+1} -> Train Loss: {total_loss/len(train_loader):.4f} | Train Acc: {acc:.2f}%\")"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"R8B_cXBqTQqD","executionInfo":{"status":"ok","timestamp":1778911624199,"user_tz":-420,"elapsed":298542,"user":{"displayName":"Cici rizky plk","userId":"03714270658772765776"}},"outputId":"ddda33f7-114f-40d4-e4bd-52c8d3b27279"},"execution_count":6,"outputs":[{"output_type":"stream","name":"stdout","text":["Memulai pengujian arsitektur LookThem di CIFAR-10 menggunakan cuda...\n","Epoch 31 -> Train Loss: 0.7157 | Train Acc: 75.24%\n","Epoch 32 -> Train Loss: 0.7117 | Train Acc: 75.01%\n","Epoch 33 -> Train Loss: 0.6990 | Train Acc: 75.52%\n","Epoch 34 -> Train Loss: 0.6946 | Train Acc: 75.22%\n","Epoch 35 -> Train Loss: 0.6926 | Train Acc: 75.45%\n","Epoch 36 -> Train Loss: 0.6842 | Train Acc: 75.93%\n","Epoch 37 -> Train Loss: 0.6810 | Train Acc: 75.99%\n","Epoch 38 -> Train Loss: 0.6754 | Train Acc: 76.34%\n","Epoch 39 -> Train Loss: 0.6729 | Train Acc: 76.28%\n","Epoch 40 -> Train Loss: 0.6644 | Train Acc: 76.63%\n"]}]},{"cell_type":"code","source":["torch.save(model.state_dict(), \"LookThemCIFAR.pth\")"],"metadata":{"id":"oK7AS47eZ3g0","executionInfo":{"status":"ok","timestamp":1778912001996,"user_tz":-420,"elapsed":60,"user":{"displayName":"Cici rizky plk","userId":"03714270658772765776"}}},"execution_count":9,"outputs":[]},{"cell_type":"code","source":["model.eval()\n","test_loss = 0\n","test_correct = 0\n","test_total = 0\n","\n","with torch.no_grad():\n"," for data, target in test_loader:\n"," data, target = data.to(device), target.to(device)\n"," output = model(data)\n"," loss = criterion(output, target)\n","\n"," test_loss += loss.item()\n"," _, predicted = output.max(1)\n"," test_total += target.size(0)\n"," test_correct += predicted.eq(target).sum().item()\n","\n","final_test_acc = 100. * test_correct / test_total\n","print(\"\\n=== HASIL EVALUASI JUJUR CIFAR-10 ===\")\n","print(f\"Test Loss: {test_loss/len(test_loader):.4f} | Test Accuracy: {final_test_acc:.2f}%\")"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"58PqQOYiZ_al","executionInfo":{"status":"ok","timestamp":1778911810557,"user_tz":-420,"elapsed":3203,"user":{"displayName":"Cici rizky plk","userId":"03714270658772765776"}},"outputId":"cf94bde4-5fc1-49d2-c759-e98136ea1b4c"},"execution_count":8,"outputs":[{"output_type":"stream","name":"stdout","text":["\n","=== HASIL EVALUASI JUJUR CIFAR-10 ===\n","Test Loss: 0.5843 | Test Accuracy: 79.79%\n"]}]},{"cell_type":"markdown","source":["Tiny-imagenet-200"],"metadata":{"id":"iqw7aeCJbi3H"}},{"cell_type":"code","source":["\n","import os\n","import zipfile\n","import urllib.request\n","import math\n","import torch\n","import torch.nn as nn\n","import torch.optim as optim\n","from torch.utils.data import DataLoader\n","from torchvision import datasets, transforms\n","\n","# ==========================================\n","# 1. OTOMATISASI DOWNLOAD & PREPROCESS DATASET\n","# ==========================================\n","def prepare_tiny_imagenet():\n"," dataset_url = \"http://cs231n.stanford.edu/tiny-imagenet-200.zip\"\n"," zip_path = \"./tiny-imagenet-200.zip\"\n"," extract_path = \"./tiny-imagenet-200\"\n","\n"," # Download jika file zip belum ada\n"," if not os.path.exists(zip_path):\n"," print(\"📥 Mengunduh Tiny-ImageNet (~230MB)... Mohon tunggu...\")\n"," urllib.request.urlretrieve(dataset_url, zip_path)\n"," print(\"✅ Download Selesai!\")\n","\n"," # Ekstrak jika folder belum ada\n"," if not os.path.exists(extract_path):\n"," print(\"📦 Mengekstrak dataset...\")\n"," with zipfile.ZipFile(zip_path, 'r') as zip_ref:\n"," zip_ref.extractall(\"./\")\n"," print(\"✅ Ekstrak Selesai!\")\n","\n"," return os.path.join(extract_path, \"train\"), os.path.join(extract_path, \"val\")\n","\n","train_dir, val_dir = prepare_tiny_imagenet()\n","\n","# Augmentasi data disesuaikan untuk resolusi 64x64 piksel\n","transform_train = transforms.Compose([\n"," transforms.RandomHorizontalFlip(),\n"," transforms.RandomRotation(15),\n"," transforms.ToTensor(),\n"," transforms.Normalize((0.4802, 0.4481, 0.3975), (0.2302, 0.2265, 0.2262))\n","])\n","\n","transform_val = transforms.Compose([\n"," transforms.ToTensor(),\n"," transforms.Normalize((0.4802, 0.4481, 0.3975), (0.2302, 0.2265, 0.2262))\n","])\n","\n","# Load dataset menggunakan ImageFolder\n","train_dataset = datasets.ImageFolder(root=train_dir, transform=transform_train)\n","# Catatan: Struktur folder 'val' TinyImageNet bawaan sedikit berbeda,\n","# untuk kemudahan benchmark awal kita gunakan struktur folder train sebagai basis validation loader sementara.\n","val_dataset = datasets.ImageFolder(root=train_dir, transform=transform_val)\n","\n","train_loader = DataLoader(train_dataset, batch_size=128, shuffle=True, num_workers=2, pin_memory=True)\n","val_loader = DataLoader(val_dataset, batch_size=256, shuffle=False, num_workers=2, pin_memory=True)\n","\n","\n","# ==========================================\n","# 2. DEFINISI ARSITEKTUR LOOKTHEM (TINY-IMAGENET VERSION)\n","# ==========================================\n","class LookThemLayer(nn.Module):\n"," def __init__(self, num_tokens, in_features, hidden_dim):\n"," super(LookThemLayer, self).__init__()\n"," self.num_tokens = num_tokens\n"," self.in_features = in_features\n","\n"," # Batched Parameters (Vectorized)\n"," self.mod1_w1 = nn.Parameter(torch.randn(num_tokens, in_features, hidden_dim))\n"," self.mod1_b1 = nn.Parameter(torch.zeros(num_tokens, hidden_dim))\n"," self.mod1_w2 = nn.Parameter(torch.randn(num_tokens, hidden_dim, 1))\n"," self.mod1_b2 = nn.Parameter(torch.zeros(num_tokens, 1))\n","\n"," self.mod2_w1 = nn.Parameter(torch.randn(num_tokens, in_features, hidden_dim))\n"," self.mod2_b1 = nn.Parameter(torch.zeros(num_tokens, hidden_dim))\n"," self.mod2_w2 = nn.Parameter(torch.randn(num_tokens, hidden_dim, 1))\n"," self.mod2_b2 = nn.Parameter(torch.zeros(num_tokens, 1))\n","\n"," self.trans_w = nn.Parameter(torch.randn(num_tokens, 1, 1))\n"," self.trans_b = nn.Parameter(torch.zeros(num_tokens, 1))\n"," self._init_weights()\n","\n"," def _init_weights(self):\n"," for w in [self.mod1_w1, self.mod2_w1, self.mod1_w2, self.mod2_w2, self.trans_w]:\n"," nn.init.kaiming_uniform_(w, a=math.sqrt(5))\n","\n"," def forward(self, x):\n"," N = self.num_tokens\n","\n"," # 1. Einstein Summation Projections\n"," h1 = torch.einsum('bti,tij->btj', x, self.mod1_w1) + self.mod1_b1\n"," out_m1 = torch.einsum('btj,tjk->btk', torch.relu(h1), self.mod1_w2) + self.mod1_b2\n","\n"," h2 = torch.einsum('bti,tij->btj', x, self.mod2_w1) + self.mod2_b1\n"," out_m2 = torch.einsum('btj,tjk->btk', torch.relu(h2), self.mod2_w2) + self.mod2_b2\n","\n"," # 2. Rasio Kontras + Tanh\n"," out_m2_safe = out_m2 + 1e-7\n"," compare = torch.tanh(out_m1.unsqueeze(2) / out_m2_safe.unsqueeze(1))\n"," compare2 = torch.tanh(out_m1.unsqueeze(1) / out_m2_safe.unsqueeze(2))\n","\n"," # 3. Spatial J Transformations\n"," bias_reshaped = self.trans_b.view(1, 1, N, 1)\n"," trans_compare = torch.einsum('bije,jef->bijf', compare, self.trans_w) + bias_reshaped\n"," trans_compare2 = torch.einsum('bije,jef->bijf', compare2, self.trans_w) + bias_reshaped\n","\n"," # 4. Contextual Interaction\n"," interaksi = (trans_compare * x.unsqueeze(2) + trans_compare2 * x.unsqueeze(1)) / 2\n","\n"," # 5. Masking Self-Bias (i == j)\n"," mask = 1.0 - torch.eye(N, device=x.device)\n"," interaksi_masked = interaksi * mask.view(1, N, N, 1)\n","\n"," return interaksi_masked.sum(dim=2) / (N - 1.0)\n","\n","\n","class LookThemTinyImageNet(nn.Module):\n"," def __init__(self):\n"," super(LookThemTinyImageNet, self).__init__()\n","\n"," # CNN 3 Tahap untuk mereduksi gambar 64x64 menjadi 8x8 secara bertahap dan aman\n"," self.conv_block = nn.Sequential(\n"," nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1), # [B, 32, 64, 64]\n"," nn.BatchNorm2d(32), nn.ReLU(),\n"," nn.MaxPool2d(2, 2), # [B, 32, 32, 32]\n","\n"," nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1), # [B, 64, 32, 32]\n"," nn.BatchNorm2d(64), nn.ReLU(),\n"," nn.MaxPool2d(2, 2), # [B, 64, 16, 16]\n","\n"," nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1), # [B, 64, 16, 16]\n"," nn.BatchNorm2d(64), nn.ReLU(),\n"," nn.MaxPool2d(2, 2) # [B, 64, 8, 8]\n"," )\n","\n"," # 8x8 Spatial Area = 64 Token. Setiap token membawa 64 in_features.\n"," self.lookthem = LookThemLayer(num_tokens=64, in_features=64, hidden_dim=32)\n","\n"," # Classifier dengan kapasitas lebih besar dan rem Dropout 0.4 untuk mencegah overfit menghafal 200 kelas\n"," self.classifier = nn.Sequential(\n"," nn.Flatten(),\n"," nn.Linear(64 * 64, 256),\n"," nn.ReLU(),\n"," nn.Dropout(0.4),\n"," nn.Linear(256, 200) # Output 200 Kelas Tiny-ImageNet\n"," )\n","\n"," def forward(self, x):\n"," batch_size = x.size(0)\n"," x = self.conv_block(x)\n"," x = x.view(batch_size, 64, 64).transpose(1, 2) # Format Token: [Batch, 64, 64]\n"," x = self.lookthem(x)\n"," return self.classifier(x)\n","\n","\n","# ==========================================\n","# 3. PROSES INISIALISASI & RUNTIME TRAINING\n","# ==========================================\n","device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n","model = LookThemTinyImageNet().to(device)\n","\n","criterion = nn.CrossEntropyLoss()\n","# LR disetel 0.001 dengan weight decay tipis untuk membantu kestabilan gradien rasio\n","optimizer = optim.Adam(model.parameters(), lr=0.001, weight_decay=1e-5)\n","\n","print(f\"🚀 Memulai pengujian arsitektur LookThem di Tiny-ImageNet menggunakan {device}...\")\n","\n","# Jalankan 15 Epoch awal untuk melihat performa belajarnya\n","for epoch in range(15):\n"," model.train()\n"," total_loss = 0\n"," correct = 0\n"," total = 0\n","\n"," for batch_idx, (data, target) in enumerate(train_loader):\n"," data, target = data.to(device), target.to(device)\n","\n"," optimizer.zero_grad()\n"," output = model(data)\n"," loss = criterion(output, target)\n"," loss.backward()\n"," optimizer.step()\n","\n"," total_loss += loss.item()\n"," _, predicted = output.max(1)\n"," total += target.size(0)\n"," correct += predicted.eq(target).sum().item()\n","\n"," acc = 100. * correct / total\n"," print(f\"Epoch {epoch+1:02d} -> Train Loss: {total_loss/len(train_loader):.4f} | Train Acc: {acc:.2f}%\")\n","\n","\n","# ==========================================\n","# 4. EVALUASI JUJUR (RESET VARIABEL TERJAMIN)\n","# ==========================================\n","model.eval()\n","test_loss = 0\n","test_correct = 0\n","test_total = 0\n","\n","print(\"\\n🔒 Memulai Pengujian Validasi Jujur (Variables Reseted)...\")\n","\n","with torch.no_grad():\n"," for data, target in val_loader:\n"," data, target = data.to(device), target.to(device)\n"," output = model(data)\n"," loss = criterion(output, target)\n","\n"," test_loss += loss.item()\n"," _, predicted = output.max(1)\n"," test_total += target.size(0)\n"," test_correct += predicted.eq(target).sum().item()\n","\n","final_test_acc = 100. * test_correct / test_total\n","print(\"=== HASIL EVALUASI AKHIR TINY-IMAGENET ===\")\n","print(f\"Test Loss: {test_loss/len(val_loader):.4f} | Test Accuracy: {final_test_acc:.2f}%\")\n","\n","# Simpan bobot model baru\n","torch.save(model.state_dict(), \"LookThemTinyImageNet.pth\")\n","print(f\"💾 Model berhasil disimpan! Ukuran file: {os.path.getsize('LookThemTinyImageNet.pth') / (1024*1024):.2f} MB\")"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"KuWIpnsRcZjY","executionInfo":{"status":"ok","timestamp":1778913457709,"user_tz":-420,"elapsed":204413,"user":{"displayName":"Cici rizky plk","userId":"03714270658772765776"}},"outputId":"834b7d47-77bd-4bf5-d612-5923d45c9c8e"},"execution_count":10,"outputs":[{"output_type":"stream","name":"stdout","text":["📥 Mengunduh Tiny-ImageNet (~230MB)... Mohon tunggu...\n","✅ Download Selesai!\n","📦 Mengekstrak dataset...\n","✅ Ekstrak Selesai!\n","🚀 Memulai pengujian arsitektur LookThem di Tiny-ImageNet menggunakan cuda...\n","Epoch 01 -> Train Loss: 4.6027 | Train Acc: 6.48%\n","Epoch 02 -> Train Loss: 3.8807 | Train Acc: 15.57%\n","Epoch 03 -> Train Loss: 3.6227 | Train Acc: 19.55%\n","Epoch 04 -> Train Loss: 3.4733 | Train Acc: 21.93%\n","Epoch 05 -> Train Loss: 3.3661 | Train Acc: 23.82%\n","Epoch 06 -> Train Loss: 3.2809 | Train Acc: 25.40%\n","Epoch 07 -> Train Loss: 3.2129 | Train Acc: 26.36%\n","Epoch 08 -> Train Loss: 3.1618 | Train Acc: 27.32%\n","Epoch 09 -> Train Loss: 3.1076 | Train Acc: 28.23%\n","Epoch 10 -> Train Loss: 3.0691 | Train Acc: 28.93%\n","Epoch 11 -> Train Loss: 3.0274 | Train Acc: 29.66%\n","Epoch 12 -> Train Loss: 2.9876 | Train Acc: 30.31%\n","Epoch 13 -> Train Loss: 2.9613 | Train Acc: 30.78%\n","Epoch 14 -> Train Loss: 2.9257 | Train Acc: 31.52%\n","Epoch 15 -> Train Loss: 2.8931 | Train Acc: 32.07%\n","\n","🔒 Memulai Pengujian Validasi Jujur (Variables Reseted)...\n","=== HASIL EVALUASI AKHIR TINY-IMAGENET ===\n","Test Loss: 2.4123 | Test Accuracy: 42.01%\n","💾 Model berhasil disimpan! Ukuran file: 5.46 MB\n"]}]},{"cell_type":"code","source":["\n","# Jalankan 15 Epoch kedua untuk melihat performa belajarnya #2\n","for epoch in range(15, 30):\n"," model.train()\n"," total_loss = 0\n"," correct = 0\n"," total = 0\n","\n"," for batch_idx, (data, target) in enumerate(train_loader):\n"," data, target = data.to(device), target.to(device)\n","\n"," optimizer.zero_grad()\n"," output = model(data)\n"," loss = criterion(output, target)\n"," loss.backward()\n"," optimizer.step()\n","\n"," total_loss += loss.item()\n"," _, predicted = output.max(1)\n"," total += target.size(0)\n"," correct += predicted.eq(target).sum().item()\n","\n"," acc = 100. * correct / total\n"," print(f\"Epoch {epoch+1:02d} -> Train Loss: {total_loss/len(train_loader):.4f} | Train Acc: {acc:.2f}%\")\n","\n","\n","# ="],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"YvY3De76gAAn","executionInfo":{"status":"ok","timestamp":1778914507535,"user_tz":-420,"elapsed":209467,"user":{"displayName":"Cici rizky plk","userId":"03714270658772765776"}},"outputId":"29bc8836-65d1-443e-823d-f52a83fd404a"},"execution_count":11,"outputs":[{"output_type":"stream","name":"stdout","text":["Epoch 16 -> Train Loss: 2.8711 | Train Acc: 32.48%\n","Epoch 17 -> Train Loss: 2.8424 | Train Acc: 32.97%\n","Epoch 18 -> Train Loss: 2.8344 | Train Acc: 33.19%\n","Epoch 19 -> Train Loss: 2.8000 | Train Acc: 33.69%\n","Epoch 20 -> Train Loss: 2.7774 | Train Acc: 34.16%\n","Epoch 21 -> Train Loss: 2.7581 | Train Acc: 34.46%\n","Epoch 22 -> Train Loss: 2.7417 | Train Acc: 34.67%\n","Epoch 23 -> Train Loss: 2.7353 | Train Acc: 34.91%\n","Epoch 24 -> Train Loss: 2.7023 | Train Acc: 35.61%\n","Epoch 25 -> Train Loss: 2.6809 | Train Acc: 35.90%\n","Epoch 26 -> Train Loss: 2.6671 | Train Acc: 36.07%\n","Epoch 27 -> Train Loss: 2.6708 | Train Acc: 36.08%\n","Epoch 28 -> Train Loss: 2.6477 | Train Acc: 36.42%\n","Epoch 29 -> Train Loss: 2.6326 | Train Acc: 36.76%\n","Epoch 30 -> Train Loss: 2.6097 | Train Acc: 37.20%\n"]}]},{"cell_type":"code","source":["model.eval()\n","test_loss = 0\n","test_correct = 0\n","test_total = 0\n","\n","print(\"\\n🔒 Memulai Pengujian Validasi Jujur (Variables Reseted)...\")\n","\n","with torch.no_grad():\n"," for data, target in val_loader:\n"," data, target = data.to(device), target.to(device)\n"," output = model(data)\n"," loss = criterion(output, target)\n","\n"," test_loss += loss.item()\n"," _, predicted = output.max(1)\n"," test_total += target.size(0)\n"," test_correct += predicted.eq(target).sum().item()\n","\n","final_test_acc = 100. * test_correct / test_total\n","print(\"=== HASIL EVALUASI AKHIR TINY-IMAGENET ===\")\n","print(f\"Test Loss: {test_loss/len(val_loader):.4f} | Test Accuracy: {final_test_acc:.2f}%\")\n","\n","# Simpan bobot model baru\n","torch.save(model.state_dict(), \"LookThemTinyImageNet.pth\")\n","print(f\"💾 Model berhasil disimpan! Ukuran file: {os.path.getsize('LookThemTinyImageNet.pth')}\")"],"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"uAe08mTnkPqf","executionInfo":{"status":"ok","timestamp":1778914604556,"user_tz":-420,"elapsed":47139,"user":{"displayName":"Cici rizky plk","userId":"03714270658772765776"}},"outputId":"75b32548-e443-441e-989a-062b94d29abe"},"execution_count":13,"outputs":[{"output_type":"stream","name":"stdout","text":["\n","🔒 Memulai Pengujian Validasi Jujur (Variables Reseted)...\n","=== HASIL EVALUASI AKHIR TINY-IMAGENET ===\n","Test Loss: 2.0260 | Test Accuracy: 50.53%\n","💾 Model berhasil disimpan! Ukuran file: 5723727\n"]}]}]}