{"metadata":{"kernelspec":{"language":"python","display_name":"Python 3","name":"python3"},"language_info":{"name":"python","version":"3.11.13","mimetype":"text/x-python","codemirror_mode":{"name":"ipython","version":3},"pygments_lexer":"ipython3","nbconvert_exporter":"python","file_extension":".py"},"kaggle":{"accelerator":"nvidiaTeslaT4","dataSources":[{"sourceId":13013760,"sourceType":"datasetVersion","datasetId":8239053},{"sourceId":13178251,"sourceType":"datasetVersion","datasetId":8350960},{"sourceId":13287683,"sourceType":"datasetVersion","datasetId":8421324}],"dockerImageVersionId":31154,"isInternetEnabled":true,"language":"python","sourceType":"notebook","isGpuEnabled":true}},"nbformat_minor":4,"nbformat":4,"cells":[{"cell_type":"code","source":"# ======================================\n# 1. Cài đặt thư viện\n# ======================================\n!pip install torch torchvision tensorboardX tqdm opencv-python","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-10-14T14:01:50.003612Z","iopub.execute_input":"2025-10-14T14:01:50.003886Z","iopub.status.idle":"2025-10-14T14:01:53.229618Z","shell.execute_reply.started":"2025-10-14T14:01:50.003868Z","shell.execute_reply":"2025-10-14T14:01:53.228790Z"}},"outputs":[{"name":"stdout","text":"Requirement already satisfied: torch in /usr/local/lib/python3.11/dist-packages (2.6.0+cu124)\nRequirement already satisfied: torchvision in /usr/local/lib/python3.11/dist-packages (0.21.0+cu124)\nRequirement already satisfied: tensorboardX in /usr/local/lib/python3.11/dist-packages (2.6.4)\nRequirement already satisfied: tqdm in /usr/local/lib/python3.11/dist-packages (4.67.1)\nRequirement already satisfied: opencv-python in /usr/local/lib/python3.11/dist-packages (4.12.0.88)\nRequirement already satisfied: filelock in /usr/local/lib/python3.11/dist-packages (from torch) (3.19.1)\nRequirement already satisfied: typing-extensions>=4.10.0 in /usr/local/lib/python3.11/dist-packages (from torch) (4.15.0)\nRequirement already satisfied: networkx in /usr/local/lib/python3.11/dist-packages (from torch) (3.5)\nRequirement already satisfied: jinja2 in /usr/local/lib/python3.11/dist-packages (from torch) (3.1.6)\nRequirement already satisfied: fsspec in /usr/local/lib/python3.11/dist-packages (from torch) (2025.9.0)\nRequirement already satisfied: nvidia-cuda-nvrtc-cu12==12.4.127 in /usr/local/lib/python3.11/dist-packages (from torch) (12.4.127)\nRequirement already satisfied: nvidia-cuda-runtime-cu12==12.4.127 in /usr/local/lib/python3.11/dist-packages (from torch) (12.4.127)\nRequirement already satisfied: nvidia-cuda-cupti-cu12==12.4.127 in /usr/local/lib/python3.11/dist-packages (from torch) (12.4.127)\nRequirement already satisfied: nvidia-cudnn-cu12==9.1.0.70 in /usr/local/lib/python3.11/dist-packages (from torch) (9.1.0.70)\nRequirement already satisfied: nvidia-cublas-cu12==12.4.5.8 in /usr/local/lib/python3.11/dist-packages (from torch) (12.4.5.8)\nRequirement already satisfied: nvidia-cufft-cu12==11.2.1.3 in /usr/local/lib/python3.11/dist-packages (from torch) (11.2.1.3)\nRequirement already satisfied: nvidia-curand-cu12==10.3.5.147 in /usr/local/lib/python3.11/dist-packages (from torch) (10.3.5.147)\nRequirement already satisfied: nvidia-cusolver-cu12==11.6.1.9 in /usr/local/lib/python3.11/dist-packages (from torch) (11.6.1.9)\nRequirement already satisfied: nvidia-cusparse-cu12==12.3.1.170 in /usr/local/lib/python3.11/dist-packages (from torch) (12.3.1.170)\nRequirement already satisfied: nvidia-cusparselt-cu12==0.6.2 in /usr/local/lib/python3.11/dist-packages (from torch) (0.6.2)\nRequirement already satisfied: nvidia-nccl-cu12==2.21.5 in /usr/local/lib/python3.11/dist-packages (from torch) (2.21.5)\nRequirement already satisfied: nvidia-nvtx-cu12==12.4.127 in /usr/local/lib/python3.11/dist-packages (from torch) (12.4.127)\nRequirement already satisfied: nvidia-nvjitlink-cu12==12.4.127 in /usr/local/lib/python3.11/dist-packages (from torch) (12.4.127)\nRequirement already satisfied: triton==3.2.0 in /usr/local/lib/python3.11/dist-packages (from torch) (3.2.0)\nRequirement already satisfied: sympy==1.13.1 in /usr/local/lib/python3.11/dist-packages (from torch) (1.13.1)\nRequirement already satisfied: mpmath<1.4,>=1.1.0 in /usr/local/lib/python3.11/dist-packages (from sympy==1.13.1->torch) (1.3.0)\nRequirement already satisfied: numpy in /usr/local/lib/python3.11/dist-packages (from torchvision) (2.2.6)\nRequirement already satisfied: pillow!=8.3.*,>=5.3.0 in /usr/local/lib/python3.11/dist-packages (from torchvision) (11.3.0)\nRequirement already satisfied: packaging in /usr/local/lib/python3.11/dist-packages (from tensorboardX) (25.0)\nRequirement already satisfied: protobuf>=3.20 in /usr/local/lib/python3.11/dist-packages (from tensorboardX) (3.20.3)\nRequirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.11/dist-packages (from jinja2->torch) (3.0.2)\n","output_type":"stream"}],"execution_count":1},{"cell_type":"markdown","source":"# Training","metadata":{}},{"cell_type":"code","source":"import os\nimport cv2\nimport torch\nimport random\nimport numpy as np\nimport torch.nn as nn\nimport torch.optim as optim\nfrom tqdm import tqdm\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms, models\n\n# ===============================\n# 1. Config\n# ===============================\nDATA_DIR = \"/kaggle/input/10labs\"\nBATCH_SIZE = 32\nIMG_SIZE = 128\nEPOCHS = 10\nLR = 1e-4\nDEVICE = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n# ===============================","metadata":{"_uuid":"8f2839f25d086af736a60e9eeb907d3b93b6e0e5","_cell_guid":"b1076dfc-b9ad-4769-8c92-a6c4dae69d19","trusted":true,"execution":{"iopub.status.busy":"2025-10-14T14:01:53.231336Z","iopub.execute_input":"2025-10-14T14:01:53.231807Z","iopub.status.idle":"2025-10-14T14:01:56.005813Z","shell.execute_reply.started":"2025-10-14T14:01:53.231772Z","shell.execute_reply":"2025-10-14T14:01:56.004991Z"}},"outputs":[],"execution_count":2},{"cell_type":"code","source":"# 2. Dataset\n# ===============================\nclass FASDataset(Dataset):\n def __init__(self, root_dir, transform=None):\n self.samples = []\n self.transform = transform\n\n # --- Định nghĩa các lớp ---\n self.classes = [\n \"live\",\n \"cutout\",\n \"mask3d\",\n \"mask\",\n \"monitor\",\n \"outline\",\n \"outline3d\",\n \"PC_Replay\",\n \"Smartphone_Replay\",\n \"Print_Attacks_Samples\"\n ]\n self.class_to_idx = {cls: i for i, cls in enumerate(self.classes)}\n\n # --- Duyệt ảnh ---\n # 1️⃣ Ảnh live\n live_folder = os.path.join(root_dir, \"live\")\n for f in os.listdir(live_folder):\n if f.lower().endswith((\".jpg\", \".png\", \".jpeg\")):\n self.samples.append((os.path.join(live_folder, f), self.class_to_idx[\"live\"]))\n\n # 2️⃣ Ảnh spoof (9 loại)\n spoof_root = os.path.join(root_dir, \"spoof\")\n for attack_type in os.listdir(spoof_root):\n attack_folder = os.path.join(spoof_root, attack_type)\n if not os.path.isdir(attack_folder):\n continue\n if attack_type not in self.class_to_idx:\n print(f\"⚠️ Warning: Unknown attack type '{attack_type}' -> Skipped\")\n continue\n for f in os.listdir(attack_folder):\n if f.lower().endswith((\".jpg\", \".png\", \".jpeg\")):\n self.samples.append((os.path.join(attack_folder, f), self.class_to_idx[attack_type]))\n\n def __len__(self):\n return len(self.samples)\n\n def __getitem__(self, idx):\n path, label = self.samples[idx]\n img = cv2.imread(path)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n if self.transform:\n img = self.transform(img)\n return img, torch.tensor(label, dtype=torch.long)\n\n\n# ===============================","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-10-14T14:01:56.006753Z","iopub.execute_input":"2025-10-14T14:01:56.007084Z","iopub.status.idle":"2025-10-14T14:01:56.016566Z","shell.execute_reply.started":"2025-10-14T14:01:56.007066Z","shell.execute_reply":"2025-10-14T14:01:56.015840Z"}},"outputs":[],"execution_count":3},{"cell_type":"code","source":"# 3. Transform & Split\n# ===============================\ntransform = transforms.Compose([\n transforms.ToPILImage(),\n transforms.Resize((IMG_SIZE, IMG_SIZE)),\n transforms.ToTensor(),\n transforms.Normalize([0.5], [0.5])\n])\n\ndataset = FASDataset(DATA_DIR, transform=transform)\nn_total = len(dataset)\nn_train = int(0.7 * n_total)\nn_val = n_total - n_train\ntrain_set, val_set = torch.utils.data.random_split(dataset, [n_train, n_val])\n\ntrain_loader = DataLoader(train_set, batch_size=BATCH_SIZE, shuffle=True)\nval_loader = DataLoader(val_set, batch_size=BATCH_SIZE, shuffle=False)\n\nprint(f\"Train samples: {len(train_set)}, Val samples: {len(val_set)}\")\n\n# ===============================\n","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-10-14T14:01:56.018211Z","iopub.execute_input":"2025-10-14T14:01:56.018500Z","iopub.status.idle":"2025-10-14T14:01:56.056010Z","shell.execute_reply.started":"2025-10-14T14:01:56.018484Z","shell.execute_reply":"2025-10-14T14:01:56.055425Z"}},"outputs":[{"name":"stdout","text":"Train samples: 6465, Val samples: 2772\n","output_type":"stream"}],"execution_count":4},{"cell_type":"code","source":"# 4. Model\n# ===============================\nmodel = models.resnet18(weights=models.ResNet18_Weights.IMAGENET1K_V1)\nmodel.fc = nn.Linear(model.fc.in_features, 10) # 🔹 10 lớp\nmodel = model.to(DEVICE)\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.Adam(model.parameters(), lr=LR)\n\n# ===============================","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-10-14T14:01:56.056604Z","iopub.execute_input":"2025-10-14T14:01:56.056785Z","iopub.status.idle":"2025-10-14T14:01:56.435941Z","shell.execute_reply.started":"2025-10-14T14:01:56.056770Z","shell.execute_reply":"2025-10-14T14:01:56.435370Z"}},"outputs":[],"execution_count":5},{"cell_type":"code","source":"# 5. Train loop\n# ===============================\nbest_val_acc = 0.0\nsave_dir = \"/kaggle/working/fas_model\"\nos.makedirs(save_dir, exist_ok=True)\n\nfor epoch in range(EPOCHS):\n model.train()\n total_loss, correct, total = 0, 0, 0\n for imgs, labels in tqdm(train_loader, desc=f\"Epoch {epoch+1}/{EPOCHS}\"):\n imgs, labels = imgs.to(DEVICE), labels.to(DEVICE)\n optimizer.zero_grad()\n\n outputs = model(imgs)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n\n total_loss += loss.item()\n preds = torch.argmax(outputs, dim=1)\n correct += (preds == labels).sum().item()\n total += labels.size(0)\n\n train_acc = correct / total\n\n # --- Validation ---\n model.eval()\n val_correct, val_total = 0, 0\n with torch.no_grad():\n for imgs, labels in val_loader:\n imgs, labels = imgs.to(DEVICE), labels.to(DEVICE)\n outputs = model(imgs)\n preds = torch.argmax(outputs, dim=1)\n val_correct += (preds == labels).sum().item()\n val_total += labels.size(0)\n\n val_acc = val_correct / val_total\n print(f\"Epoch {epoch+1}/{EPOCHS} | Loss: {total_loss/len(train_loader):.4f} | \"\n f\"Train Acc: {train_acc:.4f} | Val Acc: {val_acc:.4f}\")\n\n # --- Save best model ---\n if val_acc > best_val_acc:\n best_val_acc = val_acc\n save_path = os.path.join(save_dir, \"resnet18_best_10class.pth\")\n torch.save(model.state_dict(), save_path)\n print(f\"✅ Saved best model at epoch {epoch+1} with Val Acc: {best_val_acc:.4f}\")\n\nprint(f\"🎯 Training done. Best Val Acc: {best_val_acc:.4f}\")","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-10-14T14:01:56.436617Z","iopub.execute_input":"2025-10-14T14:01:56.436801Z","iopub.status.idle":"2025-10-14T14:31:35.200466Z","shell.execute_reply.started":"2025-10-14T14:01:56.436786Z","shell.execute_reply":"2025-10-14T14:31:35.199593Z"}},"outputs":[{"name":"stderr","text":"Epoch 1/10: 100%|██████████| 203/203 [02:53<00:00, 1.17it/s]\n","output_type":"stream"},{"name":"stdout","text":"Epoch 1/10 | Loss: 0.3151 | Train Acc: 0.9149 | Val Acc: 0.9711\n✅ Saved best model at epoch 1 with Val Acc: 0.9711\n","output_type":"stream"},{"name":"stderr","text":"Epoch 2/10: 100%|██████████| 203/203 [02:07<00:00, 1.59it/s]\n","output_type":"stream"},{"name":"stdout","text":"Epoch 2/10 | Loss: 0.1096 | Train Acc: 0.9720 | Val Acc: 0.9747\n✅ Saved best model at epoch 2 with Val Acc: 0.9747\n","output_type":"stream"},{"name":"stderr","text":"Epoch 3/10: 100%|██████████| 203/203 [02:05<00:00, 1.62it/s]\n","output_type":"stream"},{"name":"stdout","text":"Epoch 3/10 | Loss: 0.0967 | Train Acc: 0.9742 | Val Acc: 0.9661\n","output_type":"stream"},{"name":"stderr","text":"Epoch 4/10: 100%|██████████| 203/203 [02:05<00:00, 1.62it/s]\n","output_type":"stream"},{"name":"stdout","text":"Epoch 4/10 | Loss: 0.0907 | Train Acc: 0.9754 | Val Acc: 0.9711\n","output_type":"stream"},{"name":"stderr","text":"Epoch 5/10: 100%|██████████| 203/203 [01:54<00:00, 1.78it/s]\n","output_type":"stream"},{"name":"stdout","text":"Epoch 5/10 | Loss: 0.0958 | Train Acc: 0.9762 | Val Acc: 0.9708\n","output_type":"stream"},{"name":"stderr","text":"Epoch 6/10: 100%|██████████| 203/203 [02:03<00:00, 1.64it/s]\n","output_type":"stream"},{"name":"stdout","text":"Epoch 6/10 | Loss: 0.0860 | Train Acc: 0.9763 | Val Acc: 0.9722\n","output_type":"stream"},{"name":"stderr","text":"Epoch 7/10: 100%|██████████| 203/203 [01:54<00:00, 1.77it/s]\n","output_type":"stream"},{"name":"stdout","text":"Epoch 7/10 | Loss: 0.0871 | Train Acc: 0.9771 | Val Acc: 0.9719\n","output_type":"stream"},{"name":"stderr","text":"Epoch 8/10: 100%|██████████| 203/203 [01:59<00:00, 1.70it/s]\n","output_type":"stream"},{"name":"stdout","text":"Epoch 8/10 | Loss: 0.0880 | Train Acc: 0.9753 | Val Acc: 0.9722\n","output_type":"stream"},{"name":"stderr","text":"Epoch 9/10: 100%|██████████| 203/203 [01:58<00:00, 1.71it/s]\n","output_type":"stream"},{"name":"stdout","text":"Epoch 9/10 | Loss: 0.0822 | Train Acc: 0.9782 | Val Acc: 0.9708\n","output_type":"stream"},{"name":"stderr","text":"Epoch 10/10: 100%|██████████| 203/203 [01:57<00:00, 1.73it/s]\n","output_type":"stream"},{"name":"stdout","text":"Epoch 10/10 | Loss: 0.0747 | Train Acc: 0.9791 | Val Acc: 0.9690\n🎯 Training done. Best Val Acc: 0.9747\n","output_type":"stream"}],"execution_count":6},{"cell_type":"markdown","source":"# Dự đoán","metadata":{}},{"cell_type":"code","source":"import os\nimport cv2\nimport torch\nimport torch.nn as nn\nimport pandas as pd\nfrom torchvision import transforms, models\nfrom tqdm import tqdm\nfrom PIL import Image\n\n# ========================\n# Config\n# ========================\nFRAME_DIR = \"/kaggle/input/testcv1/fas_test_frames (1)\"\nCSV_PATH = \"/kaggle/input/test-fascsv/publics_test_metadata.csv\"\nMODEL_PATH = \"/kaggle/working/fas_model/resnet18_best_10class.pth\" # 🔹 model 10 class\nOUTPUT_CSV = \"/kaggle/working/predictions_10class.csv\"\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n# ========================\n# 1. Class names (phải trùng khi train)\n# ========================\nCLASS_NAMES = [\n \"live\",\n \"cutout\",\n \"mask3d\",\n \"mask\",\n \"monitor\",\n \"outline\",\n \"outline3d\",\n \"PC_Replay\",\n \"Smartphone_Replay\",\n \"Print_Attacks_Samples\"\n]\n\n# ========================\n# 2. Model definition (ResNet18)\n# ========================\nmodel = models.resnet18(weights=None)\nmodel.fc = nn.Linear(model.fc.in_features, len(CLASS_NAMES)) # 🔹 10 lớp\nmodel = model.to(device)\n\n# Load trained weights\nmodel.load_state_dict(torch.load(MODEL_PATH, map_location=device))\nmodel.eval()\n\n# ========================\n# 3. Transform (giống training)\n# ========================\ntransform = transforms.Compose([\n transforms.Resize((128, 128)),\n transforms.ToTensor(),\n transforms.Normalize([0.5], [0.5])\n])\n\n# ========================\n# 4. Load CSV (danh sách frame test)\n# ========================\ndf = pd.read_csv(CSV_PATH)\npredictions = []\n\n# ========================\n# 5. Inference\n# ========================\nfor _, row in tqdm(df.iterrows(), total=len(df)):\n uuid = row[\"uuid\"]\n video_name = row[\"path\"]\n frame_path = os.path.join(FRAME_DIR, f\"{video_name}.jpg\")\n\n if not os.path.exists(frame_path):\n print(f\"⚠️ Không tìm thấy frame: {frame_path}\")\n continue\n\n # Load và tiền xử lý ảnh\n img = cv2.imread(frame_path)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = Image.fromarray(img)\n img = transform(img).unsqueeze(0).to(device)\n\n # Dự đoán\n with torch.no_grad():\n logits = model(img)\n pred_idx = torch.argmax(logits, dim=1).item()\n\n # Mapping sang tên lớp\n pred_label = CLASS_NAMES[pred_idx]\n\n predictions.append({\n \"uuid\": uuid,\n \"label_pred\": pred_label\n })\n\n# ========================\n# 6. Xuất CSV kết quả\n# ========================\nout_df = pd.DataFrame(predictions)\nout_df.to_csv(OUTPUT_CSV, index=False)\nprint(\"✅ Done. Saved:\", OUTPUT_CSV)\nprint(out_df.head())\n","metadata":{"trusted":true,"execution":{"iopub.status.busy":"2025-10-14T14:31:35.201337Z","iopub.execute_input":"2025-10-14T14:31:35.201614Z","iopub.status.idle":"2025-10-14T14:31:39.420207Z","shell.execute_reply.started":"2025-10-14T14:31:35.201584Z","shell.execute_reply":"2025-10-14T14:31:39.419415Z"}},"outputs":[{"name":"stderr","text":"100%|██████████| 170/170 [00:03<00:00, 47.37it/s]\n","output_type":"stream"},{"name":"stdout","text":"✅ Done. Saved: /kaggle/working/predictions_10class.csv\n uuid label_pred\n0 1dee7310-207a-419c-ae58-487e72e066ac live\n1 6f9a1ca2-749c-4af2-bdd3-31a1b13ca980 live\n2 77e7b685-95e8-4e44-8d49-8558aa0fbb78 live\n3 ef74552f-7f40-4abf-8db7-922a8e85ad08 live\n4 58b4974b-0cb8-4c66-ab34-554c214c2573 live\n","output_type":"stream"}],"execution_count":7}]}