{ "cells": [ { "cell_type": "code", "execution_count": null, "id": "60707c2d", "metadata": { "vscode": { "languageId": "plaintext" } }, "outputs": [], "source": [ "# --- 0. Install Libraries ---\n", "import subprocess\n", "import sys\n", "\n", "def install_if_missing(package):\n", " try:\n", " __import__(package)\n", " except ImportError:\n", " subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", package])\n", "\n", "packages_to_check = ['transformers', 'openpyxl', 'tqdm']\n", "for package in packages_to_check:\n", " install_if_missing(package)\n", "\n", "# --- 1. Import Libraries ---\n", "import pandas as pd\n", "import torch\n", "import torch.nn as nn\n", "from torch.utils.data import Dataset, DataLoader, random_split\n", "from sklearn.preprocessing import LabelEncoder\n", "from sklearn.metrics import classification_report, confusion_matrix, f1_score, accuracy_score\n", "from sklearn.tree import DecisionTreeClassifier, export_text\n", "from transformers import BertTokenizer, BertModel, get_linear_schedule_with_warmup\n", "import numpy as np\n", "import matplotlib.pyplot as plt\n", "import seaborn as sns\n", "from tqdm.auto import tqdm\n", "import warnings\n", "import os\n", "warnings.filterwarnings('ignore')\n", "\n", "# --- 2. Kaggle Environment Detection ---\n", "def setup_kaggle_environment():\n", " is_kaggle = os.environ.get('KAGGLE_KERNEL_RUN_TYPE') is not None\n", " if is_kaggle:\n", " print(\"Kaggle environment detected!\")\n", " input_dir = '/kaggle/input'\n", " working_dir = '/kaggle/working'\n", " dataset_files = []\n", " if os.path.exists(input_dir):\n", " for root, dirs, files in os.walk(input_dir):\n", " for file in files:\n", " if file.endswith(('.xlsx', '.csv')):\n", " dataset_files.append(os.path.join(root, file))\n", " print(f\"Available dataset files: {dataset_files}\")\n", " file_path = dataset_files[0] if dataset_files else '/kaggle/input/csic_database.xlsx'\n", " return {'file_path': file_path, 'output_dir': working_dir, 'is_kaggle': True}\n", " else:\n", " print(\"Local environment detected!\")\n", " return {'file_path': './csic_database.xlsx', 'output_dir': './', 'is_kaggle': False}\n", "\n", "env_config = setup_kaggle_environment()\n", "\n", "CONFIG = {\n", " 'file_path': env_config['file_path'],\n", " 'output_dir': env_config['output_dir'],\n", " 'batch_size': 16 if torch.cuda.is_available() else 8,\n", " 'max_length': 512,\n", " 'learning_rate': 2e-5,\n", " 'num_epochs': 3,\n", " 'max_depth': 10,\n", " 'test_split': 0.2,\n", " 'random_seed': 42,\n", " 'is_kaggle': env_config['is_kaggle']\n", "}\n", "\n", "torch.manual_seed(CONFIG['random_seed'])\n", "np.random.seed(CONFIG['random_seed'])\n", "print(f\"Configuration: {CONFIG}\")\n", "\n", "# --- 4. Data Loading ---\n", "def load_and_preprocess_data(file_path):\n", " try:\n", " print(f\"Loading dataset from: {file_path}\")\n", " if not os.path.exists(file_path) and CONFIG['is_kaggle']:\n", " input_dir = '/kaggle/input'\n", " print(f\"File not found. Searching in {input_dir}...\")\n", " for root, dirs, files in os.walk(input_dir):\n", " for file in files:\n", " if 'csic' in file.lower() or 'dataset' in file.lower():\n", " file_path = os.path.join(root, file)\n", " break\n", " if file_path.endswith('.xlsx'):\n", " df = pd.read_excel(file_path)\n", " elif file_path.endswith('.csv'):\n", " df = pd.read_csv(file_path)\n", " else:\n", " try: df = pd.read_excel(file_path)\n", " except: df = pd.read_csv(file_path)\n", " print(f\"Dataset loaded! Shape: {df.shape}\")\n", " label_columns = ['classification','label','class','target']\n", " label_col = next((c for c in label_columns if c in df.columns), None)\n", " if label_col:\n", " print(f\"Label distribution:\\n{df[label_col].value_counts()}\")\n", " return df\n", " except Exception as e:\n", " print(f\"Error loading file: {e}\")\n", " return None\n", "\n", "# --- 5. Preprocess ---\n", "def preprocess_data(df):\n", " print(\"Preprocessing data...\")\n", " label_columns = ['classification','label','class','target']\n", " label_col = next((c for c in label_columns if c in df.columns), None)\n", " if label_col and label_col != 'label':\n", " df.rename(columns={label_col:'label'}, inplace=True)\n", " length_columns = ['lenght','length','len']\n", " for col in length_columns: df.drop(col, axis=1, inplace=True, errors='ignore')\n", " potential_text_cols = [\n", " 'Method','method','HTTP_Method','User-Agent','user-agent','useragent','User_Agent',\n", " 'Pragma','pragma','Cache-Control','cache-control','Cache_Control','Accept','accept',\n", " 'Accept-encoding','accept-encoding','Accept_Encoding','Accept-charset','accept-charset','Accept_Charset',\n", " 'language','Language','lang','host','Host','hostname','cookie','Cookie','cookies',\n", " 'content-type','Content-Type','Content_Type','contenttype','connection','Connection','URL','url','uri','path',\n", " 'content','Content','payload','data','body']\n", " available_text_cols = []\n", " for col in df.columns:\n", " if col in potential_text_cols or any(k in col.lower() for k in ['method','agent','url','content','header']):\n", " available_text_cols.append(col)\n", " df[col] = df[col].astype(str).fillna('')\n", " print(f\"Available text columns: {available_text_cols}\")\n", " if available_text_cols:\n", " combined_parts = [f'{col}: '+df[col].astype(str) for col in available_text_cols]\n", " df['combined_text'] = combined_parts[0]\n", " for part in combined_parts[1:]:\n", " df['combined_text'] += ' '+part\n", " else:\n", " text_cols = df.select_dtypes(include=['object']).columns.tolist()\n", " if 'label' in text_cols: text_cols.remove('label')\n", " if text_cols:\n", " print(f\"Using all object columns as text: {text_cols}\")\n", " combined_parts = [f'{col}: '+df[col].astype(str).fillna('') for col in text_cols]\n", " df['combined_text'] = combined_parts[0]\n", " for part in combined_parts[1:]:\n", " df['combined_text'] += ' '+part\n", " else:\n", " print(\"No text columns found!\")\n", " return None,None,None\n", " if 'label' not in df.columns:\n", " print(\"No 'label' column found!\")\n", " return None,None,None\n", " combined_text = df['combined_text']\n", " y_raw = df['label']\n", " label_encoder = LabelEncoder()\n", " y = label_encoder.fit_transform(y_raw)\n", " print(f\"Classes: {label_encoder.classes_}\")\n", " return combined_text,y,label_encoder\n", "\n", "# --- 6. Dataset ---\n", "class CSICBertDataset(Dataset):\n", " def __init__(self, encodings, labels):\n", " self.encodings = encodings; self.labels = labels\n", " def __len__(self): return len(self.labels)\n", " def __getitem__(self, idx):\n", " item = {k:v[idx] for k,v in self.encodings.items()}\n", " item['labels'] = self.labels[idx]; return item\n", "\n", "# --- 7. Model ---\n", "class BertClassifier(nn.Module):\n", " def __init__(self, n_classes, dropout_rate=0.3):\n", " super().__init__()\n", " self.bert = BertModel.from_pretrained('bert-base-uncased')\n", " self.dropout = nn.Dropout(dropout_rate)\n", " self.classifier = nn.Linear(self.bert.config.hidden_size, n_classes)\n", " def forward(self,input_ids,attention_mask):\n", " outputs = self.bert(input_ids=input_ids,attention_mask=attention_mask)\n", " cls_embedding = self.dropout(outputs.last_hidden_state[:,0,:])\n", " logits = self.classifier(cls_embedding)\n", " return logits,cls_embedding\n", "\n", "# --- 8. Training with checkpoint ---\n", "def train_model(model, train_loader, val_loader, device, config,\n", " optimizer, scheduler, resume_epoch=0,\n", " train_losses=None, val_losses=None, val_accuracies=None):\n", " if train_losses is None: train_losses=[]\n", " if val_losses is None: val_losses=[]\n", " if val_accuracies is None: val_accuracies=[]\n", " criterion = nn.CrossEntropyLoss()\n", " for epoch in range(resume_epoch, config['num_epochs']):\n", " model.train(); total_train_loss=0\n", " for batch in tqdm(train_loader,desc=f'Epoch {epoch+1}/{config[\"num_epochs\"]} [Train]'):\n", " input_ids=batch['input_ids'].to(device)\n", " attention_mask=batch['attention_mask'].to(device)\n", " labels=batch['labels'].to(device)\n", " optimizer.zero_grad()\n", " logits,_=model(input_ids,attention_mask)\n", " loss=criterion(logits,labels); loss.backward()\n", " torch.nn.utils.clip_grad_norm_(model.parameters(),1.0)\n", " optimizer.step(); scheduler.step()\n", " total_train_loss+=loss.item()\n", " avg_train_loss=total_train_loss/len(train_loader)\n", " train_losses.append(avg_train_loss)\n", " # validation\n", " model.eval(); total_val_loss=0; correct=0; total=0\n", " with torch.no_grad():\n", " for batch in tqdm(val_loader,desc=f'Epoch {epoch+1}/{config[\"num_epochs\"]} [Val]'):\n", " input_ids=batch['input_ids'].to(device)\n", " attention_mask=batch['attention_mask'].to(device)\n", " labels=batch['labels'].to(device)\n", " logits,_=model(input_ids,attention_mask)\n", " loss=criterion(logits,labels); total_val_loss+=loss.item()\n", " preds=torch.argmax(logits,dim=1)\n", " correct+=(preds==labels).sum().item(); total+=labels.size(0)\n", " avg_val_loss=total_val_loss/len(val_loader); val_acc=correct/total\n", " val_losses.append(avg_val_loss); val_accuracies.append(val_acc)\n", " print(f\"Epoch {epoch+1}: Train Loss {avg_train_loss:.4f} Val Loss {avg_val_loss:.4f} Val Acc {val_acc:.4f}\")\n", " # save checkpoint\n", " checkpoint_path=os.path.join(config['output_dir'],'bert_checkpoint.pt')\n", " torch.save({\n", " 'epoch':epoch+1,\n", " 'model_state_dict':model.state_dict(),\n", " 'optimizer_state_dict':optimizer.state_dict(),\n", " 'scheduler_state_dict':scheduler.state_dict(),\n", " 'train_losses':train_losses,\n", " 'val_losses':val_losses,\n", " 'val_accuracies':val_accuracies\n", " },checkpoint_path)\n", " print(f\"Checkpoint saved at {checkpoint_path}\")\n", " return train_losses,val_losses,val_accuracies\n", "\n", "# --- 9. Evaluation ---\n", "def evaluate_model(model, test_loader, device, label_encoder, config):\n", " model.eval(); all_preds=[]; all_trues=[]\n", " with torch.no_grad():\n", " for batch in tqdm(test_loader,desc='Evaluating'):\n", " input_ids=batch['input_ids'].to(device)\n", " attention_mask=batch['attention_mask'].to(device)\n", " labels=batch['labels'].to(device)\n", " logits,_=model(input_ids,attention_mask)\n", " preds=torch.argmax(logits,dim=1)\n", " all_preds.extend(preds.cpu().tolist()); all_trues.extend(labels.cpu().tolist())\n", " accuracy=accuracy_score(all_trues,all_preds)\n", " f1w=f1_score(all_trues,all_preds,average='weighted')\n", " print(f\"Test Accuracy: {accuracy:.4f} Weighted F1: {f1w:.4f}\")\n", " print(classification_report(all_trues,all_preds,target_names=label_encoder.classes_.astype(str)))\n", " cm=confusion_matrix(all_trues,all_preds)\n", " plt.figure(figsize=(10,8))\n", " sns.heatmap(cm,annot=True,fmt='d',cmap='Blues',\n", " xticklabels=label_encoder.classes_.astype(str),\n", " yticklabels=label_encoder.classes_.astype(str))\n", " plt.title('Confusion Matrix'); plt.ylabel('True'); plt.xlabel('Predicted')\n", " plt.tight_layout()\n", " plt.savefig(os.path.join(config['output_dir'],'confusion_matrix.png'),dpi=300)\n", " plt.show()\n", " return all_preds,all_trues,accuracy,f1w\n", "\n", "# --- 11. Main ---\n", "def main():\n", " print(\"=\"*60)\n", " print(\"CSIC BERT CLASSIFIER WITH RESUME SUPPORT\")\n", " print(\"=\"*60)\n", " df=load_and_preprocess_data(CONFIG['file_path'])\n", " if df is None: return\n", " combined_text,y,label_encoder=preprocess_data(df)\n", " if combined_text is None: return\n", " y_tensor=torch.tensor(y,dtype=torch.long)\n", " tokenizer=BertTokenizer.from_pretrained('bert-base-uncased')\n", " tokenized_inputs=tokenizer(\n", " combined_text.tolist(),\n", " padding='max_length',truncation=True,max_length=CONFIG['max_length'],return_tensors=\"pt\")\n", " dataset=CSICBertDataset(tokenized_inputs,y_tensor)\n", " total_size=len(dataset)\n", " test_size=int(CONFIG['test_split']*total_size)\n", " train_val_size=total_size-test_size\n", " val_size=int(0.1*total_size)\n", " train_size=train_val_size-val_size\n", " train_ds,val_ds,test_ds=random_split(dataset,[train_size,val_size,test_size],\n", " generator=torch.Generator().manual_seed(CONFIG['random_seed']))\n", " print(f\"Splits - Train: {train_size} Val: {val_size} Test: {test_size}\")\n", " train_loader=DataLoader(train_ds,batch_size=CONFIG['batch_size'],shuffle=True)\n", " val_loader=DataLoader(val_ds,batch_size=CONFIG['batch_size'],shuffle=False)\n", " test_loader=DataLoader(test_ds,batch_size=CONFIG['batch_size'],shuffle=False)\n", " device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n", " model=BertClassifier(n_classes=len(label_encoder.classes_)).to(device)\n", " optimizer=torch.optim.AdamW(model.parameters(),lr=CONFIG['learning_rate'])\n", " total_steps=len(train_loader)*CONFIG['num_epochs']\n", " scheduler=get_linear_schedule_with_warmup(\n", " optimizer,num_warmup_steps=int(0.1*total_steps),num_training_steps=total_steps)\n", " # resume logic\n", " checkpoint_path=os.path.join(CONFIG['output_dir'],'bert_checkpoint.pt')\n", " resume_epoch=0; train_losses=val_losses=val_accuracies=None\n", " if os.path.exists(checkpoint_path):\n", " print(f\"Resuming from checkpoint: {checkpoint_path}\")\n", " checkpoint=torch.load(checkpoint_path,map_location=device)\n", " model.load_state_dict(checkpoint['model_state_dict'])\n", " optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n", " scheduler.load_state_dict(checkpoint['scheduler_state_dict'])\n", " resume_epoch=checkpoint['epoch']\n", " train_losses=checkpoint['train_losses']; val_losses=checkpoint['val_losses']; val_accuracies=checkpoint['val_accuracies']\n", " train_losses,val_losses,val_accuracies=train_model(\n", " model,train_loader,val_loader,device,CONFIG,optimizer,scheduler,resume_epoch,\n", " train_losses,val_losses,val_accuracies)\n", " # evaluate\n", " all_preds,all_trues,accuracy,f1w=evaluate_model(model,test_loader,device,label_encoder,CONFIG)\n", " # save final model\n", " model_path=os.path.join(CONFIG['output_dir'],'bert_classifier_model.pt')\n", " torch.save({'model_state_dict':model.state_dict(),\n", " 'label_encoder':label_encoder,\n", " 'config':CONFIG,\n", " 'test_accuracy':accuracy,\n", " 'f1_score':f1w},model_path)\n", " print(f\"Model saved to {model_path}\")\n", " print(\"Done.\")\n", "\n", "if __name__==\"__main__\":\n", " main() " ] }, { "cell_type": "code", "execution_count": null, "id": "44056c38", "metadata": { "vscode": { "languageId": "plaintext" } }, "outputs": [], "source": [ "# ==============================================================================\n", "# Complete BERT → DistilBERT + MLP Knowledge Distillation Pipeline\n", "# CSIC 2010 Web Application Attack Detector (Adaptive WAF)\n", "# ==============================================================================\n", "\n", "# --- 0. Setup and Imports (omitted for brevity, assume the user's provided imports) ---\n", "import torch\n", "import torch.nn as nn\n", "import torch.nn.functional as F\n", "from torch.utils.data import DataLoader, Dataset, random_split\n", "import numpy as np\n", "import pandas as pd\n", "from sklearn.preprocessing import LabelEncoder\n", "from sklearn.model_selection import train_test_split\n", "from sklearn.metrics import roc_curve, auc, classification_report, accuracy_score, f1_score\n", "from sklearn.tree import DecisionTreeClassifier, export_text # XAI Import\n", "import matplotlib.pyplot as plt\n", "import seaborn as sns\n", "from transformers import (\n", " BertTokenizer, BertModel,\n", " DistilBertModel,\n", " get_linear_schedule_with_warmup\n", ")\n", "from torch.optim import AdamW\n", "from tqdm.auto import tqdm\n", "import warnings\n", "import os\n", "\n", "warnings.filterwarnings('ignore')\n", "torch.manual_seed(42)\n", "np.random.seed(42)\n", "\n", "# --- 1. Configuration & Environment ---\n", "CHECKPOINT_PATH = '/kaggle/working/bert_classifier_model.pt'\n", "DATASET_PATH = '/kaggle/input/csic-2010-web-application-attacks/csic_database.csv'\n", "BEST_MODEL_PATH = '/kaggle/working/best_student_waf_model.pt'\n", "MAX_LENGTH = 512\n", "BATCH_SIZE = 16\n", "NUM_EPOCHS = 5\n", "LEARNING_RATE = 2e-5\n", "OUTPUT_DIR = '/kaggle/working'\n", "DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n", "XAI_MAX_DEPTH = 10\n", "\n", "# --- 2. Dataset and Data Loading (TextDataset and load_csic_dataset functions remain the same) ---\n", "class TextDataset(Dataset):\n", " def __init__(self, texts, labels, tokenizer, max_length):\n", " self.texts = texts\n", " self.labels = labels\n", " self.tokenizer = tokenizer\n", " self.max_length = max_length\n", " \n", " def __len__(self):\n", " return len(self.texts)\n", " \n", " def __getitem__(self, idx):\n", " text = str(self.texts[idx])\n", " label = self.labels[idx]\n", " \n", " encoding = self.tokenizer(\n", " text,\n", " truncation=True,\n", " padding='max_length',\n", " max_length=self.max_length,\n", " return_tensors='pt'\n", " )\n", " \n", " return {\n", " 'input_ids': encoding['input_ids'].flatten(),\n", " 'attention_mask': encoding['attention_mask'].flatten(),\n", " 'label': torch.tensor(label, dtype=torch.long)\n", " }\n", "\n", "def load_csic_dataset(file_path):\n", " try:\n", " df = pd.read_csv(file_path)\n", " except FileNotFoundError:\n", " print(f\"Error: Dataset not found at {file_path}. Please check the path.\")\n", " return None, None, None\n", "\n", " text_columns = ['Method', 'User-Agent', 'Pragma', 'Cache-Control', 'Accept',\n", " 'Accept-encoding', 'Accept-charset', 'language', 'host',\n", " 'cookie', 'content-type', 'connection', 'content', 'URL']\n", " \n", " df['combined_text'] = ''\n", " for col in text_columns:\n", " if col in df.columns:\n", " df['combined_text'] += df[col].fillna('').astype(str) + ' '\n", " \n", " df['combined_text'] = df['combined_text'].str.strip()\n", " \n", " texts = df['combined_text'].values\n", " labels_raw = df['classification'].values\n", " \n", " le = LabelEncoder()\n", " labels = le.fit_transform(labels_raw)\n", "\n", " print(f\"Dataset loaded! Shape: {df.shape}\")\n", " print(f\"Label distribution:\\n{df['classification'].value_counts()}\")\n", " return texts, labels, le\n", "\n", "# --- 3. Model Architectures (TeacherBERT, StudentDistilBERT, StudentMLP functions remain the same) ---\n", "class TeacherBERT(nn.Module):\n", " def __init__(self, n_classes, model_name='bert-base-uncased', dropout_rate=0.3):\n", " super(TeacherBERT, self).__init__()\n", " self.bert = BertModel.from_pretrained(model_name)\n", " self.dropout = nn.Dropout(dropout_rate)\n", " self.classifier = nn.Linear(self.bert.config.hidden_size, n_classes)\n", " \n", " def forward(self, input_ids, attention_mask):\n", " outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask)\n", " cls_embedding = self.dropout(outputs.last_hidden_state[:, 0, :])\n", " logits = self.classifier(cls_embedding)\n", " return logits\n", "\n", "class StudentDistilBERT(nn.Module):\n", " def __init__(self, model_name='distilbert-base-uncased', num_classes=2, dropout=0.1):\n", " super(StudentDistilBERT, self).__init__()\n", " self.distilbert = DistilBertModel.from_pretrained(model_name)\n", " self.dropout = nn.Dropout(dropout)\n", " self.classifier = nn.Linear(self.distilbert.config.hidden_size, num_classes)\n", " \n", " def forward(self, input_ids, attention_mask):\n", " outputs = self.distilbert(input_ids=input_ids, attention_mask=attention_mask)\n", " pooled_output = outputs.last_hidden_state[:, 0]\n", " pooled_output_dropped = self.dropout(pooled_output)\n", " logits = self.classifier(pooled_output_dropped)\n", " return logits, pooled_output\n", "\n", "class StudentMLP(nn.Module):\n", " def __init__(self, vocab_size=30522, embed_dim=128, hidden_dims=[256, 128], \n", " num_classes=2, dropout=0.3):\n", " super(StudentMLP, self).__init__()\n", " self.embedding = nn.Embedding(vocab_size, embed_dim, padding_idx=0)\n", " self.dropout = nn.Dropout(dropout)\n", " \n", " layers = []\n", " input_dim = embed_dim\n", " for hidden_dim in hidden_dims:\n", " layers.extend([\n", " nn.Linear(input_dim, hidden_dim),\n", " nn.ReLU(),\n", " nn.Dropout(dropout)\n", " ])\n", " input_dim = hidden_dim\n", " \n", " layers.append(nn.Linear(input_dim, num_classes))\n", " self.mlp = nn.Sequential(*layers)\n", " \n", " def forward(self, input_ids, attention_mask):\n", " embeddings = self.embedding(input_ids)\n", " mask = attention_mask.unsqueeze(-1).float()\n", " masked_embeddings = embeddings * mask\n", " pooled = masked_embeddings.sum(dim=1) / mask.sum(dim=1).clamp(min=1e-9)\n", " pooled = self.dropout(pooled)\n", " logits = self.mlp(pooled)\n", " return logits\n", "\n", "# --- 4. Distillation Loss and Training Function (DistillationLoss and train_student_model functions remain the same) ---\n", "class DistillationLoss(nn.Module):\n", " def __init__(self, alpha=0.7, temperature=4.0):\n", " super(DistillationLoss, self).__init__()\n", " self.alpha = alpha\n", " self.temperature = temperature\n", " self.kl_div = nn.KLDivLoss(reduction='batchmean')\n", " self.ce_loss = nn.CrossEntropyLoss()\n", " \n", " def forward(self, student_logits, teacher_logits, labels):\n", " teacher_probs = F.softmax(teacher_logits / self.temperature, dim=1)\n", " student_log_probs = F.log_softmax(student_logits / self.temperature, dim=1)\n", " distillation_loss = self.kl_div(student_log_probs, teacher_probs) * (self.temperature ** 2)\n", " student_loss = self.ce_loss(student_logits, labels)\n", " total_loss = self.alpha * distillation_loss + (1 - self.alpha) * student_loss\n", " return total_loss, distillation_loss, student_loss\n", "\n", "def train_student_model(student_model, teacher_model, train_loader, val_loader, device, name):\n", " student_model.to(device)\n", " teacher_model.to(device)\n", " teacher_model.eval()\n", " \n", " optimizer = AdamW(student_model.parameters(), lr=LEARNING_RATE)\n", " distillation_criterion = DistillationLoss(alpha=0.7, temperature=4.0)\n", " \n", " total_steps = len(train_loader) * NUM_EPOCHS\n", " scheduler = get_linear_schedule_with_warmup(\n", " optimizer, num_warmup_steps=0, num_training_steps=total_steps\n", " )\n", " \n", " train_losses = []\n", " val_accuracies = []\n", " \n", " print(f\"\\n--- Starting Distillation for {name} (Epochs: {NUM_EPOCHS}) ---\")\n", " for epoch in range(NUM_EPOCHS):\n", " student_model.train()\n", " total_loss = 0\n", " \n", " progress_bar = tqdm(train_loader, desc=f'Epoch {epoch+1}/{NUM_EPOCHS} [{name}]')\n", " for batch in progress_bar:\n", " input_ids = batch['input_ids'].to(device)\n", " attention_mask = batch['attention_mask'].to(device)\n", " labels = batch['label'].to(device)\n", " \n", " with torch.no_grad():\n", " teacher_logits = teacher_model(input_ids, attention_mask)\n", " \n", " if name == 'DistilBERT':\n", " student_logits, _ = student_model(input_ids, attention_mask)\n", " else:\n", " student_logits = student_model(input_ids, attention_mask)\n", " \n", " loss, dist_loss, student_loss = distillation_criterion(\n", " student_logits, teacher_logits, labels\n", " )\n", " \n", " optimizer.zero_grad()\n", " loss.backward()\n", " torch.nn.utils.clip_grad_norm_(student_model.parameters(), 1.0)\n", " optimizer.step()\n", " scheduler.step()\n", " \n", " total_loss += loss.item()\n", " \n", " progress_bar.set_postfix({\n", " 'Loss': f'{loss.item():.4f}',\n", " 'Dist': f'{dist_loss.item():.4f}',\n", " })\n", " \n", " avg_train_loss = total_loss / len(train_loader)\n", " train_losses.append(avg_train_loss)\n", " \n", " val_accuracy, _ = evaluate_model(student_model, val_loader, device, is_distilbert=(name=='DistilBERT'))\n", " val_accuracies.append(val_accuracy)\n", " \n", " print(f'Epoch {epoch+1}: Train Loss: {avg_train_loss:.4f}, Val Accuracy: {val_accuracy:.4f}')\n", " \n", " return train_losses, val_accuracies\n", "\n", "# --- 5. Evaluation, Checkpoint Loading, and Plotting (load_teacher_checkpoint, evaluate_model, plot_roc_comparison functions remain the same) ---\n", "def load_teacher_checkpoint(checkpoint_path, n_classes):\n", " print(\"Loading teacher checkpoint...\")\n", " \n", " teacher_model = TeacherBERT(n_classes=n_classes)\n", " teacher_model.to(DEVICE)\n", " \n", " try:\n", " teacher_ckpt = torch.load(checkpoint_path, map_location=DEVICE, weights_only=False)\n", " \n", " if 'model_state_dict' in teacher_ckpt:\n", " state_dict = teacher_ckpt['model_state_dict']\n", " \n", " new_state_dict = {}\n", " for k, v in state_dict.items():\n", " if k.startswith('module.'):\n", " k = k[7:]\n", " new_state_dict[k] = v\n", " \n", " teacher_model.load_state_dict(new_state_dict)\n", " \n", " print(\"✓ Teacher weights loaded successfully!\")\n", " return teacher_model, teacher_ckpt.get('label_encoder'), teacher_ckpt.get('config')\n", " else:\n", " print(\"✗ Checkpoint file is missing 'model_state_dict'. Cannot load weights.\")\n", " return None, None, None\n", " \n", " except Exception as e:\n", " print(f\"✗ Failed to load teacher checkpoint: {e}\")\n", " print(\"Using randomly initialized BERT Teacher. Expect poor performance.\")\n", " return teacher_model, None, None\n", "\n", "def evaluate_model(model, dataloader, device, return_probs=False, is_distilbert=False):\n", " model.eval()\n", " all_preds = []\n", " all_labels = []\n", " all_probs = []\n", " total_loss = 0\n", " criterion = nn.CrossEntropyLoss()\n", " \n", " with torch.no_grad():\n", " for batch in tqdm(dataloader, desc=\"Evaluating\"):\n", " input_ids = batch['input_ids'].to(device)\n", " attention_mask = batch['attention_mask'].to(device)\n", " labels = batch['label'].to(device)\n", " \n", " if is_distilbert and isinstance(model, StudentDistilBERT):\n", " logits, _ = model(input_ids, attention_mask)\n", " else:\n", " logits = model(input_ids, attention_mask)\n", " \n", " loss = criterion(logits, labels)\n", " total_loss += loss.item()\n", " \n", " probs = F.softmax(logits, dim=1)\n", " preds = torch.argmax(logits, dim=1)\n", " \n", " all_preds.extend(preds.cpu().numpy())\n", " all_labels.extend(labels.cpu().numpy())\n", " all_probs.extend(probs.cpu().numpy())\n", " \n", " accuracy = accuracy_score(all_labels, all_preds)\n", " avg_loss = total_loss / len(dataloader)\n", " \n", " if return_probs:\n", " return accuracy, avg_loss, all_labels, all_preds, all_probs\n", " return accuracy, avg_loss\n", "\n", "def plot_roc_comparison(models_data, save_path=None):\n", " plt.figure(figsize=(12, 8))\n", " colors = ['blue', 'red', 'green', 'orange']\n", " \n", " for i, (name, labels, probs) in enumerate(models_data):\n", " y_score = np.array(probs)[:, 1] if len(probs[0]) > 1 else np.array(probs)\n", " \n", " fpr, tpr, _ = roc_curve(labels, y_score)\n", " roc_auc = auc(fpr, tpr)\n", " \n", " plt.plot(fpr, tpr, color=colors[i % len(colors)], lw=2,\n", " label=f'{name} (AUC = {roc_auc:.4f})')\n", " \n", " plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--', alpha=0.5)\n", " plt.xlim([0.0, 1.0])\n", " plt.ylim([0.0, 1.05])\n", " plt.xlabel('False Positive Rate', fontsize=12)\n", " plt.ylabel('True Positive Rate', fontsize=12)\n", " plt.title('ROC Curve Comparison: Teacher vs Student Models', fontsize=14, fontweight='bold')\n", " plt.legend(loc=\"lower right\", fontsize=11)\n", " plt.grid(True, alpha=0.3)\n", " \n", " if save_path:\n", " plt.savefig(save_path, dpi=300, bbox_inches='tight')\n", " plt.show()\n", "\n", "# --- 6. Main Distillation Pipeline ---\n", "\n", "def main_distillation_pipeline():\n", " print(\"=\" * 80)\n", " print(\"BERT → DistilBERT + MLP Knowledge Distillation Pipeline\")\n", " print(\"CSIC 2010 Web Application Attacks Dataset\")\n", " print(\"=\" * 80)\n", " \n", " # Load and preprocess data\n", " texts, encoded_labels, label_encoder = load_csic_dataset(DATASET_PATH)\n", " if texts is None: return\n", "\n", " num_classes = len(label_encoder.classes_)\n", " \n", " # --- CRITICAL CHANGE: Use ALL samples, remove subsetting logic ---\n", " # The splitting will now use the entire dataset (approx. 61k samples)\n", " # Split: 60% Train, 20% Val, 20% Test\n", " \n", " X_train, X_test, y_train, y_test = train_test_split(\n", " texts, encoded_labels, test_size=0.2, random_state=42, stratify=encoded_labels\n", " )\n", " X_train, X_val, y_train, y_val = train_test_split(\n", " X_train, y_train, test_size=0.25, random_state=42, stratify=y_train \n", " )\n", " \n", " print(f\"Data Splits - Train: {len(X_train)}, Val: {len(X_val)}, Test: {len(X_test)}\")\n", " \n", " # Initialize tokenizers and DataLoaders\n", " tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n", " \n", " train_dataset = TextDataset(X_train, y_train, tokenizer, MAX_LENGTH)\n", " val_loader = DataLoader(TextDataset(X_val, y_val, tokenizer, MAX_LENGTH), batch_size=BATCH_SIZE, shuffle=False)\n", " test_loader = DataLoader(TextDataset(X_test, y_test, tokenizer, MAX_LENGTH), batch_size=BATCH_SIZE, shuffle=False)\n", " \n", " # --- Step 1: Initialize and Load Teacher Model ---\n", " teacher_model, _, _ = load_teacher_checkpoint(CHECKPOINT_PATH, num_classes)\n", " if teacher_model is None:\n", " print(\"Cannot proceed with distillation without a valid Teacher model.\")\n", " return\n", "\n", " # --- Step 2: Initialize Student Models ---\n", " student_distilbert = StudentDistilBERT(num_classes=num_classes)\n", " student_mlp = StudentMLP(num_classes=num_classes, vocab_size=tokenizer.vocab_size)\n", " \n", " print(f\"\\nModel Parameters (Teacher: {sum(p.numel() for p in teacher_model.parameters()):,} | DistilBERT: {sum(p.numel() for p in student_distilbert.parameters()):,} | MLP: {sum(p.numel() for p in student_mlp.parameters()):,})\")\n", "\n", " # --- Step 3: Train Students ---\n", " train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)\n", " distilbert_metrics = train_student_model(student_distilbert, teacher_model, train_loader, val_loader, DEVICE, 'DistilBERT')\n", " mlp_metrics = train_student_model(student_mlp, teacher_model, train_loader, val_loader, DEVICE, 'MLP')\n", " \n", " # --- Step 4: Final Evaluation and Model Saving ---\n", " print(\"\\n\" + \"=\"*80)\n", " print(\"FINAL EVALUATION on Test Set & BEST MODEL SAVING\")\n", " print(\"=\"*80)\n", " \n", " models = {\n", " 'Teacher BERT (Corrected)': teacher_model,\n", " 'Student DistilBERT': student_distilbert,\n", " 'Student MLP': student_mlp\n", " }\n", " \n", " best_f1 = -1\n", " best_model_name = \"\"\n", " best_model_data = None\n", " models_roc_data = []\n", "\n", " for name, model in models.items():\n", " is_distilbert_eval = (\"DistilBERT\" in name)\n", " \n", " accuracy, loss, labels, preds, probs = evaluate_model(\n", " model, test_loader, DEVICE, return_probs=True, is_distilbert=is_distilbert_eval\n", " )\n", " \n", " models_roc_data.append((name, labels, probs))\n", " f1w = f1_score(labels, preds, average='weighted')\n", " \n", " print(f\"\\n{name} - Accuracy: {accuracy:.4f}, Weighted F1: {f1w:.4f}, Loss: {loss:.4f}\")\n", " print(classification_report(labels, preds, target_names=label_encoder.classes_.astype(str)))\n", "\n", " if \"Teacher\" not in name and f1w > best_f1:\n", " best_f1 = f1w\n", " best_model_name = name\n", " best_model_data = {\n", " 'model_state_dict': model.state_dict(),\n", " 'label_encoder': label_encoder,\n", " 'config': {'max_length': MAX_LENGTH, 'batch_size': BATCH_SIZE},\n", " 'test_accuracy': accuracy,\n", " 'f1_score': f1w,\n", " 'model_architecture': name\n", " }\n", "\n", " if best_model_data:\n", " torch.save(best_model_data, BEST_MODEL_PATH)\n", " print(\"\\n\" + \"=\"*80)\n", " print(f\"✅ FINAL DEPLOYMENT MODEL SAVED: {best_model_name} (F1: {best_f1:.4f})\")\n", " print(f\"File: {BEST_MODEL_PATH}\")\n", " print(\"=\"*80)\n", " else:\n", " print(\"\\n❌ Could not save the best model.\")\n", "\n", "\n", " # --- Step 5: Visualization ---\n", " print(\"\\n--- Visualizing ROC Curves ---\")\n", " plot_roc_comparison(models_roc_data, os.path.join(OUTPUT_DIR, 'roc_comparison.png'))\n", " \n", " print(\"\\n✓ Cybersecurity knowledge distillation pipeline completed successfully!\")\n", " \n", " # --- Step 6: Trigger XAI Agent ---\n", " if best_model_data and 'DistilBERT' in best_model_data.get('model_architecture', ''):\n", " main_xai_agent(X_test, y_test, label_encoder.classes_.tolist())\n", " else:\n", " print(\"\\nSkipping XAI Agent: The best model was not DistilBERT or data was unavailable.\")\n", "\n", "# --- 7. XAI Core Functions (Extracted from XAI Agent - functions remain the same) ---\n", "\n", "def extract_features(model, dataloader, device) -> np.ndarray:\n", " model.eval()\n", " all_features = []\n", " \n", " with torch.no_grad():\n", " for batch in tqdm(dataloader, desc=\"Extracting Features for XAI\"):\n", " input_ids = batch['input_ids'].to(device)\n", " attention_mask = batch['attention_mask'].to(device)\n", " \n", " _, features = model(input_ids, attention_mask)\n", " all_features.append(features.cpu().numpy())\n", " \n", " return np.concatenate(all_features, axis=0)\n", "\n", "def generate_xai_rules(X_features: np.ndarray, y_labels: np.ndarray, feature_names: list, class_names: list) -> str:\n", " print(\"\\nTraining Decision Tree Surrogate Model...\")\n", " \n", " dt_model = DecisionTreeClassifier(max_depth=XAI_MAX_DEPTH, random_state=42)\n", " dt_model.fit(X_features, y_labels)\n", " \n", " dt_preds = dt_model.predict(X_features)\n", " dt_acc = accuracy_score(y_labels, dt_preds)\n", " print(f\"Decision Tree (Surrogate) Accuracy on Extracted Features: {dt_acc:.4f}\")\n", " \n", " rules = export_text(\n", " dt_model, \n", " feature_names=feature_names, \n", " class_names=class_names\n", " )\n", " return rules\n", "\n", "def main_xai_agent(X_test, y_test, class_names_list):\n", " print(\"\\n\" + \"=\"*80)\n", " print(\"XAI AGENT: Rule Generation for Adaptive WAF (Surrogate Model)\")\n", " print(\"=\"*80)\n", " \n", " checkpoint = torch.load(BEST_MODEL_PATH, map_location=DEVICE)\n", " num_classes = len(class_names_list)\n", " \n", " model = StudentDistilBERT(num_classes=num_classes).to(DEVICE)\n", " model.load_state_dict(checkpoint['model_state_dict'])\n", " \n", " print(f\"Loading '{checkpoint.get('model_architecture')}' with F1-Score: {checkpoint['f1_score']:.4f} for XAI...\")\n", "\n", " tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n", " test_dataset = TextDataset(X_test, y_test, tokenizer, MAX_LENGTH)\n", " test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False)\n", "\n", " X_features = extract_features(model, test_loader, DEVICE)\n", " feature_names = [f'CLS_Dim_{i}' for i in range(X_features.shape[1])]\n", " \n", " print(f\"Features extracted! Shape: {X_features.shape}\")\n", "\n", " xai_rules = generate_xai_rules(X_features, y_test, feature_names, class_names_list)\n", " \n", " rules_path = os.path.join(OUTPUT_DIR, 'waf_xai_rules.txt')\n", " with open(rules_path, 'w') as f:\n", " f.write(xai_rules)\n", " \n", " print(\"\\n\" + \"=\"*80)\n", " print(\"✅ XAI RULE GENERATION COMPLETE\")\n", " print(f\"Rules saved to: {rules_path}\")\n", " print(\"Sample Rules (Decision Tree Surrogate):\")\n", " print(\"=\"*80)\n", " print('\\n'.join(xai_rules.split('\\n')[:15]))\n", " print(\"... (Rules Truncated) ...\")\n", " print(\"=\"*80)\n", "\n", "if __name__ == \"__main__\":\n", " main_distillation_pipeline()" ] } ], "metadata": { "language_info": { "name": "python" } }, "nbformat": 4, "nbformat_minor": 5 }