# =============================================================================
# Siamese CVE -> ATT&CK technique matcher over SRL-tagged text (ATTACK-BERT).
#
# NOTE(review): the notebook export under review was garbled near the top of
# the cell — the import/constant header and the head of SRLProcessor were not
# legible. Those parts are reconstructed below from usage and log output and
# are marked "TODO confirm"; everything from load_srl_dataset() down is the
# original logic with these fixes applied inline:
#   * classifier output used a bare .squeeze(): a batch of size 1 collapses
#     to a 0-dim tensor, breaking the hinge loss and per-sample indexing
#     -> .squeeze(-1)
#   * soft_align_attention masked the b->a attention with a wrongly-shaped /
#     wrongly-indexed expansion of mask_a -> mask along the correct (a) axis
#   * the best checkpoint was only restored on the early-stopping path; it is
#     now restored after training regardless of how the loop ended
#   * empty-dataset fallback pd.Series() now passes an explicit dtype
#   * evaluation no longer passes labels to forward (no contrastive loss is
#     needed at test time); t-SNE sampling is seeded and perplexity clamped
# =============================================================================
import json
import os
import random

import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader

import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.manifold import TSNE
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import train_test_split
from tqdm import tqdm
from transformers import AutoModel, AutoTokenizer

# NOTE(review): constants reconstructed from the run's log output — confirm
# against the original notebook before relying on them.
MODEL_NAME = "ATTACK-BERT"
MODEL_STRING = "basel/ATTACK-BERT"      # TODO confirm Hugging Face model id
DATASET_PATH = "srl_dataset.json"       # TODO confirm dataset location
SEEDS_TO_RUN = [42]                     # TODO confirm seed list


def set_all_seeds(seed):
    """Seed every RNG the pipeline touches so a run is reproducible."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)


class SRLProcessor:
    """Turns SRL (semantic role labeling) annotations into role-tagged text.

    Words carrying a semantic role are wrapped in bracket tags (e.g.
    ``[AGENT] attacker [/AGENT]``) so the encoder can see role structure.

    NOTE(review): ``__init__`` and the head of ``process_srl`` were illegible
    in the reviewed export; the reconstruction below preserves the visible
    tail logic (BIO-style ``X-ROLE`` tag parsing and bracket wrapping).
    Confirm the ``role_tags`` mapping and the per-sentence schema.
    """

    def __init__(self):
        # TODO confirm: mapping from SRL role label to the tag text emitted
        # into the augmented sentence.
        self.role_tags = {'ARG0': 'AGENT', 'V': 'ACTION', 'ARG1': 'TARGET'}

    def process_srl(self, srl_data):
        """Render a list of SRL-annotated sentences as one tagged string."""
        augmented = []
        for sentence in srl_data:
            # TODO confirm reconstructed schema: each sentence carries its
            # tokens plus one BIO tag sequence per detected predicate.
            words = sentence.get('words', [])
            tags = [[] for _ in words]
            for verb in sentence.get('verbs', []):
                for idx, tag in enumerate(verb.get('tags', [])):
                    if idx >= len(tags):
                        break
                    if tag != 'O' and '-' in tag:
                        role = tag.split('-')[1]
                        if role in self.role_tags:
                            tags[idx].append(role)

            tagged_sentence = []
            for word, roles in zip(words, tags):
                for role in roles:
                    if role in self.role_tags:
                        tagged_sentence.append(f"[{self.role_tags[role]}]")
                tagged_sentence.append(word)
                # Close tags in reverse order so the brackets nest properly.
                for role in reversed(roles):
                    if role in self.role_tags:
                        tagged_sentence.append(f"[/{self.role_tags[role]}]")

            augmented.append(' '.join(tagged_sentence))
        return ' '.join(augmented)


def load_srl_dataset(json_path):
    """Load the SRL sample file into a DataFrame of tagged text pairs.

    Returns an empty DataFrame (after printing the error) when the file is
    missing or malformed, so callers can bail out gracefully.
    """
    try:
        with open(json_path) as f:
            data = json.load(f).get('samples', [])
    except (json.JSONDecodeError, FileNotFoundError) as e:
        print(f"Error loading JSON data: {e}")
        return pd.DataFrame()

    processor = SRLProcessor()
    samples = []
    for sample in tqdm(data, desc="Processing SRL data"):
        try:
            samples.append({
                'CVE_text': processor.process_srl(sample.get('CVE_srl', [])),
                'Technique_text': processor.process_srl(sample.get('Technique_srl', [])),
                'label': sample.get('label', 0),
                'role_score': sample.get('role_match_score', 0.0)
            })
        except Exception:
            # Deliberate best-effort: skip an individual malformed sample
            # instead of aborting the whole load.
            continue

    df = pd.DataFrame(samples)
    print(f"Loaded {len(df)} valid samples")
    return df


class HingeSiameseDataset(Dataset):
    """Tokenized (CVE, technique) pairs with hinge labels in {-1, +1}.

    ``role_score`` is min-max normalized over the supplied frame and exposed
    as a per-sample loss weight.
    """

    def __init__(self, df, tokenizer, max_len=128):
        self.df = df
        self.tokenizer = tokenizer
        self.max_len = max_len

        if len(df) > 0:
            role_min = df['role_score'].min()
            role_max = df['role_score'].max()
            # +1e-8 guards against a zero range when all scores are equal.
            self.role_weights = (df['role_score'] - role_min) / (role_max - role_min + 1e-8)
            # Map {0, 1} labels to {-1, +1} for the hinge formulation.
            self.labels = 2 * df['label'].values - 1
        else:
            # FIX: pd.Series() with no dtype is deprecated; be explicit.
            self.role_weights = pd.Series(dtype=float)
            self.labels = np.array([])

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        row = self.df.iloc[idx]
        cve_encodings = self.tokenizer(row['CVE_text'], max_length=self.max_len,
                                       padding='max_length', truncation=True, return_tensors='pt')
        tech_encodings = self.tokenizer(row['Technique_text'], max_length=self.max_len,
                                        padding='max_length', truncation=True, return_tensors='pt')

        return {
            'cve_input_ids': cve_encodings['input_ids'].squeeze(),
            'cve_attention_mask': cve_encodings['attention_mask'].squeeze(),
            'tech_input_ids': tech_encodings['input_ids'].squeeze(),
            'tech_attention_mask': tech_encodings['attention_mask'].squeeze(),
            'labels': torch.tensor(self.labels[idx], dtype=torch.float),
            'role_weights': torch.tensor(self.role_weights.iloc[idx], dtype=torch.float),
            'CVE_text': row['CVE_text'],
            'Technique_text': row['Technique_text']
        }


class EnhancedContrastiveSRLModel(nn.Module):
    """Siamese BERT encoder with soft cross-attention alignment.

    Both texts are encoded by a shared BERT, softly aligned against each
    other (ESIM-style), mean-pooled, projected to 256-d, and scored by an
    MLP over ``[a, b, |a-b|, a*b]``.  A cosine-embedding contrastive loss on
    the projections is returned alongside the raw classifier score.
    """

    def __init__(self, model_name=MODEL_STRING, hidden_size=768, margin=0.4, contrastive_weight=0.3):
        super().__init__()
        try:
            self.bert = AutoModel.from_pretrained(model_name)
        except Exception as e:
            raise RuntimeError(f"Failed to initialize the model: {e}")

        self.srl_proj = nn.Sequential(nn.Linear(hidden_size, 256), nn.ReLU(), nn.LayerNorm(256))
        self.classifier = nn.Sequential(nn.Linear(256 * 4, 512), nn.ReLU(), nn.Dropout(0.2), nn.Linear(512, 1))
        self.contrastive_loss = nn.CosineEmbeddingLoss(margin=margin)
        self.contrastive_weight = contrastive_weight

    def soft_align_attention(self, a, b, mask_a, mask_b):
        """Cross-attend a over b and b over a, masking padding positions.

        a: (B, La, H), b: (B, Lb, H); mask_* are the attention masks (1 for
        real tokens, 0 for padding).  Returns (aligned_a, aligned_b).
        """
        similarity = torch.bmm(a, b.transpose(1, 2))          # (B, La, Lb)
        batch_size = mask_b.size(0)
        seq_len_a, seq_len_b = similarity.size(1), similarity.size(2)

        # Attention of a over b: mask out b's padding columns.
        mask_b_exp = mask_b.unsqueeze(1).expand(batch_size, seq_len_a, seq_len_b)
        attn_weights_a = F.softmax(similarity.masked_fill(mask_b_exp == 0, -1e9), dim=2)

        # FIX: attention of b over a operates on similarity^T of shape
        # (B, Lb, La), so a's padding must be masked along the LAST axis.
        # The original expanded mask_a to (B, La, Lb), which masks by the
        # wrong index (it only ran at all because La == Lb == max_len).
        mask_a_exp = mask_a.unsqueeze(1).expand(batch_size, seq_len_b, seq_len_a)
        attn_weights_b = F.softmax(similarity.transpose(1, 2).masked_fill(mask_a_exp == 0, -1e9), dim=2)

        aligned_a = torch.bmm(attn_weights_a, b)
        aligned_b = torch.bmm(attn_weights_b, a)
        return aligned_a, aligned_b

    def pooling(self, token_embeddings, mask):
        """Mean-pool over non-padding tokens; all-padding rows pool to 0."""
        mask = mask.unsqueeze(2).float()
        summed = torch.sum(token_embeddings * mask, dim=1)
        counts = mask.sum(dim=1).clamp(min=1e-9)
        valid_counts = (counts > 1e-8).float()
        return (summed / counts) * valid_counts

    def forward(self, cve_input, tech_input, labels=None):
        """Return (raw scores of shape (batch,), contrastive-loss scalar).

        ``labels`` (hinge labels in {-1, +1}) are only used to compute the
        contrastive loss; when None the loss is 0.
        """
        cve_outputs = self.bert(**cve_input)
        tech_outputs = self.bert(**tech_input)

        cve_seq = cve_outputs.last_hidden_state
        tech_seq = tech_outputs.last_hidden_state

        cve_mask, tech_mask = cve_input['attention_mask'], tech_input['attention_mask']

        aligned_cve, aligned_tech = self.soft_align_attention(cve_seq, tech_seq, cve_mask, tech_mask)

        # Blend each token with its softly-aligned counterpart.
        cve_combined = (cve_seq + aligned_cve) / 2.0
        tech_combined = (tech_seq + aligned_tech) / 2.0

        cve_pooled = self.pooling(cve_combined, cve_mask)
        tech_pooled = self.pooling(tech_combined, tech_mask)

        cve_emb, tech_emb = self.srl_proj(cve_pooled), self.srl_proj(tech_pooled)

        # ESIM-style feature combination: [a, b, |a-b|, a*b].
        diff = torch.abs(cve_emb - tech_emb)
        prod = cve_emb * tech_emb
        combined_features = torch.cat([cve_emb, tech_emb, diff, prod], dim=1)

        # FIX: squeeze only the trailing feature dim — a bare .squeeze()
        # collapses a batch of size 1 to a 0-dim tensor, breaking the hinge
        # loss broadcast and per-sample indexing downstream.
        classifier_out = self.classifier(combined_features).squeeze(-1)
        cont_loss = torch.tensor(0.0, device=classifier_out.device)

        if labels is not None:
            contrastive_labels = torch.where(labels > 0, 1.0, -1.0).to(labels.device)
            cont_loss = self.contrastive_loss(cve_emb, tech_emb, contrastive_labels)

        return classifier_out, cont_loss

    def get_embeddings(self, input_dict):
        """Encode one side of a pair and return its 256-d projection (numpy)."""
        with torch.no_grad():
            outputs = self.bert(**input_dict)
            pooled = self.pooling(outputs.last_hidden_state, input_dict['attention_mask'])
            return self.srl_proj(pooled).cpu().numpy()


class WeightedHingeLoss(nn.Module):
    """Hinge loss max(0, margin - y*s), scaled per sample by (1 + weight)."""

    def __init__(self, margin=1.0):
        super().__init__()
        self.margin = margin

    def forward(self, outputs, labels, weights):
        losses = torch.clamp(self.margin - labels * outputs, min=0)
        return (losses * (1 + weights)).mean()


def get_splits_for_model(df, test_size=0.15, val_size=0.15, random_state=42):
    """Stratified train/val/test split with val_size relative to the whole df."""
    train_val_df, test_df = train_test_split(df, test_size=test_size, random_state=random_state, stratify=df['label'])
    # Rescale so val_size is a fraction of the ORIGINAL frame, not of train_val.
    relative_val_size = val_size / (1 - test_size)
    train_df, val_df = train_test_split(train_val_df, test_size=relative_val_size, random_state=random_state, stratify=train_val_df['label'])
    return train_df, val_df, test_df


def train_hinge_model(train_df, results_dir, val_df=None, epochs=10, batch_size=16,
                      margin=1.0, patience=3, lr=2e-5, contrastive_weight=0.4):
    """Train the siamese model with weighted hinge + contrastive loss.

    Saves the best-validation-accuracy checkpoint under ``results_dir`` and
    returns the model with those weights restored (when a validation set is
    given); also writes the training-history plot.
    """
    tokenizer = AutoTokenizer.from_pretrained(MODEL_STRING)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = EnhancedContrastiveSRLModel(model_name=MODEL_STRING, contrastive_weight=contrastive_weight).to(device)

    train_dataset = HingeSiameseDataset(train_df, tokenizer)
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    val_loader = DataLoader(HingeSiameseDataset(val_df, tokenizer), batch_size=batch_size) if val_df is not None else None

    optimizer = optim.AdamW(model.parameters(), lr=lr)
    criterion = WeightedHingeLoss(margin=margin)
    history = {'train_loss': [], 'train_acc': [], 'val_loss': [], 'val_acc': []}
    best_val_acc = 0
    patience_counter = 0
    best_model_path = os.path.join(results_dir, "best_model.pth")

    for epoch in range(epochs):
        model.train()
        train_loss = cont_loss_total = train_correct = train_total = 0

        for batch in tqdm(train_loader, desc=f"Epoch {epoch+1}/{epochs} - Training"):
            cve_input = {'input_ids': batch['cve_input_ids'].to(device), 'attention_mask': batch['cve_attention_mask'].to(device)}
            tech_input = {'input_ids': batch['tech_input_ids'].to(device), 'attention_mask': batch['tech_attention_mask'].to(device)}
            labels, weights = batch['labels'].to(device), batch['role_weights'].to(device)

            optimizer.zero_grad()
            outputs, cont_loss = model(cve_input, tech_input, labels)

            total_loss = criterion(outputs, labels, weights) + cont_loss * model.contrastive_weight
            total_loss.backward()
            optimizer.step()

            train_loss += total_loss.item()
            cont_loss_total += cont_loss.item()
            # NOTE: sign(0) == 0 matches neither label; a raw score of exactly
            # 0 therefore counts as incorrect (kept from original).
            preds = torch.sign(outputs)
            train_correct += (preds == labels).sum().item()
            train_total += labels.size(0)

        history['train_loss'].append(train_loss / len(train_loader))
        history['train_acc'].append(train_correct / train_total)

        if val_loader is not None:
            val_acc, val_loss = validate_model(model, val_loader, criterion, device)
            history['val_loss'].append(val_loss)
            history['val_acc'].append(val_acc)
            if val_acc > best_val_acc:
                best_val_acc = val_acc
                patience_counter = 0
                torch.save(model.state_dict(), best_model_path)
            else:
                patience_counter += 1
                if patience_counter >= patience:
                    break

    # FIX: restore the best checkpoint whether or not early stopping fired.
    # The original only reloaded it on the early-stop path, so a run that
    # completed all epochs returned the LAST epoch's weights.
    if val_loader is not None and os.path.exists(best_model_path):
        model.load_state_dict(torch.load(best_model_path))

    plot_training_history(history, results_dir)
    return model


def validate_model(model, val_loader, criterion, device):
    """Return (accuracy, mean loss) of the model over ``val_loader``."""
    model.eval()
    val_loss = val_correct = val_total = 0
    with torch.no_grad():
        for batch in val_loader:
            cve_input = {'input_ids': batch['cve_input_ids'].to(device), 'attention_mask': batch['cve_attention_mask'].to(device)}
            tech_input = {'input_ids': batch['tech_input_ids'].to(device), 'attention_mask': batch['tech_attention_mask'].to(device)}
            labels, weights = batch['labels'].to(device), batch['role_weights'].to(device)
            outputs, cont_loss = model(cve_input, tech_input, labels)
            val_loss += (criterion(outputs, labels, weights) + cont_loss * model.contrastive_weight).item()
            val_correct += (torch.sign(outputs) == labels).sum().item()
            val_total += labels.size(0)
    return val_correct / val_total, val_loss / len(val_loader)


def plot_training_history(history, results_dir):
    """Save loss/accuracy curves to ``results_dir/training_history.png``."""
    plt.figure(figsize=(12, 5))
    plt.subplot(1, 2, 1)
    plt.plot(history['train_loss'], label='Train Loss')
    if 'val_loss' in history and history['val_loss']:
        plt.plot(history['val_loss'], label='Val Loss')
    plt.legend()
    plt.title('Loss Curves')
    plt.subplot(1, 2, 2)
    plt.plot(history['train_acc'], label='Train Accuracy')
    if 'val_acc' in history and history['val_acc']:
        plt.plot(history['val_acc'], label='Val Accuracy')
    plt.legend()
    plt.title('Accuracy Curves')
    plt.savefig(os.path.join(results_dir, 'training_history.png'))
    plt.close()


def evaluate_model(model, test_loader, device, results_dir):
    """Score the model on the test set.

    Writes a per-sample CSV and a confusion-matrix PNG to ``results_dir``
    and returns (accuracy, detailed DataFrame, metrics dict).
    """
    model.eval()
    all_preds, all_true, test_correct, test_total = [], [], 0, 0
    test_samples = {'cve_text': [], 'tech_text': [], 'true_label': [], 'predicted_label': [], 'confidence_score': []}

    with torch.no_grad():
        for batch in tqdm(test_loader, desc="Evaluating on test set"):
            cve_input = {'input_ids': batch['cve_input_ids'].to(device), 'attention_mask': batch['cve_attention_mask'].to(device)}
            tech_input = {'input_ids': batch['tech_input_ids'].to(device), 'attention_mask': batch['tech_attention_mask'].to(device)}
            labels = batch['labels'].to(device)

            # FIX: don't pass labels at eval time — the contrastive loss is
            # discarded here and the scores do not depend on it.
            outputs, _ = model(cve_input, tech_input)
            preds = torch.sign(outputs)
            test_correct += (preds == labels).sum().item()
            test_total += labels.size(0)
            all_preds.extend(preds.cpu().numpy())
            all_true.extend(labels.cpu().numpy())

            for i in range(len(batch['cve_input_ids'])):
                test_samples['cve_text'].append(batch['CVE_text'][i])
                test_samples['tech_text'].append(batch['Technique_text'][i])
                test_samples['true_label'].append(float(labels[i].cpu().numpy()))
                test_samples['predicted_label'].append(float(preds[i].cpu().numpy()))
                test_samples['confidence_score'].append(float(outputs[i].cpu().numpy()))

    test_acc = test_correct / test_total
    test_results_df = pd.DataFrame(test_samples)
    # Map hinge labels {-1, +1} back to {0, 1} for reporting.
    test_results_df['true_label'] = (test_results_df['true_label'] + 1) / 2
    test_results_df['predicted_label'] = (test_results_df['predicted_label'] + 1) / 2
    test_results_df['correct'] = test_results_df['true_label'] == test_results_df['predicted_label']
    test_results_df.to_csv(os.path.join(results_dir, 'test_results_detailed.csv'), index=False)

    all_true_01 = [(l + 1) / 2 for l in all_true]
    all_preds_01 = [(p + 1) / 2 for p in all_preds]
    # Float labels make the report keys '0.0' / '1.0'.
    class_report = classification_report(all_true_01, all_preds_01, output_dict=True)

    cm = confusion_matrix(all_true_01, all_preds_01)
    plt.figure(figsize=(8, 6))
    sns.heatmap(cm, annot=True, fmt='d', cmap='Blues')
    plt.title('Confusion Matrix')
    plt.savefig(os.path.join(results_dir, 'confusion_matrix.png'))
    plt.close()

    metrics = {
        'accuracy': class_report['accuracy'],
        'precision': class_report['1.0']['precision'] if '1.0' in class_report else 0,
        'recall': class_report['1.0']['recall'] if '1.0' in class_report else 0,
        'f1_score': class_report['1.0']['f1-score'] if '1.0' in class_report else 0
    }
    return test_acc, test_results_df, metrics


def main():
    """Per-seed pipeline: split, train (or load), evaluate, dump metrics,
    and render a t-SNE of the learned pair embeddings."""
    print(f"Loading dataset for {MODEL_NAME}...")
    df = load_srl_dataset(DATASET_PATH)
    if df.empty:
        return

    tokenizer = AutoTokenizer.from_pretrained(MODEL_STRING)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    for current_seed in SEEDS_TO_RUN:
        print(f"\n{'='*50}\nSTARTING RUN FOR {MODEL_NAME} - SEED: {current_seed}\n{'='*50}")
        set_all_seeds(current_seed)
        results_dir = f"./attackbert/results_{MODEL_NAME}_seed_{current_seed}"
        os.makedirs(results_dir, exist_ok=True)

        train_df, val_df, test_df = get_splits_for_model(df, random_state=current_seed)
        best_model_path = os.path.join(results_dir, "best_model.pth")

        # Resume from a previous run's checkpoint when one exists.
        if os.path.exists(best_model_path):
            model = EnhancedContrastiveSRLModel(model_name=MODEL_STRING).to(device)
            model.load_state_dict(torch.load(best_model_path, map_location=device))
        else:
            model = train_hinge_model(train_df=train_df, val_df=val_df, results_dir=results_dir, epochs=10)

        test_loader = DataLoader(HingeSiameseDataset(test_df, tokenizer), batch_size=16)
        test_acc, _, metrics = evaluate_model(model, test_loader, device, results_dir)

        seed_result = {
            "model": MODEL_NAME, "seed": current_seed, "accuracy": float(metrics['accuracy']),
            "precision": float(metrics['precision']), "recall": float(metrics['recall']), "f1": float(metrics['f1_score'])
        }
        with open(os.path.join(results_dir, f"metrics_{MODEL_NAME}_seed_{current_seed}.json"), "w") as f:
            json.dump(seed_result, f, indent=2)

        print(f"Generating t-SNE for seed {current_seed}...")
        # FIX: seed the sample so the visualization is reproducible.
        vis_sample = test_df.sample(min(500, len(test_df)), random_state=current_seed).reset_index(drop=True)
        vis_loader = DataLoader(HingeSiameseDataset(vis_sample, tokenizer), batch_size=16)
        cve_emb_list, tech_emb_list, labels_list = [], [], []

        model.eval()
        with torch.no_grad():
            for batch in vis_loader:
                cve_input = {'input_ids': batch['cve_input_ids'].to(device), 'attention_mask': batch['cve_attention_mask'].to(device)}
                tech_input = {'input_ids': batch['tech_input_ids'].to(device), 'attention_mask': batch['tech_attention_mask'].to(device)}
                cve_emb_list.extend(model.get_embeddings(cve_input))
                tech_emb_list.extend(model.get_embeddings(tech_input))
                labels_list.extend(batch['labels'].numpy())

        cve_emb_list, tech_emb_list, labels_list = np.array(cve_emb_list), np.array(tech_emb_list), np.array(labels_list)
        combined = np.vstack([cve_emb_list, tech_emb_list])
        types = np.concatenate([np.zeros(len(cve_emb_list)), np.ones(len(tech_emb_list))])
        binary_labels = (np.concatenate([labels_list, labels_list]) + 1) / 2

        # FIX: TSNE requires perplexity < n_samples; clamp for tiny test sets.
        perplexity = min(30, max(1, len(combined) - 1))
        reduced = TSNE(n_components=2, random_state=current_seed, perplexity=perplexity).fit_transform(combined)
        plt.figure(figsize=(12, 10))
        markers, colors = {0: 'o', 1: '^'}, {0: 'red', 1: 'blue'}
        for t in [0, 1]:
            for l in [0, 1]:
                mask = (types == t) & (binary_labels == l)
                plt.scatter(reduced[mask, 0], reduced[mask, 1], marker=markers[t], c=colors[l], alpha=0.7)
        plt.title(f't-SNE Visualization (Seed {current_seed})')
        plt.savefig(os.path.join(results_dir, 'embeddings_visualization.png'))
        plt.close()


if __name__ == "__main__":
    main()
"source": [] } ], "metadata": { "kaggle": { "accelerator": "nvidiaTeslaT4", "dataSources": [ { "datasetId": 8368214, "sourceId": 13203913, "sourceType": "datasetVersion" } ], "dockerImageVersionId": 31090, "isGpuEnabled": true, "isInternetEnabled": true, "language": "python", "sourceType": "notebook" }, "kernelspec": { "display_name": "Python3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.12.13" } }, "nbformat": 4, "nbformat_minor": 4 }