subbunanepalli committed
Commit 55fef5b · verified · 1 Parent(s): 6fa60e0

Create bert_multilabel_model.pth

Files changed (1):
  1. bert_multilabel_model.pth (+115, -0)
bert_multilabel_model.pth ADDED
@@ -0,0 +1,115 @@
# Install required packages
!pip install -q transformers scikit-learn

# Imports
import pickle

import pandas as pd
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MultiLabelBinarizer
from transformers import BertTokenizer, BertModel
from torch.optim import Adam

# Constants
TEXT_COLUMN = 'Sanction_Context'
LABEL_COLUMNS = [
    'Red_Flag_Reason',
    'Maker_Action',
    'Escalation_Level',
    'Risk_Category',
    'Risk_Drivers',
    'Investigation_Outcome',
]

# Load and clean data
df = pd.read_csv('/kaggle/input/systhesis/synthetic_transactions_samples_5000.csv')
df = df.dropna(subset=[TEXT_COLUMN])                     # Drop rows with no input text
df[LABEL_COLUMNS] = df[LABEL_COLUMNS].fillna('Unknown')  # Fill missing labels

# Encode labels using MultiLabelBinarizer: each row's six label values
# become one multi-hot vector over the union of all distinct label strings
mlb = MultiLabelBinarizer()
Y = mlb.fit_transform(df[LABEL_COLUMNS].astype(str).values.tolist())
X = df[TEXT_COLUMN].tolist()

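# Quick sanity check (added for illustration, not in the original script):
# each row of Y carries up to six ones, and a value that appears in two
# different columns collapses to a single output bit.
print(f"Distinct label values: {len(mlb.classes_)}")
print(f"Label matrix shape: {Y.shape}")  # (num_rows, len(mlb.classes_))
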
# Save the label classes for decoding predictions later
with open("mlb_classes.pkl", "wb") as f:
    pickle.dump(mlb.classes_, f)

# Tokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

# Dataset class
class BertMultiLabelDataset(Dataset):
    def __init__(self, texts, labels, tokenizer, max_len=128):
        self.texts = texts
        self.labels = labels
        self.tokenizer = tokenizer
        self.max_len = max_len

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, idx):
        encoding = self.tokenizer(
            self.texts[idx],
            padding='max_length',
            truncation=True,
            max_length=self.max_len,
            return_tensors="pt"
        )
        # Squeeze out the batch dimension added by return_tensors="pt"
        item = {key: val.squeeze(0) for key, val in encoding.items()}
        item['labels'] = torch.FloatTensor(self.labels[idx])
        return item

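# Illustration (added): one dataset item is a dict of fixed-length tensors,
# ready for the DataLoader's default collate function.
_sample = BertMultiLabelDataset(X[:1], Y[:1], tokenizer)[0]
print({k: tuple(v.shape) for k, v in _sample.items()})
# e.g. {'input_ids': (128,), 'token_type_ids': (128,), 'attention_mask': (128,), 'labels': (num_labels,)}
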
# Model definition: BERT encoder + dropout + a single linear head producing
# one logit per label (no sigmoid here; BCEWithLogitsLoss applies it)
class BertForMultiLabel(nn.Module):
    def __init__(self, num_labels):
        super().__init__()
        self.bert = BertModel.from_pretrained('bert-base-uncased')
        self.dropout = nn.Dropout(0.3)
        self.classifier = nn.Linear(self.bert.config.hidden_size, num_labels)

    def forward(self, input_ids, attention_mask):
        outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask)
        pooled_output = self.dropout(outputs.pooler_output)
        logits = self.classifier(pooled_output)
        return logits

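# Shape sanity check (illustrative sketch, commented out to avoid an extra
# model download): logits come out as (batch_size, num_labels), matching the
# multi-hot targets that BCEWithLogitsLoss expects.
# _model = BertForMultiLabel(num_labels=Y.shape[1])
# _loader = DataLoader(BertMultiLabelDataset(X[:2], Y[:2], tokenizer), batch_size=2)
# _batch = next(iter(_loader))
# print(_model(_batch['input_ids'], _batch['attention_mask']).shape)  # torch.Size([2, num_labels])
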
# Prepare data
X_train, X_test, Y_train, Y_test = train_test_split(
    X, Y, test_size=0.2, random_state=42  # seed the split for reproducibility
)

train_dataset = BertMultiLabelDataset(X_train, Y_train, tokenizer)
train_loader = DataLoader(train_dataset, batch_size=16, shuffle=True)

# Device setup
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Training on device: {device}")

# Model, optimizer, loss
model = BertForMultiLabel(num_labels=Y.shape[1]).to(device)
optimizer = Adam(model.parameters(), lr=2e-5)
loss_fn = nn.BCEWithLogitsLoss()  # sigmoid + binary cross-entropy, applied per label

# Training loop
for epoch in range(3):
    model.train()
    total_loss = 0
    for i, batch in enumerate(train_loader):
        input_ids = batch['input_ids'].to(device)
        attention_mask = batch['attention_mask'].to(device)
        labels = batch['labels'].to(device)

        optimizer.zero_grad()
        logits = model(input_ids, attention_mask)
        loss = loss_fn(logits, labels)
        loss.backward()
        optimizer.step()

        total_loss += loss.item()
        if i % 10 == 0:
            print(f"Epoch {epoch+1}, Step {i}, Loss: {loss.item():.4f}")

    avg_loss = total_loss / len(train_loader)
    print(f"Epoch {epoch+1} finished. Avg Loss: {avg_loss:.4f}")
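
The script trains the model but never writes a checkpoint, even though the commit adds a file named bert_multilabel_model.pth. A minimal sketch of the missing save step, plus decoding a prediction with the pickled classes, might look like the following; the file names reuse the ones above, and the 0.5 decision threshold is an assumption the script does not specify.

# Sketch (not part of the original script): persist the trained weights
torch.save(model.state_dict(), "bert_multilabel_model.pth")

# Decode predictions for one text using the saved label classes
model.eval()
with torch.no_grad():
    enc = tokenizer("Sample transaction narrative", padding='max_length',
                    truncation=True, max_length=128, return_tensors="pt").to(device)
    probs = torch.sigmoid(model(enc['input_ids'], enc['attention_mask']))

with open("mlb_classes.pkl", "rb") as f:
    classes = pickle.load(f)
predicted = [classes[j] for j, p in enumerate(probs[0]) if p > 0.5]  # assumed threshold
print(predicted)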