annabellatian committed
Commit 939ad05 · verified · 1 Parent(s): 20f1188

Update README.md

Files changed (1)
  1. README.md +118 -10
README.md CHANGED
@@ -1,10 +1,118 @@
- ---
- title: README
- emoji: 🏃
- colorFrom: purple
- colorTo: gray
- sdk: static
- pinned: false
- ---
-
- Edit this `README.md` markdown file to author your organization card.
+ # -*- coding: utf-8 -*-
+ """CIS 5190 Transformer Model
+
+ Automatically generated by Colab.
+
+ Original file is located at
+ https://colab.research.google.com/drive/1Iy-nQcufaF7--hI9He7Dp9FsW1TomgrP
+ """
+
+ import pandas as pd
+ from sklearn.model_selection import train_test_split
+ from sklearn.metrics import accuracy_score, classification_report
+ import torch
+ from torch.optim import AdamW  # transformers' AdamW is deprecated; use the PyTorch implementation
+ from torch.utils.data import Dataset, DataLoader
+ from transformers import BertTokenizer, BertForSequenceClassification
+ from transformers import get_scheduler
+ from google.colab import drive
+
+ drive.mount('/content/drive')
+
+ # SET DATASET PATH HERE
+ dataset_path = '/content/drive/My Drive/24 Fall/CIS 5190/CIS 5190 Final Project/test_data_random_subset.csv'
+
+ news_df = pd.read_csv(dataset_path)
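+ # The CSV is expected to provide a 'title' column (headline text) and a
+ # 'labels' column (an integer class id per headline).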
+
+ X = news_df['title']
+ y = news_df['labels']
+
+ # If the labels are outlet names rather than integers, map them to 0/1 first:
+ # y = y.apply(lambda x: 1 if x == 'FoxNews' else 0)
+
+ # Split the data into training and testing sets (80% train, 20% test)
+ X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42, shuffle=True, stratify=y)
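+ # stratify=y keeps the class proportions identical in the train and test
+ # splits, so an imbalanced dataset does not skew the held-out evaluation.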
+
+ # Tokenize the text using a BERT tokenizer
+ tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
+
+ def tokenize_data(texts, tokenizer, max_len=128):
+     return tokenizer(
+         list(texts),
+         padding=True,
+         truncation=True,
+         max_length=max_len,
+         return_tensors="pt"
+     )
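+ # The tokenizer call returns a dict of tensors ('input_ids', 'token_type_ids',
+ # 'attention_mask'), each of shape (num_examples, sequence_length), with every
+ # title padded or truncated as needed.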
+
+ # Tokenize the training and test datasets
+ train_encodings = tokenize_data(X_train, tokenizer)
+ test_encodings = tokenize_data(X_test, tokenizer)
+
+ # Create a custom dataset class
+ class NewsDataset(Dataset):
+     def __init__(self, encodings, labels):
+         self.encodings = encodings
+         self.labels = labels
+
+     def __len__(self):
+         return len(self.labels)
+
+     def __getitem__(self, idx):
+         # The encodings are already tensors (return_tensors="pt"), so index
+         # them directly instead of re-wrapping with torch.tensor()
+         item = {key: val[idx] for key, val in self.encodings.items()}
+         item['labels'] = torch.tensor(self.labels[idx])
+         return item
+
+ train_dataset = NewsDataset(train_encodings, y_train.tolist())
+ test_dataset = NewsDataset(test_encodings, y_test.tolist())
+
+ # Wrap the datasets in DataLoaders for batching
+ train_loader = DataLoader(train_dataset, batch_size=16, shuffle=True)
+ test_loader = DataLoader(test_dataset, batch_size=16)
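+ # The default collate function stacks each dict field across the batch, so a
+ # batch is a dict of (batch_size, sequence_length) tensors plus a
+ # (batch_size,) tensor of labels.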
+
+ # Define the model
+ model = BertForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=2)
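+ # This loads the pretrained BERT encoder and attaches a fresh, randomly
+ # initialized 2-class classification head (hence the "newly initialized
+ # weights" warning in Colab), which is then fine-tuned below.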
+
+ device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
+ model.to(device)
+
+ # Define optimizer and scheduler
+ epochs = 4
+ optimizer = AdamW(model.parameters(), lr=5e-5)
+ num_training_steps = len(train_loader) * epochs
+ lr_scheduler = get_scheduler("linear", optimizer=optimizer, num_warmup_steps=0, num_training_steps=num_training_steps)
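+ # The "linear" schedule decays the learning rate linearly from 5e-5 to 0 over
+ # num_training_steps; num_warmup_steps=0 means the decay starts immediately.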
+
+ # Train the model
+ def train_model(model, train_loader, optimizer, scheduler, epochs=4):
+     model.train()
+     for epoch in range(epochs):
+         epoch_loss = 0
+         for batch in train_loader:
+             batch = {k: v.to(device) for k, v in batch.items()}
+             # Passing 'labels' makes the model return the cross-entropy loss
+             outputs = model(**batch)
+             loss = outputs.loss
+             loss.backward()
+             optimizer.step()
+             scheduler.step()
+             optimizer.zero_grad()
+             epoch_loss += loss.item()
+         print(f"Epoch {epoch+1}/{epochs}, Loss: {epoch_loss/len(train_loader):.4f}")
+
+ # Evaluate the model
+ def evaluate_model(model, test_loader):
+     model.eval()
+     y_true, y_pred = [], []
+     with torch.no_grad():
+         for batch in test_loader:
+             batch = {k: v.to(device) for k, v in batch.items()}
+             outputs = model(**batch)
+             logits = outputs.logits
+             # The predicted class is the index of the larger logit
+             predictions = torch.argmax(logits, dim=-1)
+             y_true.extend(batch['labels'].tolist())
+             y_pred.extend(predictions.tolist())
+     return y_true, y_pred
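+ # evaluate_model returns parallel lists of gold and predicted labels, which
+ # plug straight into sklearn's metric functions below.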
+
+ train_model(model, train_loader, optimizer, lr_scheduler, epochs=epochs)
+
+ y_true, y_pred = evaluate_model(model, test_loader)
+
+ # Print evaluation metrics
+ print(f"Accuracy: {accuracy_score(y_true, y_pred):.4f}")
+ print("Classification Report:\n", classification_report(y_true, y_pred))