akaarr committed
Commit 04b0398 · verified · 1 Parent(s): 8dba096

Upload Bert_Model.py

Files changed (1)
  1. Bert_Model.py +122 -0
Bert_Model.py ADDED
@@ -0,0 +1,122 @@
+ import pandas as pd
+ import numpy as np
+ from datasets import Dataset
+ from transformers import AutoTokenizer, AutoModelForTokenClassification, TrainingArguments, Trainer
+ from seqeval.metrics import classification_report
+ import torch
+
+
+ def main():
+     # Verify GPU availability
+     device = "cuda" if torch.cuda.is_available() else "cpu"
+     print(f"Using device: {device}")
+
+     # Load dataset from Excel
+     df = pd.read_excel('Augmented_Dataset.xlsx')
+
+     # Clean the data
+     df = df.dropna(subset=['Word', 'Tag'])
+     df['Word'] = df['Word'].astype(str)
+     df['Tag'] = df['Tag'].astype(str)
+
+     # Group words and tags into sentences
+     grouped_data = df.groupby("Sentence").apply(lambda s: {
+         'words': s['Word'].tolist(),
+         'labels': s['Tag'].tolist()
+     }).tolist()
+
+     # Convert grouped data to a Hugging Face Dataset
+     dataset = Dataset.from_list(grouped_data)
+     print(f"Total dataset size: {len(dataset)}")
+
+     # Split into train and test sets
+     train_test_split = dataset.train_test_split(test_size=0.2)
+     train_dataset = train_test_split['train']
+     test_dataset = train_test_split['test']
+
+     # Load the tokenizer
+     tokenizer = AutoTokenizer.from_pretrained("abdulhade/RoBERTa-large-SizeCorpus_1B")
+
+     # Map labels to unique IDs
+     unique_labels = list(set(df['Tag']))
+     label2id = {label: i for i, label in enumerate(unique_labels)}
+     id2label = {i: label for label, i in label2id.items()}
+
+     # Tokenize and align labels; special tokens get -100 so the loss ignores them
+     def tokenize_and_align_labels(examples):
+         tokenized_inputs = tokenizer(
+             examples['words'],
+             truncation=True,
+             is_split_into_words=True,
+             padding='max_length',
+             max_length=128
+         )
+         labels = []
+         for i, label in enumerate(examples['labels']):
+             word_ids = tokenized_inputs.word_ids(batch_index=i)
+             label_ids = [-100 if word_id is None else label2id[label[word_id]] for word_id in word_ids]
+             labels.append(label_ids)
+
+         tokenized_inputs["labels"] = labels
+         return tokenized_inputs
+
+     # Apply tokenization to datasets without parallel processing
+     train_dataset = train_dataset.map(tokenize_and_align_labels, batched=True)
+     test_dataset = test_dataset.map(tokenize_and_align_labels, batched=True)
+
+     # Load the model
+     model = AutoModelForTokenClassification.from_pretrained(
+         "abdulhade/RoBERTa-large-SizeCorpus_1B",
+         num_labels=len(unique_labels),
+         id2label=id2label,
+         label2id=label2id
+     ).to(device)
+
+     # Set up training arguments
+     training_args = TrainingArguments(
+         output_dir='results',
+         evaluation_strategy="epoch",
+         learning_rate=2e-5,
+         per_device_train_batch_size=64,  # Adjusted for 8GB VRAM
+         per_device_eval_batch_size=64,
+         num_train_epochs=50,  # Increased to 50
+         weight_decay=0.01,
+         save_steps=5000,
+         save_total_limit=2,
+         logging_dir='./logs',
+         fp16=True,  # Use mixed precision for faster computation
+     )
+
+     # Initialize Trainer
+     trainer = Trainer(
+         model=model,
+         args=training_args,
+         train_dataset=train_dataset,
+         eval_dataset=test_dataset,
+         tokenizer=tokenizer,
+     )
+
+     # Train the model
+     trainer.train()
+
+     # Save the trained model and tokenizer
+     output_dir = 'NER_RoBERTa_fineTuning'
+     model.save_pretrained(output_dir)
+     tokenizer.save_pretrained(output_dir)
+
+     print(f"Model and tokenizer saved to {output_dir}")
+
+     # Evaluate the model, dropping the ignored (-100) positions
+     predictions, labels, _ = trainer.predict(test_dataset)
+     predictions = np.argmax(predictions, axis=2)
+
+     true_labels = [[id2label[label] for label in label_set if label != -100] for label_set in labels]
+     true_predictions = [[id2label[pred] for pred, label in zip(pred_set, label_set) if label != -100]
+                         for pred_set, label_set in zip(predictions, labels)]
+
+     # Print entity-level classification report
+     print(classification_report(true_labels, true_predictions))
+
+
+ if __name__ == "__main__":
+     main()
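
For reference, a minimal inference sketch (not part of this commit) that loads the NER_RoBERTa_fineTuning checkpoint saved by the script above; the aggregation_strategy setting and the sample sentence are illustrative assumptions, not taken from the upload:

from transformers import pipeline

# Load the fine-tuned checkpoint written by Bert_Model.py
# ('NER_RoBERTa_fineTuning' is the output_dir used in main()).
ner = pipeline(
    "token-classification",
    model="NER_RoBERTa_fineTuning",
    aggregation_strategy="simple",  # merge subword pieces into whole entities
)

# Illustrative input; replace with any sentence from the target domain.
print(ner("Example sentence to tag."))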