# named_entity_recognition/Bert_Model.py
import pandas as pd
import numpy as np
from datasets import Dataset
from transformers import AutoTokenizer, AutoModelForTokenClassification, TrainingArguments, Trainer
from seqeval.metrics import classification_report
import torch

def main():
    # Verify GPU availability
    device = "cuda" if torch.cuda.is_available() else "cpu"
    print(f"Using device: {device}")

    # Load the dataset from Excel (one word/tag pair per row)
    df = pd.read_excel('Augmented_Dataset.xlsx')

    # Clean the data
    df = df.dropna(subset=['Word', 'Tag'])
    df['Word'] = df['Word'].astype(str)
    df['Tag'] = df['Tag'].astype(str)
    # Group word/tag rows into one record per sentence
    grouped_data = df.groupby("Sentence").apply(lambda s: {
        'words': s['Word'].tolist(),
        'labels': s['Tag'].tolist()
    }).tolist()

    # Convert grouped data to a Hugging Face Dataset
    dataset = Dataset.from_list(grouped_data)
    print(f"Total dataset size: {len(dataset)}")
    # Split into train and test sets (a fixed seed keeps the split reproducible)
    split = dataset.train_test_split(test_size=0.2, seed=42)
    train_dataset = split['train']
    test_dataset = split['test']
    # Load the tokenizer; RoBERTa-style fast tokenizers require add_prefix_space=True
    # to accept pre-tokenized (is_split_into_words=True) input
    tokenizer = AutoTokenizer.from_pretrained(
        "abdulhade/RoBERTa-large-SizeCorpus_1B",
        add_prefix_space=True
    )

    # Map labels to unique IDs (sorted so the mapping is deterministic across runs)
    unique_labels = sorted(set(df['Tag']))
    label2id = {label: i for i, label in enumerate(unique_labels)}
    id2label = {i: label for label, i in label2id.items()}
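    # Optional sanity check (an addition, not in the original script): inspect the mapping
    print(f"Label mapping: {label2id}")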
    # Tokenize the words and align one label ID per sub-token;
    # special tokens and padding get -100 so the loss ignores them
    def tokenize_and_align_labels(examples):
        tokenized_inputs = tokenizer(
            examples['words'],
            truncation=True,
            is_split_into_words=True,
            padding='max_length',
            max_length=128  # sentences longer than 128 sub-tokens are truncated
        )
        labels = []
        for i, label in enumerate(examples['labels']):
            word_ids = tokenized_inputs.word_ids(batch_index=i)
            # Every sub-token inherits its word's label; None marks special tokens/padding
            label_ids = [-100 if word_id is None else label2id[label[word_id]]
                         for word_id in word_ids]
            labels.append(label_ids)
        tokenized_inputs["labels"] = labels
        return tokenized_inputs
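    # Optional sketch (an addition, not part of the original script): a seqeval
    # compute_metrics function. Passing compute_metrics=compute_metrics to the
    # Trainer below would report entity-level precision/recall/F1 at every evaluation.
    def compute_metrics(eval_pred):
        from seqeval.metrics import f1_score, precision_score, recall_score
        logits, label_ids = eval_pred.predictions, eval_pred.label_ids
        preds = np.argmax(logits, axis=2)
        # Keep only real token positions (label != -100)
        y_true = [[id2label[l] for l in row if l != -100] for row in label_ids]
        y_pred = [[id2label[p] for p, l in zip(p_row, l_row) if l != -100]
                  for p_row, l_row in zip(preds, label_ids)]
        return {
            "precision": precision_score(y_true, y_pred),
            "recall": recall_score(y_true, y_pred),
            "f1": f1_score(y_true, y_pred),
        }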
    # Apply tokenization to both splits (batched, single process)
    train_dataset = train_dataset.map(tokenize_and_align_labels, batched=True)
    test_dataset = test_dataset.map(tokenize_and_align_labels, batched=True)
    # Load the model with a token-classification head sized to the tag set
    model = AutoModelForTokenClassification.from_pretrained(
        "abdulhade/RoBERTa-large-SizeCorpus_1B",
        num_labels=len(unique_labels),
        id2label=id2label,
        label2id=label2id
    ).to(device)
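    # Optional (an addition): gradient checkpointing trades compute for memory,
    # which can help fit a large model on a small GPU. Uncomment to enable:
    # model.gradient_checkpointing_enable()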
    # Set up training arguments
    training_args = TrainingArguments(
        output_dir='results',
        evaluation_strategy="epoch",
        learning_rate=2e-5,
        per_device_train_batch_size=64,  # reduce if you hit out-of-memory errors
        per_device_eval_batch_size=64,
        num_train_epochs=50,
        weight_decay=0.01,
        save_steps=5000,
        save_total_limit=2,
        logging_dir='./logs',
        fp16=torch.cuda.is_available(),  # mixed precision requires a CUDA device
    )
    # Initialize the Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=test_dataset,
        tokenizer=tokenizer,
    )
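    # Note (an aside, not in the original): because tokenization above pads to a
    # fixed max_length, the default collator suffices; with dynamic padding you
    # would instead drop padding='max_length' and pass
    # data_collator=DataCollatorForTokenClassification(tokenizer) from transformers.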
    # Train the model
    trainer.train()

    # Save the fine-tuned model and tokenizer
    output_dir = 'NER_RoBERTa_fineTuning'
    model.save_pretrained(output_dir)
    tokenizer.save_pretrained(output_dir)
    print(f"Model and tokenizer saved to {output_dir}")
    # Evaluate on the test split
    predictions, labels, _ = trainer.predict(test_dataset)
    predictions = np.argmax(predictions, axis=2)

    # Drop the -100 positions (special tokens/padding) before scoring with seqeval
    true_labels = [[id2label[label] for label in label_set if label != -100]
                   for label_set in labels]
    true_predictions = [[id2label[pred] for pred, label in zip(pred_set, label_set) if label != -100]
                        for pred_set, label_set in zip(predictions, labels)]

    # Print an entity-level classification report
    print(classification_report(true_labels, true_predictions))

if __name__ == "__main__":
    main()
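
# Quick inference sketch (an addition, not part of the original script):
# load the saved model with a token-classification pipeline.
# from transformers import pipeline
# ner = pipeline("token-classification",
#                model="NER_RoBERTa_fineTuning",
#                aggregation_strategy="simple")
# print(ner("your sample sentence here"))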