from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
from datasets import load_dataset
import torch

# Pretrained BERT encoder with a freshly initialized 3-class classification head
model_name = "bert-base-uncased"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=3)

# Use a GPU when available (the Trainer also moves the model to the device on its own)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
print("Using device:", device)

# Load the Qbias CSV; load_dataset returns a DatasetDict, so take its single "train" split.
# The file is expected to provide "text" and "label" columns (used below).
dataset = load_dataset("csv", data_files="Qbias/cleaned_qbias_balanced.csv")["train"]

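# Optional sanity check (assumption, not in the original script): the 3-way head
# above expects the "label" column to hold integers in {0, 1, 2}.
print(dataset.column_names)  # e.g. ['text', 'label']
print(dataset[0])            # inspect one raw row
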
# Hold out 10% of the rows for evaluation (pass seed=... for a reproducible split)
dataset = dataset.train_test_split(test_size=0.1)

# Tokenize each example, padding/truncating to BERT's 512-token maximum
def tokenize_function(example):
    return tokenizer(example["text"], padding="max_length", truncation=True, max_length=512)

# Apply the tokenizer over both splits in batches
tokenized_dataset = dataset.map(tokenize_function, batched=True)

# Keep only the tensor columns the model consumes; the Trainer's collator
# renames "label" to "labels" for the forward pass
tokenized_dataset.set_format(type="torch", columns=["input_ids", "attention_mask", "label"])

# Training configuration: evaluate and checkpoint once per epoch
# (note: recent transformers releases rename evaluation_strategy to eval_strategy)
training_args = TrainingArguments(
    output_dir="./bert-bias-detector",
    evaluation_strategy="epoch",
    save_strategy="epoch",
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    num_train_epochs=3,
    weight_decay=0.01,
    logging_dir="./logs",
    logging_steps=500,
)

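# Optional sketch (assumption, not in the original script): an accuracy metric the
# Trainer could report at each evaluation; wire it in by passing
# compute_metrics=compute_metrics to the Trainer below. The function name is illustrative.
import numpy as np

def compute_metrics(eval_pred):
    # eval_pred bundles the model's logits with the true label ids
    logits, labels = eval_pred.predictions, eval_pred.label_ids
    preds = np.argmax(logits, axis=-1)
    return {"accuracy": float((preds == labels).mean())}
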
# The Trainer ties together the model, config, and the two splits; passing the
# tokenizer lets it pad batches as needed and save the tokenizer with checkpoints
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_dataset["train"],
    eval_dataset=tokenized_dataset["test"],
    tokenizer=tokenizer,
)

# Run fine-tuning
trainer.train()

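# Hedged follow-up (assumption, not in the original script): report held-out metrics
# and persist the fine-tuned model; the output path is illustrative.
eval_metrics = trainer.evaluate()
print(eval_metrics)
trainer.save_model("./bert-bias-detector/final")
tokenizer.save_pretrained("./bert-bias-detector/final")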