import os
from datetime import datetime

import numpy as np
import torch
from datasets import load_dataset
from peft import LoraConfig, TaskType, get_peft_model
from sklearn.metrics import accuracy_score, f1_score
from transformers import (
    DataCollatorWithPadding,
    RobertaForSequenceClassification,
    RobertaTokenizer,
    Trainer,
    TrainingArguments,
    set_seed,
)

from src.config import HF_ENDPOINT, HF_TOKEN, MODEL_DIR


# Point Hugging Face Hub requests at a custom endpoint (e.g. a mirror) when one is configured.
if HF_ENDPOINT:
    os.environ["HF_ENDPOINT"] = HF_ENDPOINT


# Fix random seeds for reproducibility across runs.
set_seed(42)
np.random.seed(42)
torch.manual_seed(42)

output_dir = os.path.join(MODEL_DIR, "bias_checkpoints")
os.makedirs(MODEL_DIR, exist_ok=True)


def main():
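    """Fine-tune roberta-base with LoRA adapters for two-class bias classification on the BABE dataset."""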

    # Load BABE and hold out 20% of its train split for evaluation.
    dataset = load_dataset("mediabiasgroup/BABE", token=HF_TOKEN)
    dataset = dataset["train"].train_test_split(test_size=0.2, seed=42)
    model_name = "roberta-base"
    tokenizer = RobertaTokenizer.from_pretrained(model_name, token=HF_TOKEN)

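    # Helper that would collapse a finer-grained label scheme into three classes (Right/Center/Left); not used below.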
    def collapse_labels(example):
        old = example["label"]

        if old in [0, 1]:
            example["label"] = 0   # Right
        elif old == 2:
            example["label"] = 1   # Center
        else:
            example["label"] = 2   # Left

        return example

    # dataset = dataset.map(collapse_labels)  # left disabled: training uses the dataset's original two-class labels

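    # Tokenize the sentence text (truncated to 128 tokens); padding is applied per batch by the data collator.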
    def tokenize_function(examples):
        return tokenizer(examples["text"], truncation=True, max_length=128)

    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
    tokenized_datasets = dataset.map(tokenize_function, batched=True)
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
    tokenized_datasets.set_format("torch", columns=["input_ids", "attention_mask", "labels"])

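    # Base RoBERTa encoder with a freshly initialised two-class classification head.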
    model = RobertaForSequenceClassification.from_pretrained(
        model_name,
        num_labels=2,
        token=HF_TOKEN,
    )

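    # LoRA: rank-8 adapters (alpha=32, dropout 0.1) injected into the self-attention query and value projections.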
    peft_config = LoraConfig(
        task_type=TaskType.SEQ_CLS,
        r=8,
        lora_alpha=32,
        lora_dropout=0.1,
        target_modules=["query", "value"]
    )

    model = get_peft_model(model, peft_config)
    model.print_trainable_parameters()

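    # Evaluate and save a checkpoint at the end of each epoch; log every 10 steps and disable external trackers.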
    training_args = TrainingArguments(
        output_dir=output_dir,
        learning_rate=2e-4,
        per_device_train_batch_size=8,
        per_device_eval_batch_size=8,
        num_train_epochs=3,
        eval_strategy="epoch",
        save_strategy="epoch",
        logging_steps=10,
        report_to="none",
    )

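    # Metrics reported on the held-out split: accuracy and weighted F1.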
    def compute_metrics(eval_pred):
        logits, labels = eval_pred
        preds = np.argmax(logits, axis=1)

        return {
            "accuracy": accuracy_score(labels, preds),
            "f1_weighted": f1_score(labels, preds, average="weighted")
        }


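    # Wire up the Trainer with the tokenized splits, metrics, and the dynamic-padding collator.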
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["test"],
        compute_metrics=compute_metrics,
        data_collator=data_collator
    )

    trainer.train()

    # Merge the LoRA adapters back into the base weights so the saved checkpoint
    # is a standalone RoBERTa model that loads without PEFT at inference time.
    model = model.merge_and_unload()

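    # Save the merged model and tokenizer under a timestamped directory in MODEL_DIR.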
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    save_path = os.path.join(MODEL_DIR, f"bias_lora_{timestamp}")

    model.save_pretrained(save_path)
    tokenizer.save_pretrained(save_path)

if __name__ == "__main__":
    main()