import argparse
import json

from datasets import Dataset
from transformers import (AutoTokenizer, AutoModelForTokenClassification,
                          DataCollatorForTokenClassification, TrainingArguments, Trainer)
from training.utils import compute_metrics_ner

parser = argparse.ArgumentParser()
parser.add_argument("--model_name", default="bert-base-cased")
parser.add_argument("--train_json", required=True, help="JSONL with {'tokens': [...], 'ner_tags': [...]} per line")
parser.add_argument("--eval_json", required=True)
parser.add_argument("--text_col", default="tokens")
parser.add_argument("--label_col", default="ner_tags")
parser.add_argument("--labels_file", default="training/labels_ner.json")
parser.add_argument("--output_dir", default="./outputs/ner")
parser.add_argument("--epochs", type=int, default=5)
parser.add_argument("--batch_size", type=int, default=8)
parser.add_argument("--lr", type=float, default=3e-5)
args = parser.parse_args()
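
# Illustrative invocation (script name and data paths are placeholders):
#   python train_ner.py --train_json data/train.jsonl --eval_json data/eval.jsonl \
#       --model_name bert-base-cased --epochs 5 --batch_size 8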

def load_jsonl(path):
    """Read a JSONL file into a list of dicts, skipping blank lines."""
    with open(path, "r", encoding="utf-8") as f:
        return [json.loads(line) for line in f if line.strip()]

train_rows = load_jsonl(args.train_json)
eval_rows  = load_jsonl(args.eval_json)
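
# One illustrative record (tokens and tags are hypothetical, matching the schema above):
# {"tokens": ["Acme", "raised", "$5M"], "ner_tags": ["B-ORG", "O", "B-MONEY"]}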

with open(args.labels_file, "r") as f:
    label_list = json.load(f)  # e.g., ["O","B-ORG","I-ORG","B-MONEY","I-MONEY","B-DATE","I-DATE","B-TICKER","I-TICKER"]

label2id = {l: i for i, l in enumerate(label_list)}
id2label = {i: l for i, l in enumerate(label_list)}

tokenizer = AutoTokenizer.from_pretrained(args.model_name)  # word_ids() below requires a fast tokenizer

def encode_batch(batch):
    # Tokenize pre-split words; padding is left to the data collator so each
    # batch is padded dynamically rather than to the longest example here.
    tokenized = tokenizer(batch[args.text_col], is_split_into_words=True, truncation=True)
    encoded_labels = []
    for i, labels in enumerate(batch[args.label_col]):
        word_ids = tokenized.word_ids(batch_index=i)
        label_ids = []
        previous_word_id = None
        for w_id in word_ids:
            if w_id is None:
                label_ids.append(-100)  # special tokens: ignored by the loss
            elif w_id != previous_word_id:
                label_ids.append(label2id[labels[w_id]])  # first subword carries the word's label
            else:
                label_ids.append(-100)  # remaining subwords of the same word are masked
            previous_word_id = w_id
        encoded_labels.append(label_ids)
    tokenized["labels"] = encoded_labels
    return tokenized
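
# E.g. if ["Acme", "Corp"] tokenizes to ["Ac", "##me", "Corp"], the aligned labels
# become [B-ORG, -100, I-ORG] (illustrative; the splits depend on the tokenizer).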

train_ds = Dataset.from_list(train_rows).map(encode_batch, batched=True, remove_columns=[args.text_col, args.label_col])
eval_ds  = Dataset.from_list(eval_rows).map(encode_batch,  batched=True, remove_columns=[args.text_col, args.label_col])

model = AutoModelForTokenClassification.from_pretrained(
    args.model_name,
    num_labels=len(label_list),
    id2label=id2label,
    label2id=label2id,
)

data_collator = DataCollatorForTokenClassification(tokenizer)
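
# The collator pads input_ids/attention_mask per batch and pads labels with -100,
# matching the masking convention used in encode_batch.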

training_args = TrainingArguments(
    output_dir=args.output_dir,
    evaluation_strategy="epoch",
    save_strategy="epoch",  # must match evaluation_strategy when load_best_model_at_end=True
    learning_rate=args.lr,
    per_device_train_batch_size=args.batch_size,
    per_device_eval_batch_size=args.batch_size,
    num_train_epochs=args.epochs,
    weight_decay=0.01,
    load_best_model_at_end=True,
    metric_for_best_model="f1",  # assumes compute_metrics_ner reports an "f1" entry
)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_ds,
    eval_dataset=eval_ds,
    tokenizer=tokenizer,
    data_collator=data_collator,
    compute_metrics=lambda p: compute_metrics_ner(p, label_list),
)

trainer.train()
trainer.save_model(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
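
# Minimal post-training smoke test (a sketch; the input sentence is made up, and
# aggregation_strategy="simple" merges subword predictions into entity spans):
# from transformers import pipeline
# ner = pipeline("token-classification", model=args.output_dir, aggregation_strategy="simple")
# print(ner("Acme Corp raised $5M on 2024-01-15; ticker ACME jumped."))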