Create training/train_ner.py
training/train_ner.py (ADDED, +88 -0)
import argparse
import json

from datasets import Dataset
from transformers import (
    AutoTokenizer,
    AutoModelForTokenClassification,
    DataCollatorForTokenClassification,
    TrainingArguments,
    Trainer,
)

from training.utils import compute_metrics_ner

parser = argparse.ArgumentParser()
parser.add_argument("--model_name", default="bert-base-cased")
parser.add_argument("--train_json", required=True, help="JSONL with {'tokens': [...], 'ner_tags': [...]} per line")
parser.add_argument("--eval_json", required=True)
parser.add_argument("--text_col", default="tokens")
parser.add_argument("--label_col", default="ner_tags")
parser.add_argument("--labels_file", default="training/labels_ner.json")
parser.add_argument("--output_dir", default="./outputs/ner")
parser.add_argument("--epochs", type=int, default=5)
parser.add_argument("--batch_size", type=int, default=8)
parser.add_argument("--lr", type=float, default=3e-5)
args = parser.parse_args()

def load_jsonl(path):
    rows = []
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if line:  # tolerate blank and trailing newlines
                rows.append(json.loads(line))
    return rows

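# Example row (hypothetical data) in the expected JSONL format:
#   {"tokens": ["Apple", "shares", "rose", "on", "Monday"],
#    "ner_tags": ["B-ORG", "O", "O", "O", "B-DATE"]}
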
train_rows = load_jsonl(args.train_json)
eval_rows = load_jsonl(args.eval_json)

with open(args.labels_file, "r", encoding="utf-8") as f:
    label_list = json.load(f)  # e.g., ["O","B-ORG","I-ORG","B-MONEY","I-MONEY","B-DATE","I-DATE","B-TICKER","I-TICKER"]

# Build the label/id mappings once instead of on every call.
label2id = {l: i for i, l in enumerate(label_list)}
id2label = {i: l for i, l in enumerate(label_list)}

# A fast (Rust-backed) tokenizer is required for `word_ids()` below;
# AutoTokenizer returns one by default for BERT-style checkpoints.
tokenizer = AutoTokenizer.from_pretrained(args.model_name)

def align_labels_with_tokens(labels):
    # Labels are already one tag per word; convert tag strings to ids.
    return [label2id[l] for l in labels]

def encode_batch(batch):
    # Words are pre-split, so the tokenizer can map subwords back to words;
    # dynamic padding is left to the data collator.
    tokenized = tokenizer(batch[args.text_col], is_split_into_words=True, truncation=True)
    encoded_labels = []
    for i, labels in enumerate(batch[args.label_col]):
        word_ids = tokenized.word_ids(batch_index=i)
        word_label_ids = align_labels_with_tokens(labels)  # convert once per example, not once per subword
        label_ids = []
        previous_word_id = None
        for w_id in word_ids:
            if w_id is None:
                label_ids.append(-100)  # special tokens ([CLS], [SEP]) are ignored by the loss
            elif w_id != previous_word_id:
                label_ids.append(word_label_ids[w_id])  # first subword carries the word's label
            else:
                label_ids.append(-100)  # mask continuation subwords so B- tags are not duplicated
            previous_word_id = w_id
        encoded_labels.append(label_ids)
    tokenized["labels"] = encoded_labels
    return tokenized
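# Worked example of the alignment (hypothetical subword split): the words
#   ["Moderna", "fell"] with tags ["B-ORG", "O"]
# might tokenize to [CLS] Mo ##der ##na fell [SEP], i.e. word_ids
#   [None, 0, 0, 0, 1, None], producing label ids corresponding to
#   [-100, B-ORG, -100, -100, O, -100].
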
train_ds = Dataset.from_list(train_rows).map(encode_batch, batched=True, remove_columns=[args.text_col, args.label_col])
eval_ds = Dataset.from_list(eval_rows).map(encode_batch, batched=True, remove_columns=[args.text_col, args.label_col])

model = AutoModelForTokenClassification.from_pretrained(
    args.model_name, num_labels=len(label_list), id2label=id2label, label2id=label2id
)

data_collator = DataCollatorForTokenClassification(tokenizer)

training_args = TrainingArguments(
    output_dir=args.output_dir,
    evaluation_strategy="epoch",  # renamed to `eval_strategy` in newer transformers releases
    save_strategy="epoch",  # must match the eval strategy when load_best_model_at_end=True
    learning_rate=args.lr,
    per_device_train_batch_size=args.batch_size,
    per_device_eval_batch_size=args.batch_size,
    num_train_epochs=args.epochs,
    weight_decay=0.01,
    load_best_model_at_end=True,
    metric_for_best_model="f1",  # expects compute_metrics_ner to return an "f1" entry
)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_ds,
    eval_dataset=eval_ds,
    tokenizer=tokenizer,
    data_collator=data_collator,
    compute_metrics=lambda p: compute_metrics_ner(p, label_list),
)

trainer.train()
trainer.save_model(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
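# Example invocation (paths are placeholders); run from the repo root with
# `python -m ...` so that the `training.utils` import resolves:
#   python -m training.train_ner \
#       --train_json data/train.jsonl \
#       --eval_json data/dev.jsonl \
#       --labels_file training/labels_ner.json \
#       --output_dir outputs/ner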