import os
import shutil
from pathlib import Path

# Quiet codecarbon's logging. NOTE(review): this is set BEFORE the
# span_marker import below — span_marker's Trainer pulls in codecarbon,
# which appears to configure its logger at import time, so setting the
# variable after the import (as the original did) may have no effect.
# Confirm against the installed codecarbon version.
os.environ["CODECARBON_LOG_LEVEL"] = "error"

from datasets import load_dataset
from transformers import TrainingArguments
from span_marker import SpanMarkerModel, Trainer, SpanMarkerModelCardData
|
|
|
|
# Identifiers shared by dataset loading, the model card, and output paths.
DATASET_NAME = "Acronym Identification"
DATASET_ID = "acronym_identification"
ENCODER_ID = "bert-base-cased"
MODEL_ID = "tomaarsen/span-marker-bert-base-acronyms"


def _load_data():
    """Load the acronym dataset and return a ``(dataset, label_names)`` tuple.

    The label column is renamed from "labels" to "ner_tags", the column
    name SpanMarker expects; label names are read from the train split's
    features.
    """
    dataset = load_dataset(DATASET_ID).rename_column("labels", "ner_tags")
    labels = dataset["train"].features["ner_tags"].feature.names
    return dataset, labels


def _build_model(labels):
    """Initialize a SpanMarker model wrapping the BERT encoder.

    Args:
        labels: Label names taken from the dataset features.

    Returns:
        A ``SpanMarkerModel`` ready for training, with model card metadata
        attached so a card can be generated automatically.
    """
    return SpanMarkerModel.from_pretrained(
        ENCODER_ID,
        labels=labels,
        # SpanMarker-specific hyperparameters.
        model_max_length=256,
        marker_max_length=128,
        entity_max_length=8,
        # Metadata used to auto-generate the model card.
        model_card_data=SpanMarkerModelCardData(
            model_id=MODEL_ID,
            encoder_id=ENCODER_ID,
            dataset_name=DATASET_NAME,
            dataset_id=DATASET_ID,
            license="apache-2.0",
            language="en",
        ),
    )


def _build_training_args(output_dir: Path) -> TrainingArguments:
    """Assemble the regular ``transformers`` training arguments.

    Args:
        output_dir: Directory checkpoints and logs are written to.
    """
    return TrainingArguments(
        output_dir=output_dir,
        run_name=MODEL_ID,
        # Optimization hyperparameters.
        learning_rate=5e-5,
        per_device_train_batch_size=32,
        per_device_eval_batch_size=32,
        num_train_epochs=2,
        weight_decay=0.01,
        warmup_ratio=0.1,
        # NOTE(review): bf16 requires bf16-capable hardware — confirm the
        # training machine supports it before running.
        bf16=True,
        # Logging / evaluation / checkpointing cadence.
        logging_first_step=True,
        logging_steps=50,
        evaluation_strategy="steps",
        save_strategy="steps",
        eval_steps=200,
        save_total_limit=2,
        dataloader_num_workers=2,
    )


def main() -> None:
    """Fine-tune a SpanMarker acronym-identification model.

    Loads the dataset, trains for two epochs, evaluates on the validation
    split, then saves the final model plus a copy of this script for
    reproducibility.
    """
    dataset, labels = _load_data()
    model = _build_model(labels)

    output_dir = Path("models") / MODEL_ID
    trainer = Trainer(
        model=model,
        args=_build_training_args(output_dir),
        train_dataset=dataset["train"],
        eval_dataset=dataset["validation"],
    )
    trainer.train()

    # Compute and persist validation metrics for the fully trained model.
    metrics = trainer.evaluate(metric_key_prefix="validation")
    trainer.save_metrics("validation", metrics)

    # Save the final checkpoint together with the exact script that produced it.
    final_dir = output_dir / "checkpoint-final"
    trainer.save_model(final_dir)
    shutil.copy2(__file__, final_dir / "train.py")
|
|
|
|
# Entry point guard: lets this file be imported (e.g. for its helpers)
# without triggering a training run.
if __name__ == "__main__":
    main()
|
|