| """ |
| Azerbaijani NER Benchmark Evaluation Script |
| |
| Evaluates all four Azerbaijani NER models on the benchmark test set |
| and prints a comparison table with precision, recall, and F1-score. |
| |
| Usage: |
| pip install transformers datasets seqeval torch |
| python evaluate_models.py |
| """ |
|
|
| from datasets import load_dataset |
| from transformers import pipeline, AutoTokenizer, AutoModelForTokenClassification |
| from seqeval.metrics import precision_score, recall_score, f1_score, classification_report |
| import torch |
|
|
| |
# Models to evaluate: Hugging Face Hub repo ids plus display metadata.
# "name" and "params" are used only for the comparison table printed at the
# end; "repo" is what actually gets loaded by the token-classification
# pipeline in evaluate_model().
MODELS = [
    {
        "name": "mBERT Azerbaijani NER",
        "repo": "IsmatS/mbert-az-ner",
        "params": "180M",
    },
    {
        "name": "XLM-RoBERTa Base",
        "repo": "IsmatS/xlm-roberta-az-ner",
        "params": "125M",
    },
    {
        "name": "XLM-RoBERTa Large",
        "repo": "IsmatS/xlm_roberta_large_az_ner",
        "params": "355M",
    },
    {
        "name": "Azeri-Turkish BERT",
        "repo": "IsmatS/azeri-turkish-bert-ner",
        "params": "110M",
    },
]
|
|
| |
# Gold-label id -> IOB2 tag string for the benchmark's `ner_tags` column.
# Ids not present in this table fall back to "O" during alignment
# (see the `.get(lid, "O")` lookup in align_predictions).
ID2LABEL_BENCHMARK = {
    0: "O",
    1: "B-PERSON",
    2: "I-PERSON",
    3: "B-LOCATION",
    4: "I-LOCATION",
    5: "B-ORGANISATION",
    6: "I-ORGANISATION",
    7: "B-DATE",
    8: "I-DATE",
}
|
|
|
|
def load_benchmark():
    """Fetch and return the test split of the Azerbaijani NER benchmark."""
    print("Loading Azerbaijani NER Benchmark …")
    benchmark = load_dataset("IsmatS/azerbaijani-ner-benchmark", split="test")
    print(f" Loaded {len(benchmark)} sentences.\n")
    return benchmark
|
|
|
|
def align_predictions(predictions, label_ids, id2label):
    """
    Convert token-level model outputs to word-level IOB2 tag sequences,
    aligning with the gold labels from the benchmark.

    Args:
        predictions: list of dicts returned by the HF pipeline.  Depending
            on the pipeline configuration the label key is either "entity"
            (no aggregation) or "entity_group" (aggregation_strategy set,
            as evaluate_model uses) — both are handled here.
        label_ids: list[int] of gold NER tag ids for one sentence.
        id2label: mapping of tag id -> tag string; unknown ids become "O".

    Returns:
        pred_tags: list[str] of predicted tag strings, exactly as long as
            gold_tags (padded with "O" or truncated as needed).
        gold_tags: list[str] of gold tag strings — never truncated, so no
            gold entities are silently dropped from the evaluation.
    """
    gold_tags = [id2label.get(lid, "O") for lid in label_ids]

    # Aggregated pipelines emit "entity_group"; raw ones emit "entity".
    pred_tags = [p.get("entity", p.get("entity_group", "O")) for p in predictions]

    # Pad predictions with "O" (model found nothing for those words) rather
    # than truncating gold: shortening gold_tags would remove real entities
    # from the reference and inflate the scores.
    if len(pred_tags) < len(gold_tags):
        pred_tags = pred_tags + ["O"] * (len(gold_tags) - len(pred_tags))
    else:
        pred_tags = pred_tags[:len(gold_tags)]

    return pred_tags, gold_tags
|
|
|
|
def evaluate_model(model_info, dataset):
    """
    Run a single model over the full benchmark and return seqeval metrics.

    Args:
        model_info: dict with "name", "repo", "params" keys (see MODELS);
            only "repo" is used here, as the model/tokenizer id to load.
        dataset: iterable of examples carrying "tokens" (list[str]) and
            "ner_tags" (list[int]) fields.

    Returns:
        dict with float "precision", "recall", "f1" (entity-level, from
        seqeval) and a "detailed_report" string with per-type breakdown.
    """
    repo = model_info["repo"]
    print(f" Loading model: {repo}")

    # Use the first GPU when available; -1 tells the HF pipeline to run on CPU.
    device = 0 if torch.cuda.is_available() else -1

    ner = pipeline(
        "token-classification",
        model=repo,
        tokenizer=repo,
        aggregation_strategy="first",
        device=device,
    )

    all_pred_tags = []
    all_gold_tags = []

    for i, example in enumerate(dataset):
        tokens = example["tokens"]
        label_ids = example["ner_tags"]
        text = " ".join(tokens)

        try:
            predictions = ner(text)
        except Exception as err:
            # Best-effort: score a failed sentence as all-"O" predictions,
            # but report the failure instead of swallowing it silently —
            # a model that errors on many sentences should be visible.
            print(f"    Warning: inference failed on example {i}: {err}")
            predictions = [{"entity": "O", "word": t} for t in tokens]

        pred_tags, gold_tags = align_predictions(
            predictions, label_ids, ID2LABEL_BENCHMARK
        )
        all_pred_tags.append(pred_tags)
        all_gold_tags.append(gold_tags)

        if (i + 1) % 500 == 0:
            print(f" Processed {i + 1}/{len(dataset)} examples …")

    # seqeval computes entity-level (not token-level) scores from the
    # per-sentence IOB2 tag sequences.
    precision = precision_score(all_gold_tags, all_pred_tags)
    recall = recall_score(all_gold_tags, all_pred_tags)
    f1 = f1_score(all_gold_tags, all_pred_tags)

    return {
        "precision": precision,
        "recall": recall,
        "f1": f1,
        "detailed_report": classification_report(all_gold_tags, all_pred_tags),
    }
|
|
|
|
def print_results_table(results):
    """Render a side-by-side precision/recall/F1 table for all models.

    Args:
        results: list of (model_info, metrics) pairs as built by main();
            metrics values are fractions in [0, 1] and are shown as
            percentages.
    """
    header = f"{'Model':<35} {'Params':>8} {'Precision':>10} {'Recall':>8} {'F1-Score':>9}"
    rule = "-" * len(header)

    print("\n" + rule)
    print("AZERBAIJANI NER BENCHMARK — MODEL COMPARISON")
    print(rule)
    print(header)
    print(rule)

    for info, scores in results:
        pct = {key: scores[key] * 100 for key in ("precision", "recall", "f1")}
        print(
            f"{info['name']:<35} {info['params']:>8} "
            f"{pct['precision']:>9.2f}% {pct['recall']:>7.2f}% {pct['f1']:>8.2f}%"
        )

    print(rule)

    # Highlight the single best model by F1.
    top_info, top_scores = max(results, key=lambda pair: pair[1]["f1"])
    print(f"\nBest model: {top_info['name']} (F1 = {top_scores['f1'] * 100:.2f}%)\n")
|
|
|
|
def main():
    """Evaluate every model in MODELS on the benchmark and print results."""
    benchmark = load_benchmark()
    outcomes = []

    for spec in MODELS:
        print(f"\nEvaluating: {spec['name']}")
        scores = evaluate_model(spec, benchmark)
        outcomes.append((spec, scores))

        # Per-model seqeval breakdown, indented under the model name.
        print(f"\n Detailed report for {spec['name']}:")
        for report_line in scores["detailed_report"].splitlines():
            print(" " + report_line)

    print_results_table(outcomes)
|
|
|
|
# Run the evaluation only when executed as a script, so the module can be
# imported (e.g. to reuse align_predictions) without side effects.
if __name__ == "__main__":
    main()
|
|