# tamil-morphological-benchmark / morph_benchmark_eval.py
# Uploaded with huggingface_hub by mohanprakash462 (rev 5ed27b0, verified).
"""
TAMIL-MORPH Benchmark Evaluation Script
========================================
Evaluates LLMs on 1,028 Tamil morphological test cases across 9 categories.
Paper: "A Thousand Language Problem: Morphological Understanding in Linguistic AI"
Models tested:
1. abhinand/tamil-llama-13b-instruct-v0.1 (top Tamil open-source)
2. Tamil-ai/tamil-qwen25-7b-instruct (ours)
3. Qwen/Qwen2.5-7B-Instruct (baseline, no Tamil fine-tune)
4. gpt-4o-mini (OpenAI, cheapest closed-source)
5. gemini-2.0-flash (Google, free tier)
Usage:
# Run with OpenAI API (from anywhere):
python morph_benchmark_eval.py --model gpt-4o-mini --backend openai
# Run with Google Gemini (from anywhere, free):
python morph_benchmark_eval.py --model gemini-2.0-flash --backend gemini
# Run local model on RunPod GPU:
python morph_benchmark_eval.py --model Tamil-ai/tamil-qwen25-7b-instruct --backend local
# Run all models:
python morph_benchmark_eval.py --all
"""
import json
import os
import re
import sys
import time
import argparse
import unicodedata
from pathlib import Path
from dataclasses import dataclass, field
from typing import Optional
# ---------------------------------------------------------------------------
# 1. BENCHMARK DATA PARSER
# ---------------------------------------------------------------------------
# Benchmark markdown file, expected to sit in the same directory as this
# script (overridable via --benchmark-file).
BENCHMARK_FILE = Path(__file__).parent / "Benchmarkdata.md"
# (regex, category_key) pairs marking where each category's section begins in
# Benchmarkdata.md. Patterns are tried in order for every line (see
# parse_benchmark_data), so the specific "prompt N" patterns must come before
# the bare "**prompt**" pattern that marks the final "novel" section.
SECTION_MARKERS = [
    (r"\*\*## PROMPT 1.*Case Suffixes", "case_suffixes"),
    (r"## PROMPT 2.*Plural", "plural_case"),
    (r"## PROMPT 3.*Verb Conjugation", "verb_conjugation"),
    (r"\*\*prompt 4\*\*", "sandhi"),
    (r"\*\*prompt 5\*\*", "honorific"),
    (r"\*\*prompt 6\*\*", "negation"),
    (r"\*\*prompt 7\*\*", "compound"),
    (r"\*\*prompt 8\*\*", "conditional"),
    (r"\*\*prompt\*\*", "novel"),
]
def extract_json_blocks(text: str) -> list[str]:
    """Extract top-level JSON array blocks from markdown text.

    Tracks bracket depth so nested arrays stay inside one block. Two fixes
    over naive bracket counting:

    * Brackets inside JSON double-quoted string literals are ignored (a
      Tamil gloss like ``"word [note]"`` no longer corrupts the block).
    * A stray ``]`` in surrounding prose cannot push the depth negative,
      which previously desynchronized all subsequent block boundaries.

    Args:
        text: Raw markdown (or any text) that may contain JSON arrays.

    Returns:
        Balanced ``[...]`` substrings in order of appearance.
    """
    blocks: list[str] = []
    depth = 0
    start: int | None = None
    in_string = False  # currently inside a JSON string literal
    escaped = False    # previous char was a backslash inside that string
    for i, ch in enumerate(text):
        if in_string:
            if escaped:
                escaped = False
            elif ch == "\\":
                escaped = True
            elif ch == '"':
                in_string = False
            continue
        if ch == '"' and depth > 0:
            # Only honor quotes inside an array block; prose outside blocks
            # may contain unbalanced quotation marks.
            in_string = True
        elif ch == "[":
            if depth == 0:
                start = i
            depth += 1
        elif ch == "]" and depth > 0:  # guard: ignore stray ']' in prose
            depth -= 1
            if depth == 0 and start is not None:
                blocks.append(text[start : i + 1])
                start = None
    return blocks
def parse_benchmark_data(filepath: str = None) -> dict:
    """Parse Benchmarkdata.md into structured test cases per category.

    Splits the file at SECTION_MARKERS lines (first matching pattern wins),
    then parses the first JSON array found in each section. A section whose
    JSON fails to parse is kept with an empty list so callers can still
    iterate every category.

    Args:
        filepath: Optional path to the benchmark markdown; defaults to
            BENCHMARK_FILE next to this script.

    Returns:
        Mapping of category key -> list of parsed benchmark items.
    """
    source = Path(filepath) if filepath else BENCHMARK_FILE
    text = source.read_text(encoding="utf-8")

    # Bucket each line under the most recently seen section marker. A marker
    # line itself is consumed, not stored; a repeated marker resets (i.e.
    # overwrites) its section, and lines before the first marker are dropped.
    sections: dict[str, list[str]] = {}
    active = None
    for raw_line in text.split("\n"):
        hit = next(
            (key for pattern, key in SECTION_MARKERS
             if re.search(pattern, raw_line, re.IGNORECASE)),
            None,
        )
        if hit is not None:
            active = hit
            sections[active] = []
        elif active is not None:
            sections[active].append(raw_line)

    # Pull the first JSON array out of each section's accumulated text.
    parsed = {}
    for key, content_lines in sections.items():
        found = extract_json_blocks("\n".join(content_lines))
        if not found:
            continue
        try:
            parsed[key] = json.loads(found[0])
        except json.JSONDecodeError as exc:
            print(f" WARNING: Failed to parse JSON for {key}: {exc}")
            parsed[key] = []
    return parsed
# ---------------------------------------------------------------------------
# 2. TEST CASE GENERATOR — creates (prompt, expected, category, sub_id)
# ---------------------------------------------------------------------------
# Bilingual (English + Tamil grammar term) labels used when building prompts.
# Keys mirror the "forms" keys found in Benchmarkdata.md.
CASE_NAMES_TAMIL = {
    "accusative": "இரண்டாம் வேற்றுமை (accusative / -ஐ)",
    "dative": "நான்காம் வேற்றுமை (dative / -க்கு)",
    "locative": "ஏழாம் வேற்றுமை (locative / -இல்)",
    "ablative": "ablative (-இலிருந்து)",
    "genitive": "ஆறாம் வேற்றுமை (genitive / -இன்)",
    "sociative": "மூன்றாம் வேற்றுமை (sociative / -ஓடு)",
}
# Verb conjugation targets: form key -> (subject pronoun, tense description).
PERSON_TAMIL = {
    "naan_present": ("நான்", "present tense / நிகழ்காலம்"),
    "naan_past": ("நான்", "past tense / இறந்தகாலம்"),
    "naan_future": ("நான்", "future tense / எதிர்காலம்"),
    "avan_present": ("அவன்", "present tense / நிகழ்காலம்"),
    "aval_present": ("அவள்", "present tense / நிகழ்காலம்"),
    "avargal_present": ("அவர்கள்", "present tense / நிகழ்காலம்"),
    "naangal_present": ("நாங்கள்", "present tense / நிகழ்காலம்"),
}
# Plural and plural+case form labels (category 2).
PLURAL_FORMS = {
    "plural": "plural / பன்மை",
    "plural_locative": "plural + locative / பன்மை + -இல்",
    "plural_ablative": "plural + ablative / பன்மை + -இலிருந்து",
    "plural_dative": "plural + dative / பன்மை + -க்கு",
}
# Negation form labels per tense (category 6).
NEGATION_FORMS = {
    "present_negative": "present negative / நிகழ்கால எதிர்மறை",
    "past_negative": "past negative / இறந்தகால எதிர்மறை",
    "future_negative": "future negative / எதிர்கால எதிர்மறை",
}
# Conditional/causal form labels (category 8).
CONDITIONAL_FORMS = {
    "conditional": "conditional / நிபந்தனை (-ஆல்)",
    "causal": "causal / காரண (-ததால்)",
}
# Honorific register labels (category 5).
HONORIFIC_LEVELS = {
    "informal": "informal / முறைசாரா (நீ)",
    "formal": "formal / மரியாதை (நீங்கள்)",
    "high_honorific": "high honorific / உயர் மரியாதை (literary)",
}
# System prompt shared by every backend: request the bare Tamil form only,
# which keeps extract_tamil_answer's job simple.
SYSTEM_PROMPT = (
    "You are a Tamil linguistics expert. "
    "Answer with ONLY the Tamil word or phrase requested. "
    "Do not add explanations, translations, or extra text. "
    "Just the Tamil form, nothing else."
)
def generate_test_cases(data: dict) -> list[dict]:
    """Convert parsed benchmark data into individual test cases with prompts.

    Args:
        data: Mapping of category key -> list of benchmark items, as produced
            by parse_benchmark_data(). Missing categories are skipped.

    Returns:
        A list of dicts with keys: "category", "sub_id" (unique within the
        category), "prompt" (English instruction sent to the model),
        "expected" (gold Tamil form), and "root" (source word(s)/morphemes).
    """
    cases = []
    # --- Category 1: Case Suffixes ---
    # NOTE(review): the prompt intentionally uses the raw form key (e.g.
    # "accusative") rather than the bilingual CASE_NAMES_TAMIL label, unlike
    # the other categories; kept as-is so prompts match prior benchmark runs.
    # (An unused lookup of CASE_NAMES_TAMIL was removed here.)
    for item in data.get("case_suffixes", []):
        root = item["root"]
        meaning = item["root_meaning"]
        for case_key, form_data in item["forms"].items():
            expected = form_data["tamil"]
            prompt = (
                f"What is the {case_key} form of the Tamil word '{root}' ({meaning})?\n"
                f"Answer with ONLY the Tamil word."
            )
            cases.append({
                "category": "case_suffixes",
                "sub_id": f"{root}_{case_key}",
                "prompt": prompt,
                "expected": expected,
                "root": root,
            })
    # --- Category 2: Plural + Case ---
    for item in data.get("plural_case", []):
        root = item["root"]
        meaning = item["root_meaning"]
        for form_key, form_data in item["forms"].items():
            expected = form_data["tamil"]
            form_desc = PLURAL_FORMS.get(form_key, form_key)
            prompt = (
                f"What is the {form_desc} form of the Tamil word '{root}' ({meaning})?\n"
                f"Answer with ONLY the Tamil word."
            )
            cases.append({
                "category": "plural_case",
                "sub_id": f"{root}_{form_key}",
                "prompt": prompt,
                "expected": expected,
                "root": root,
            })
    # --- Category 3: Verb Conjugation ---
    for item in data.get("verb_conjugation", []):
        root = item["root"]
        meaning = item["root_meaning"]
        for form_key, form_data in item["forms"].items():
            expected = form_data["tamil"]
            if form_key in PERSON_TAMIL:
                # Known person/tense combination: spell out pronoun + tense.
                person, tense_desc = PERSON_TAMIL[form_key]
                prompt = (
                    f"Conjugate the Tamil verb '{root}' ({meaning}) for {person} in {tense_desc}.\n"
                    f"Answer with ONLY the conjugated Tamil word."
                )
            else:
                # Unknown form key: fall back to a generic phrasing.
                prompt = (
                    f"Conjugate the Tamil verb '{root}' ({meaning}) in the {form_key} form.\n"
                    f"Answer with ONLY the Tamil form."
                )
            cases.append({
                "category": "verb_conjugation",
                "sub_id": f"{root}_{form_key}",
                "prompt": prompt,
                "expected": expected,
                "root": root,
            })
    # --- Category 4: Sandhi ---
    for item in data.get("sandhi", []):
        w1 = item.get("word1", "")
        m1 = item.get("word1_meaning", "")
        w2 = item.get("word2", "")
        m2 = item.get("word2_meaning", "")
        expected = item.get("combined", "")
        if not expected or not w1:
            # Skip malformed entries with no gold form or no first word.
            continue
        prompt = (
            f"When the Tamil words '{w1}' ({m1}) and '{w2}' ({m2}) combine "
            f"according to Tamil sandhi (புணர்ச்சி) rules, what is the combined form?\n"
            f"Answer with ONLY the combined Tamil word."
        )
        cases.append({
            "category": "sandhi",
            "sub_id": f"{w1}+{w2}",
            "prompt": prompt,
            "expected": expected,
            "root": f"{w1}+{w2}",
        })
    # --- Category 5: Honorific ---
    for item in data.get("honorific", []):
        action = item.get("action", "")
        for level_key, form_data in item.get("forms", {}).items():
            expected = form_data["tamil"]
            level_desc = HONORIFIC_LEVELS.get(level_key, level_key)
            prompt = (
                f"What is the {level_desc} Tamil form for the action '{action}'?\n"
                f"Answer with ONLY the Tamil word or short phrase."
            )
            cases.append({
                "category": "honorific",
                "sub_id": f"{action}_{level_key}",
                "prompt": prompt,
                "expected": expected,
                "root": action,
            })
    # --- Category 6: Negation ---
    for item in data.get("negation", []):
        root = item["root"]
        meaning = item["root_meaning"]
        for form_key, form_data in item["forms"].items():
            expected = form_data["tamil"]
            form_desc = NEGATION_FORMS.get(form_key, form_key)
            prompt = (
                f"What is the {form_desc} form of the Tamil verb '{root}' ({meaning}) "
                f"for நான் (I)?\n"
                f"Answer with ONLY the Tamil phrase."
            )
            cases.append({
                "category": "negation",
                "sub_id": f"{root}_{form_key}",
                "prompt": prompt,
                "expected": expected,
                "root": root,
            })
    # --- Category 7: Compound Words ---
    for item in data.get("compound", []):
        w1 = item.get("word1", "")
        m1 = item.get("word1_meaning", "")
        w2 = item.get("word2", "")
        m2 = item.get("word2_meaning", "")
        expected = item.get("compound", "")
        if not expected or not w1:
            # Skip malformed entries, same policy as sandhi.
            continue
        prompt = (
            f"What is the Tamil compound word formed by combining "
            f"'{w1}' ({m1}) and '{w2}' ({m2})?\n"
            f"Answer with ONLY the compound Tamil word."
        )
        cases.append({
            "category": "compound",
            "sub_id": f"{w1}+{w2}",
            "prompt": prompt,
            "expected": expected,
            "root": f"{w1}+{w2}",
        })
    # --- Category 8: Conditional/Causal ---
    for item in data.get("conditional", []):
        root = item["root"]
        meaning = item["root_meaning"]
        for form_key, form_data in item["forms"].items():
            expected = form_data["tamil"]
            form_desc = CONDITIONAL_FORMS.get(form_key, form_key)
            prompt = (
                f"What is the {form_desc} form of the Tamil verb '{root}' ({meaning})?\n"
                f"Answer with ONLY the Tamil word."
            )
            cases.append({
                "category": "conditional",
                "sub_id": f"{root}_{form_key}",
                "prompt": prompt,
                "expected": expected,
                "root": root,
            })
    # --- Category 9: Novel Combinations ---
    # These items give morpheme breakdowns rather than a root word.
    for item in data.get("novel", []):
        expected = item["form"]
        breakdown = item["breakdown"]
        meaning = item["meaning"]
        cat = item.get("category", "novel")
        item_id = item.get("id", "")
        prompt = (
            f"Combine the following Tamil morphemes into a single valid Tamil word/form:\n"
            f"Morphemes: {breakdown}\n"
            f"Intended meaning: {meaning}\n"
            f"Answer with ONLY the combined Tamil form."
        )
        cases.append({
            "category": "novel",
            "sub_id": f"novel_{item_id}_{cat}",
            "prompt": prompt,
            "expected": expected,
            "root": breakdown,
        })
    return cases
# ---------------------------------------------------------------------------
# 3. MODEL BACKENDS
# ---------------------------------------------------------------------------
def normalize_tamil(text: str) -> str:
    """Normalize Tamil text for lenient string comparison.

    Applies Unicode NFC normalization, strips surrounding whitespace, and
    removes common punctuation and quote characters that models often wrap
    answers in. Fix: the character class now also covers typographic curly
    quotes (U+201C/U+201D/U+2018/U+2019) — the original class contained only
    straight quotes (the intended curly quotes appear to have been mangled
    into repeated '"' characters).

    Args:
        text: Raw Tamil (or mixed) text.

    Returns:
        The cleaned string, possibly empty.
    """
    text = unicodedata.normalize("NFC", text)
    text = text.strip()
    # Remove common ASCII punctuation plus curly quotes around the answer.
    text = re.sub(r'["\u201c\u201d\'\u2018\u2019`.,!?;:()\[\]{}]', '', text)
    text = text.strip()
    return text
def extract_tamil_answer(response: str) -> str:
    """Pull the Tamil word/phrase out of a raw model response.

    Scans the response line by line, skipping lines that look like English
    explanations, and returns the first Tamil span found (multi-word span
    preferred over a single run). Falls back to searching the whole response,
    and finally to normalizing the raw response itself.
    """
    response = response.strip()
    # Multi-character Tamil span (may contain internal whitespace) and a
    # single contiguous run of Tamil characters.
    multi = re.compile(r'[\u0B80-\u0BFF][\u0B80-\u0BFF\s]*[\u0B80-\u0BFF]')
    single = re.compile(r'[\u0B80-\u0BFF]+')
    explanation_prefixes = ("Note:", "Explanation:", "The ", "This ", "Here")

    for candidate in response.split("\n"):
        candidate = candidate.strip()
        if not candidate:
            continue
        # Skip lines that look like English explanations.
        if candidate.startswith(explanation_prefixes):
            continue
        for pattern in (multi, single):
            found = pattern.search(candidate)
            if found:
                return normalize_tamil(found.group())

    # Fallback: any Tamil text anywhere in the whole response.
    for pattern in (multi, single):
        found = pattern.search(response)
        if found:
            return normalize_tamil(found.group())
    return normalize_tamil(response)
def score_response(expected: str, predicted: str) -> float:
    """Score a prediction: 1.0 = exact/superstring, 0.5 = partial, 0.0 = wrong."""
    want = normalize_tamil(expected)
    got = normalize_tamil(predicted)
    if not got:
        return 0.0
    # Exact match, or the model produced the correct form plus extra text.
    if want == got or want in got:
        return 1.0
    # Substantial truncated match (prediction contained in expected).
    if len(got) > 2 and got in want:
        return 0.5
    return 0.0
# --- Local Model Backend (transformers) ---
class LocalModelBackend:
    """Runs a HuggingFace causal LM locally via transformers.

    transformers/torch imports are deferred into the methods so the API-only
    code paths of this script work without GPU libraries installed.
    """

    def __init__(self, model_id: str, device: str = "auto", load_in_4bit: bool = True):
        """Load the tokenizer and model.

        Args:
            model_id: HuggingFace model ID or local path.
            device: Passed as device_map ("auto" lets accelerate place layers).
            load_in_4bit: If True, quantize with bitsandbytes NF4 to cut VRAM.
        """
        from transformers import AutoModelForCausalLM, AutoTokenizer
        import torch
        print(f"Loading {model_id}...")
        self.model_id = model_id
        self.tokenizer = AutoTokenizer.from_pretrained(
            model_id, trust_remote_code=True
        )
        # Some models ship without a pad token; reuse EOS so generate() has
        # a valid pad_token_id.
        if self.tokenizer.pad_token is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token
        load_kwargs = {
            "trust_remote_code": True,
            "device_map": device,
            "torch_dtype": torch.float16,
        }
        if load_in_4bit:
            from transformers import BitsAndBytesConfig
            load_kwargs["quantization_config"] = BitsAndBytesConfig(
                load_in_4bit=True,
                bnb_4bit_compute_dtype=torch.float16,
                bnb_4bit_quant_type="nf4",
            )
        self.model = AutoModelForCausalLM.from_pretrained(model_id, **load_kwargs)
        self.model.eval()
        print(f" Loaded {model_id} successfully.")

    def generate(self, prompt: str, system_prompt: str = SYSTEM_PROMPT) -> str:
        """Generate a short answer (<= 64 new tokens) for `prompt`."""
        import torch
        # Try chat template first
        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": prompt},
        ]
        try:
            text = self.tokenizer.apply_chat_template(
                messages, tokenize=False, add_generation_prompt=True
            )
        except Exception:
            # Fallback for models without chat template
            text = f"{system_prompt}\n\nUser: {prompt}\nAssistant:"
        inputs = self.tokenizer(text, return_tensors="pt").to(self.model.device)
        with torch.no_grad():
            outputs = self.model.generate(
                **inputs,
                max_new_tokens=64,
                temperature=0.1,  # near-greedy, but sampling is kept on
                top_p=0.9,
                do_sample=True,
                repetition_penalty=1.1,
                pad_token_id=self.tokenizer.pad_token_id,
            )
        # Decode only the newly generated tokens, skipping the prompt echo.
        response = self.tokenizer.decode(
            outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True
        )
        return response.strip()
# --- OpenAI Backend ---
class OpenAIBackend:
    """Chat-completions backend for OpenAI models (reads OPENAI_API_KEY)."""

    def __init__(self, model: str = "gpt-4o-mini"):
        from openai import OpenAI
        self.client = OpenAI()  # uses OPENAI_API_KEY env var
        self.model = model
        print(f"OpenAI backend: {model}")

    def generate(self, prompt: str, system_prompt: str = SYSTEM_PROMPT) -> str:
        """Return the model reply; up to 3 attempts with exponential backoff.

        Returns an empty string after the final failed attempt.
        """
        attempt = 0
        while True:
            try:
                reply = self.client.chat.completions.create(
                    model=self.model,
                    messages=[
                        {"role": "system", "content": system_prompt},
                        {"role": "user", "content": prompt},
                    ],
                    max_tokens=64,
                    temperature=0.1,
                )
                return reply.choices[0].message.content.strip()
            except Exception as e:
                if attempt >= 2:
                    print(f" OpenAI error: {e}")
                    return ""
                time.sleep(2 ** attempt)
                attempt += 1
# --- Google Gemini Backend ---
class GeminiBackend:
    """Google Gemini backend via google-generativeai (reads GOOGLE_API_KEY)."""

    def __init__(self, model: str = "gemini-2.0-flash"):
        import google.generativeai as genai
        genai.configure(api_key=os.environ.get("GOOGLE_API_KEY", ""))
        # Gemini takes the system prompt at model-construction time, not per
        # request, so SYSTEM_PROMPT is fixed here.
        self.model = genai.GenerativeModel(
            model_name=model,
            system_instruction=SYSTEM_PROMPT,
        )
        self.model_name = model
        print(f"Gemini backend: {model}")

    def generate(self, prompt: str, system_prompt: str = SYSTEM_PROMPT) -> str:
        """Generate a reply; up to 5 attempts, backing off harder on 429s.

        Note: `system_prompt` exists only for interface parity with the other
        backends — it is unused here because the system instruction was fixed
        in __init__. Returns an empty string after the final failed attempt.
        """
        import google.generativeai as genai
        for attempt in range(5):
            try:
                response = self.model.generate_content(
                    prompt,
                    generation_config=genai.types.GenerationConfig(
                        max_output_tokens=64,
                        temperature=0.1,
                    ),
                )
                return response.text.strip()
            except Exception as e:
                err_str = str(e)
                if "429" in err_str and attempt < 4:
                    # Rate limited: exponential backoff starting at 4s, capped at 60s.
                    wait = min(2 ** (attempt + 2), 60)
                    print(f" Rate limited, waiting {wait}s...")
                    time.sleep(wait)
                elif attempt < 4:
                    time.sleep(2 ** attempt)
                else:
                    print(f" Gemini error: {e}")
                    return ""
# ---------------------------------------------------------------------------
# 4. EVALUATION RUNNER
# ---------------------------------------------------------------------------
def _save_results(results: list[dict], save_path: str, announce: bool) -> None:
    """Write results as UTF-8 JSON to save_path; optionally print the path."""
    with open(save_path, "w", encoding="utf-8") as f:
        json.dump(results, f, ensure_ascii=False, indent=2)
    if announce:
        print(f" Results saved to {save_path}")


def run_evaluation(backend, test_cases: list[dict], save_path: str = None) -> list[dict]:
    """Run all test cases through a model backend and score them.

    Fix: the original comment promised "intermediate results" but only wrote
    the file once after the loop finished; results are now checkpointed every
    50 cases (and once more at the end), so a crashed run keeps its progress.

    Args:
        backend: Object exposing ``generate(prompt) -> str``.
        test_cases: Case dicts from generate_test_cases().
        save_path: Optional JSON output path for (intermediate) results.

    Returns:
        One result dict per test case: category, sub_id, expected, predicted,
        raw_response (truncated to 200 chars), score.
    """
    results = []
    total = len(test_cases)
    for i, tc in enumerate(test_cases):
        raw_response = backend.generate(tc["prompt"])
        predicted = extract_tamil_answer(raw_response)
        results.append({
            "category": tc["category"],
            "sub_id": tc["sub_id"],
            "expected": tc["expected"],
            "predicted": predicted,
            "raw_response": raw_response[:200],  # truncated to keep files small
            "score": score_response(tc["expected"], predicted),
        })
        # Progress report + intermediate checkpoint every 50 cases.
        if (i + 1) % 50 == 0 or i == 0:
            cat_so_far = [r for r in results if r["category"] == tc["category"]]
            cat_acc = sum(r["score"] for r in cat_so_far) / len(cat_so_far) if cat_so_far else 0
            print(f" [{i+1}/{total}] {tc['category']}: running accuracy {cat_acc:.1%}")
            if save_path and (i + 1) % 50 == 0:
                _save_results(results, save_path, announce=False)
        # Gentle rate limiting for hosted API backends only.
        if isinstance(backend, (OpenAIBackend, GeminiBackend)):
            time.sleep(0.1)
    # Final save
    if save_path:
        _save_results(results, save_path, announce=True)
    return results
# ---------------------------------------------------------------------------
# 5. REPORT GENERATOR
# ---------------------------------------------------------------------------
# Display names (English + Tamil) for report tables. Iteration order of this
# dict fixes the row order in generate_report / generate_comparison_table.
CATEGORY_NAMES = {
    "case_suffixes": "Case Suffixes (வேற்றுமை)",
    "plural_case": "Plural + Case (பன்மை)",
    "verb_conjugation": "Verb Conjugation (வினைத்திரிபு)",
    "sandhi": "Sandhi (புணர்ச்சி)",
    "honorific": "Honorific (மரியாதை)",
    "negation": "Negation (எதிர்மறை)",
    "compound": "Compound Words (கூட்டுச்சொல்)",
    "conditional": "Conditional/Causal (நிபந்தனை)",
    "novel": "Novel Combinations (புதிய வடிவங்கள்)",
}
def generate_report(results: list[dict], model_name: str) -> dict:
    """Generate per-category and overall scores.

    Prints a formatted results table plus a sample of failures, and returns
    a summary dict ("model", "overall_accuracy", "total_cases",
    "category_scores") consumable by generate_comparison_table().
    """
    # Tally per category: exact (score 1.0), partial (0.5), wrong (anything else).
    categories = {}
    for r in results:
        cat = r["category"]
        if cat not in categories:
            categories[cat] = {"total": 0, "exact": 0, "partial": 0, "wrong": 0, "score_sum": 0}
        categories[cat]["total"] += 1
        categories[cat]["score_sum"] += r["score"]
        if r["score"] == 1.0:
            categories[cat]["exact"] += 1
        elif r["score"] == 0.5:
            categories[cat]["partial"] += 1
        else:
            categories[cat]["wrong"] += 1
    # Print report
    print(f"\n{'='*80}")
    print(f"MORPHOLOGICAL BENCHMARK RESULTS: {model_name}")
    print(f"{'='*80}")
    print(f"{'Category':<40} {'Total':>6} {'Exact':>6} {'Partial':>8} {'Wrong':>6} {'Accuracy':>9}")
    print(f"{'-'*80}")
    overall_score = 0
    overall_total = 0
    cat_scores = {}
    # Iterate CATEGORY_NAMES (not the tally dict) so rows keep a stable order.
    for cat_key in CATEGORY_NAMES:
        if cat_key in categories:
            c = categories[cat_key]
            # Accuracy is the mean score, so partial credit (0.5) counts.
            acc = c["score_sum"] / c["total"] if c["total"] > 0 else 0
            cat_name = CATEGORY_NAMES[cat_key]
            print(f"{cat_name:<40} {c['total']:>6} {c['exact']:>6} {c['partial']:>8} {c['wrong']:>6} {acc:>8.1%}")
            overall_score += c["score_sum"]
            overall_total += c["total"]
            cat_scores[cat_key] = round(acc * 100, 1)
    overall_acc = overall_score / overall_total if overall_total > 0 else 0
    print(f"{'-'*80}")
    print(f"{'OVERALL':<40} {overall_total:>6} {'':>6} {'':>8} {'':>6} {overall_acc:>8.1%}")
    print(f"{'='*80}")
    # Top failures
    failures = [r for r in results if r["score"] == 0.0]
    if failures:
        print(f"\nSample failures (first 10):")
        for f in failures[:10]:
            print(f" [{f['category']}] expected='{f['expected']}' got='{f['predicted']}'")
    return {
        "model": model_name,
        "overall_accuracy": round(overall_acc * 100, 1),
        "total_cases": overall_total,
        "category_scores": cat_scores,
    }
def generate_comparison_table(all_reports: list[dict]):
    """Print a cross-model comparison table and save the reports as JSON.

    Args:
        all_reports: Summary dicts from generate_report(), one per model.
            Side effect: writes benchmark_comparison.json next to this script.
    """
    print(f"\n{'='*100}")
    print("COMPARISON TABLE — Tamil Morphological Benchmark")
    print(f"{'='*100}")
    # Header: one column per model, truncated to keep the table readable.
    model_names = [r["model"] for r in all_reports]
    header = f"{'Category':<35}"
    for name in model_names:
        short = name.split("/")[-1][:18]  # drop HF org prefix, clip to 18 chars
        header += f" {short:>18}"
    print(header)
    print("-" * 100)
    # Per-category rows
    for cat_key, cat_name in CATEGORY_NAMES.items():
        row = f"{cat_name[:35]:<35}"
        for report in all_reports:
            # Missing category (e.g. filtered run) renders as N/A.
            score = report["category_scores"].get(cat_key, "-")
            if isinstance(score, (int, float)):
                row += f" {score:>17.1f}%"
            else:
                row += f" {'N/A':>18}"
        print(row)
    # Overall row
    print("-" * 100)
    row = f"{'OVERALL':<35}"
    for report in all_reports:
        row += f" {report['overall_accuracy']:>17.1f}%"
    print(row)
    print("=" * 100)
    # Save as JSON for paper
    output_path = Path(__file__).parent / "benchmark_comparison.json"
    with open(output_path, "w", encoding="utf-8") as f:
        json.dump(all_reports, f, ensure_ascii=False, indent=2)
    print(f"\nComparison data saved to {output_path}")
# ---------------------------------------------------------------------------
# 6. MODEL CONFIGURATIONS
# ---------------------------------------------------------------------------
# Registry of evaluated models: CLI key -> {model id, backend type, blurb}.
# NOTE(review): the "gemini-flash" entry points at gemini-2.5-flash-lite while
# the module docstring mentions gemini-2.0-flash — confirm which was actually
# used for the published numbers.
MODELS = {
    "tamil-llama-13b": {
        "id": "abhinand/tamil-llama-13b-instruct-v0.1",
        "backend": "local",
        "desc": "Tamil-Llama 13B (top Tamil open-source)",
    },
    "tamil-qwen-7b": {
        "id": "Tamil-ai/tamil-qwen25-7b-instruct",
        "backend": "local",
        "desc": "Tamil-Qwen 7B (ours)",
    },
    "qwen-base": {
        "id": "Qwen/Qwen2.5-7B-Instruct",
        "backend": "local",
        "desc": "Qwen2.5-7B base (no Tamil fine-tune)",
    },
    "gpt-4o-mini": {
        "id": "gpt-4o-mini",
        "backend": "openai",
        "desc": "GPT-4o-mini (OpenAI, cheapest)",
    },
    "gemini-flash": {
        "id": "gemini-2.5-flash-lite",
        "backend": "gemini",
        "desc": "Gemini 2.5 Flash-Lite (Google)",
    },
}
# ---------------------------------------------------------------------------
# 7. MAIN
# ---------------------------------------------------------------------------
def create_backend(model_key: str, model_override: str = None, load_in_4bit: bool = True):
    """Create the appropriate backend for a model key or HF model ID.

    Args:
        model_key: Key in MODELS, or an arbitrary HuggingFace model ID
            (unknown keys are assumed to be local models).
        model_override: Explicit model ID overriding the configured one.
        load_in_4bit: Passed through to LocalModelBackend. New parameter,
            defaulting to the previously hard-coded behavior (4-bit on), so
            callers can honor a --no-4bit flag.

    Returns:
        An OpenAIBackend, GeminiBackend, or LocalModelBackend instance.
    """
    if model_key in MODELS:
        cfg = MODELS[model_key]
        model_id = model_override or cfg["id"]
        backend_type = cfg["backend"]
    else:
        # Custom model — assume local HuggingFace inference.
        model_id = model_override or model_key
        backend_type = "local"
    if backend_type == "openai":
        return OpenAIBackend(model_id)
    if backend_type == "gemini":
        return GeminiBackend(model_id)
    return LocalModelBackend(model_id, load_in_4bit=load_in_4bit)
def _build_backend(backend_type: str, model_id: str, use_4bit: bool):
    """Instantiate the backend named by backend_type for model_id."""
    if backend_type == "openai":
        return OpenAIBackend(model_id)
    if backend_type == "gemini":
        return GeminiBackend(model_id)
    return LocalModelBackend(model_id, load_in_4bit=use_4bit)


def _free_backend(backend):
    """Release a backend and, for local models, reclaim GPU memory."""
    if hasattr(backend, "model"):
        del backend.model
    del backend
    import gc
    gc.collect()
    try:
        import torch
        torch.cuda.empty_cache()
    except Exception:
        pass  # torch absent or CPU-only run — nothing to reclaim


def main():
    """CLI entry point: parse args, evaluate the selected models, report.

    Fixes vs. the original: output_dir.mkdir now passes parents=True so a
    nested --output-dir path works; the per-backend construction branches
    (duplicated for the --backend override and the configured backend) are
    unified in _build_backend.
    """
    parser = argparse.ArgumentParser(
        description="TAMIL-MORPH Benchmark: Evaluate LLMs on Tamil morphological understanding"
    )
    parser.add_argument(
        "--model", type=str, default=None,
        help="Model key (tamil-llama-13b, tamil-qwen-7b, qwen-base, gpt-4o-mini, gemini-flash) "
        "or a HuggingFace model ID for local inference"
    )
    parser.add_argument(
        "--backend", type=str, choices=["local", "openai", "gemini"], default=None,
        help="Override backend type"
    )
    parser.add_argument("--all", action="store_true", help="Run all models")
    parser.add_argument("--api-only", action="store_true", help="Run only API models (no GPU needed)")
    parser.add_argument(
        "--benchmark-file", type=str, default=None,
        help="Path to Benchmarkdata.md (default: same directory as this script)"
    )
    parser.add_argument(
        "--output-dir", type=str, default=None,
        help="Directory for results (default: ./benchmark_results/)"
    )
    parser.add_argument(
        "--categories", type=str, nargs="+", default=None,
        help="Run only specific categories (e.g., case_suffixes verb_conjugation)"
    )
    parser.add_argument("--no-4bit", action="store_true", help="Disable 4-bit quantization for local models")
    args = parser.parse_args()

    # Parse benchmark data into individual prompt/expected test cases.
    print("Parsing benchmark data...")
    data = parse_benchmark_data(args.benchmark_file)
    test_cases = generate_test_cases(data)
    # Filter categories if requested
    if args.categories:
        test_cases = [tc for tc in test_cases if tc["category"] in args.categories]
    print(f"Total test cases: {len(test_cases)}")
    for cat in CATEGORY_NAMES:
        count = sum(1 for tc in test_cases if tc["category"] == cat)
        if count > 0:
            print(f" {CATEGORY_NAMES[cat]}: {count}")

    # Output directory (parents=True so nested --output-dir paths work).
    output_dir = Path(args.output_dir) if args.output_dir else Path(__file__).parent / "benchmark_results"
    output_dir.mkdir(parents=True, exist_ok=True)

    # Determine which models to run
    if args.all:
        model_keys = list(MODELS.keys())
    elif args.api_only:
        model_keys = ["gpt-4o-mini", "gemini-flash"]
    elif args.model:
        model_keys = [args.model]
    else:
        # No selection given: list the options and exit.
        print("\nAvailable models:")
        for key, cfg in MODELS.items():
            print(f" {key:20s}{cfg['id']} ({cfg['desc']})")
        print("\nUsage: python morph_benchmark_eval.py --model <model_key>")
        print(" python morph_benchmark_eval.py --api-only")
        print(" python morph_benchmark_eval.py --all")
        return

    # Run evaluations; a failure on one model does not abort the others.
    all_reports = []
    for model_key in model_keys:
        # Unknown keys fall back to a local-model config (or --backend).
        cfg = MODELS.get(model_key, {"id": model_key, "backend": args.backend or "local", "desc": model_key})
        model_id = cfg["id"]
        print(f"\n{'='*80}")
        print(f"Evaluating: {model_id} ({cfg['desc']})")
        print(f"{'='*80}")
        try:
            # --backend overrides the configured backend type.
            backend_type = args.backend or cfg["backend"]
            backend = _build_backend(backend_type, model_id, use_4bit=not args.no_4bit)
            save_path = str(output_dir / f"{model_key.replace('/', '_')}_results.json")
            results = run_evaluation(backend, test_cases, save_path)
            report = generate_report(results, model_id)
            all_reports.append(report)
            # Free GPU memory before loading the next local model.
            _free_backend(backend)
        except Exception as e:
            print(f" FAILED: {e}")
            import traceback
            traceback.print_exc()

    # Generate comparison table (or save the single report as-is).
    if len(all_reports) > 1:
        generate_comparison_table(all_reports)
    elif len(all_reports) == 1:
        output_path = output_dir / "benchmark_comparison.json"
        with open(output_path, "w", encoding="utf-8") as f:
            json.dump(all_reports, f, ensure_ascii=False, indent=2)
    print("\nDone!")


if __name__ == "__main__":
    main()