Datasets:
Upload runpod_benchmark.ipynb with huggingface_hub
Browse files — runpod_benchmark.ipynb +135 -0
runpod_benchmark.ipynb
ADDED
|
@@ -0,0 +1,135 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"cells": [
|
| 3 |
+
{
|
| 4 |
+
"cell_type": "markdown",
|
| 5 |
+
"id": "0p1yxckx3sxq",
|
| 6 |
+
"source": "# TAMIL-MORPH Benchmark — RunPod\n**Paper:** \"A Thousand Language Problem: Morphological Understanding in Linguistic AI\"\n\nBenchmarks 2 Tamil LLMs on 1,030 morphological test cases across 9 categories, then compares them against a previously benchmarked GPT-4o-mini baseline (see Step 8).\n- **Tamil-ai/tamil-qwen25-7b-instruct** (ours)\n- **abhinand/tamil-llama-13b-instruct-v0.1** (top Tamil open-source)",
|
| 7 |
+
"metadata": {}
|
| 8 |
+
},
|
| 9 |
+
{
|
| 10 |
+
"cell_type": "markdown",
|
| 11 |
+
"id": "4h2hutdkfg2",
|
| 12 |
+
"source": "## Step 1: Install Dependencies",
|
| 13 |
+
"metadata": {}
|
| 14 |
+
},
|
| 15 |
+
{
|
| 16 |
+
"cell_type": "code",
|
| 17 |
+
"id": "nbqryxy91u",
|
| 18 |
+
"source": "!pip install -q transformers accelerate bitsandbytes huggingface_hub sentencepiece protobuf",
|
| 19 |
+
"metadata": {},
|
| 20 |
+
"execution_count": null,
|
| 21 |
+
"outputs": []
|
| 22 |
+
},
|
| 23 |
+
{
|
| 24 |
+
"cell_type": "markdown",
|
| 25 |
+
"id": "7npo72pr67m",
|
| 26 |
+
"source": "## Step 2: Download Benchmark Data from HuggingFace",
|
| 27 |
+
"metadata": {}
|
| 28 |
+
},
|
| 29 |
+
{
|
| 30 |
+
"cell_type": "code",
|
| 31 |
+
"id": "uss67qa7bco",
|
| 32 |
+
"source": "from huggingface_hub import hf_hub_download\n\nbenchmark_path = hf_hub_download(\n repo_id=\"Tamil-ai/tamil-morphological-benchmark\",\n filename=\"Benchmarkdata.md\",\n repo_type=\"dataset\",\n)\nprint(f\"Downloaded to: {benchmark_path}\")",
|
| 33 |
+
"metadata": {},
|
| 34 |
+
"execution_count": null,
|
| 35 |
+
"outputs": []
|
| 36 |
+
},
|
| 37 |
+
{
|
| 38 |
+
"cell_type": "markdown",
|
| 39 |
+
"id": "lnc0fhwtpcd",
|
| 40 |
+
"source": "## Step 3: Parse Benchmark Data (1,030 test cases)",
|
| 41 |
+
"metadata": {}
|
| 42 |
+
},
|
| 43 |
+
{
|
| 44 |
+
"cell_type": "code",
|
| 45 |
+
"id": "k7djjy65rj",
|
| 46 |
+
"source": "import json, re, gc, unicodedata, time\nfrom pathlib import Path\nfrom datetime import datetime\n\n# --- Config ---\nSYSTEM_PROMPT = (\n \"You are a Tamil linguistics expert. \"\n \"Answer with ONLY the Tamil word or phrase requested. \"\n \"Do not add explanations, translations, or extra text. \"\n \"Just the Tamil form, nothing else.\"\n)\n\nCATEGORY_NAMES = {\n \"case_suffixes\": \"Case Suffixes (வேற்றுமை)\",\n \"plural_case\": \"Plural + Case (பன்மை)\",\n \"verb_conjugation\": \"Verb Conjugation (வினைத்திரிபு)\",\n \"sandhi\": \"Sandhi (புணர்ச்சி)\",\n \"honorific\": \"Honorific (மரியாதை)\",\n \"negation\": \"Negation (எதிர்மறை)\",\n \"compound\": \"Compound Words (கூட்டுச்சொல்)\",\n \"conditional\": \"Conditional/Causal (நிபந்தனை)\",\n \"novel\": \"Novel Combinations (புதிய வடிவங்கள்)\",\n}\n\nCASE_NAMES_TAMIL = {\n \"accusative\": \"இரண்டாம் வேற்றுமை (accusative / -ஐ)\",\n \"dative\": \"நான்காம் வேற்றுமை (dative / -க்கு)\",\n \"locative\": \"ஏழாம் வேற்றுமை (locative / -இல்)\",\n \"ablative\": \"ablative (-இலிருந்து)\",\n \"genitive\": \"ஆறாம் வேற்றுமை (genitive / -இன்)\",\n \"sociative\": \"மூன்றாம் வேற்றுமை (sociative / -ஓடு)\",\n}\n\nPERSON_TAMIL = {\n \"naan_present\": (\"நான்\", \"present tense / நிகழ்காலம்\"),\n \"naan_past\": (\"நான்\", \"past tense / இறந்தகாலம்\"),\n \"naan_future\": (\"நான்\", \"future tense / எதிர்காலம்\"),\n \"avan_present\": (\"அவன்\", \"present tense / நிகழ்காலம்\"),\n \"aval_present\": (\"அவள்\", \"present tense / நிகழ்காலம்\"),\n \"avargal_present\": (\"அவர்கள்\", \"present tense / நிகழ்காலம்\"),\n \"naangal_present\": (\"நாங்கள்\", \"present tense / நிகழ்காலம்\"),\n}\n\nPLURAL_FORMS = {\n \"plural\": \"plural / பன்மை\",\n \"plural_locative\": \"plural + locative / பன்மை + -இல்\",\n \"plural_ablative\": \"plural + ablative / பன்மை + -இலிருந்து\",\n \"plural_dative\": \"plural + dative / பன்மை + -க்கு\",\n}\n\nNEGATION_FORMS = {\n \"present_negative\": \"present negative / நிகழ்கால எதிர்மறை\",\n \"past_negative\": 
\"past negative / இறந்தகால எதிர்மறை\",\n \"future_negative\": \"future negative / எதிர்கால எதிர்மறை\",\n}\n\nCONDITIONAL_FORMS = {\n \"conditional\": \"conditional / நிபந்தனை (-ஆல்)\",\n \"causal\": \"causal / காரண (-ததால்)\",\n}\n\nHONORIFIC_LEVELS = {\n \"informal\": \"informal / முறைசாரா (நீ)\",\n \"formal\": \"formal / மரியாதை (நீங்கள்)\",\n \"high_honorific\": \"high honorific / உயர் மரியாதை (literary)\",\n}\n\n# --- Parser ---\nSECTION_MARKERS = [\n (r\"\\*\\*## PROMPT 1.*Case Suffixes\", \"case_suffixes\"),\n (r\"## PROMPT 2.*Plural\", \"plural_case\"),\n (r\"## PROMPT 3.*Verb Conjugation\", \"verb_conjugation\"),\n (r\"\\*\\*prompt 4\\*\\*\", \"sandhi\"),\n (r\"\\*\\*prompt 5\\*\\*\", \"honorific\"),\n (r\"\\*\\*prompt 6\\*\\*\", \"negation\"),\n (r\"\\*\\*prompt 7\\*\\*\", \"compound\"),\n (r\"\\*\\*prompt 8\\*\\*\", \"conditional\"),\n (r\"\\*\\*prompt\\*\\*\", \"novel\"),\n]\n\ndef extract_json_blocks(text):\n blocks, depth, start = [], 0, None\n for i, ch in enumerate(text):\n if ch == \"[\" and depth == 0: start, depth = i, 1\n elif ch == \"[\": depth += 1\n elif ch == \"]\":\n depth -= 1\n if depth == 0 and start is not None:\n blocks.append(text[start:i+1]); start = None\n return blocks\n\ndef parse_benchmark_data(filepath):\n raw = Path(filepath).read_text(encoding=\"utf-8\")\n lines = raw.split(\"\\n\")\n sections, current_key, current_lines = {}, None, []\n for line in lines:\n matched = False\n for pattern, key in SECTION_MARKERS:\n if re.search(pattern, line, re.IGNORECASE):\n if current_key: sections[current_key] = \"\\n\".join(current_lines)\n current_key, current_lines, matched = key, [], True; break\n if not matched and current_key: current_lines.append(line)\n if current_key: sections[current_key] = \"\\n\".join(current_lines)\n\n parsed = {}\n for key, content in sections.items():\n blocks = extract_json_blocks(content)\n if blocks:\n try: parsed[key] = json.loads(blocks[0])\n except json.JSONDecodeError as e: print(f\" WARNING: {key}: 
{e}\"); parsed[key] = []\n return parsed\n\nprint(\"Parsing benchmark data...\")\ndata = parse_benchmark_data(benchmark_path)\nfor k, v in data.items():\n print(f\" {CATEGORY_NAMES.get(k, k)}: {len(v)} items\")",
|
| 47 |
+
"metadata": {},
|
| 48 |
+
"execution_count": null,
|
| 49 |
+
"outputs": []
|
| 50 |
+
},
|
| 51 |
+
{
|
| 52 |
+
"cell_type": "markdown",
|
| 53 |
+
"id": "od2il1scq5c",
|
| 54 |
+
"source": "## Step 4: Generate Test Cases",
|
| 55 |
+
"metadata": {}
|
| 56 |
+
},
|
| 57 |
+
{
|
| 58 |
+
"cell_type": "code",
|
| 59 |
+
"id": "u2e7qm5s5do",
|
| 60 |
+
"source": "def generate_test_cases(data):\n cases = []\n\n for item in data.get(\"case_suffixes\", []):\n root, meaning = item[\"root\"], item[\"root_meaning\"]\n for case_key, form_data in item[\"forms\"].items():\n cases.append({\n \"category\": \"case_suffixes\", \"sub_id\": f\"{root}_{case_key}\",\n \"prompt\": f\"What is the {case_key} form of the Tamil word '{root}' ({meaning})?\\nTamil grammar term: {CASE_NAMES_TAMIL.get(case_key, case_key)}\\nExample: 'வீடு' (house) accusative → வீட்டை\\nAnswer with ONLY the Tamil word.\",\n \"expected\": form_data[\"tamil\"], \"root\": root,\n })\n\n for item in data.get(\"plural_case\", []):\n root, meaning = item[\"root\"], item[\"root_meaning\"]\n for form_key, form_data in item[\"forms\"].items():\n cases.append({\n \"category\": \"plural_case\", \"sub_id\": f\"{root}_{form_key}\",\n \"prompt\": f\"What is the {PLURAL_FORMS.get(form_key, form_key)} form of the Tamil word '{root}' ({meaning})?\\nExample: 'வீடு' plural → வீடுகள், plural+locative → வீடுகளில்\\nAnswer with ONLY the Tamil word.\",\n \"expected\": form_data[\"tamil\"], \"root\": root,\n })\n\n for item in data.get(\"verb_conjugation\", []):\n root, meaning = item[\"root\"], item[\"root_meaning\"]\n for form_key, form_data in item[\"forms\"].items():\n if form_key in PERSON_TAMIL:\n person, tense_desc = PERSON_TAMIL[form_key]\n prompt = f\"Conjugate the Tamil verb '{root}' ({meaning}) for {person} in {tense_desc}.\\nExample: 'படி' (read), நான், present → படிக்கிறேன்\\nAnswer with the full form (e.g., படிக்கிறேன், not just the verb).\"\n else:\n prompt = f\"Conjugate the Tamil verb '{root}' ({meaning}) in the {form_key} form.\\nAnswer with ONLY the Tamil form.\"\n cases.append({\n \"category\": \"verb_conjugation\", \"sub_id\": f\"{root}_{form_key}\",\n \"prompt\": prompt, \"expected\": form_data[\"tamil\"], \"root\": root,\n })\n\n for item in data.get(\"sandhi\", []):\n w1, m1 = item.get(\"word1\", \"\"), item.get(\"word1_meaning\", \"\")\n w2, m2 = 
item.get(\"word2\", \"\"), item.get(\"word2_meaning\", \"\")\n expected = item.get(\"combined\", \"\")\n if not expected or not w1: continue\n cases.append({\n \"category\": \"sandhi\", \"sub_id\": f\"{w1}+{w2}\",\n \"prompt\": f\"When the Tamil words '{w1}' ({m1}) and '{w2}' ({m2}) combine according to Tamil sandhi (புணர்ச்சி) rules, what is the combined form?\\nExample: 'மலை' + 'அழகு' → மலையழகு\\nAnswer with ONLY the combined Tamil word.\",\n \"expected\": expected, \"root\": f\"{w1}+{w2}\",\n })\n\n for item in data.get(\"honorific\", []):\n action = item.get(\"action\", \"\")\n for level_key, form_data in item.get(\"forms\", {}).items():\n cases.append({\n \"category\": \"honorific\", \"sub_id\": f\"{action}_{level_key}\",\n \"prompt\": f\"What is the {HONORIFIC_LEVELS.get(level_key, level_key)} Tamil form for the action '{action}'?\\nExample: 'come' informal → வா, formal → வாருங்கள்\\nAnswer with ONLY the Tamil word or short phrase.\",\n \"expected\": form_data[\"tamil\"], \"root\": action,\n })\n\n for item in data.get(\"negation\", []):\n root, meaning = item[\"root\"], item[\"root_meaning\"]\n for form_key, form_data in item[\"forms\"].items():\n cases.append({\n \"category\": \"negation\", \"sub_id\": f\"{root}_{form_key}\",\n \"prompt\": f\"What is the {NEGATION_FORMS.get(form_key, form_key)} form of the Tamil verb '{root}' ({meaning}) for நான் (I)?\\nExample: 'படி' (read), present negative → படிக்கவில்லை\\nAnswer with the full Tamil phrase.\",\n \"expected\": form_data[\"tamil\"], \"root\": root,\n })\n\n for item in data.get(\"compound\", []):\n w1, m1 = item.get(\"word1\", \"\"), item.get(\"word1_meaning\", \"\")\n w2, m2 = item.get(\"word2\", \"\"), item.get(\"word2_meaning\", \"\")\n expected = item.get(\"compound\", \"\")\n if not expected or not w1: continue\n cases.append({\n \"category\": \"compound\", \"sub_id\": f\"{w1}+{w2}\",\n \"prompt\": f\"What is the Tamil compound word formed by combining '{w1}' ({m1}) and '{w2}' ({m2})?\\nExample: 'தலை' 
(head) + 'வலி' (pain) → தலைவலி (headache)\\nAnswer with ONLY the compound Tamil word.\",\n \"expected\": expected, \"root\": f\"{w1}+{w2}\",\n })\n\n for item in data.get(\"conditional\", []):\n root, meaning = item[\"root\"], item[\"root_meaning\"]\n for form_key, form_data in item[\"forms\"].items():\n cases.append({\n \"category\": \"conditional\", \"sub_id\": f\"{root}_{form_key}\",\n \"prompt\": f\"What is the {CONDITIONAL_FORMS.get(form_key, form_key)} form of the Tamil verb '{root}' ({meaning})?\\nExample: 'படி' (read), conditional → படித்தால், causal → படித்ததால்\\nAnswer with ONLY the Tamil word.\",\n \"expected\": form_data[\"tamil\"], \"root\": root,\n })\n\n for item in data.get(\"novel\", []):\n cases.append({\n \"category\": \"novel\", \"sub_id\": f\"novel_{item.get('id','')}_{item.get('category','novel')}\",\n \"prompt\": f\"Combine the following Tamil morphemes into a single valid Tamil word/form:\\nMorphemes: {item['breakdown']}\\nIntended meaning: {item['meaning']}\\nExample: 'பல்கலைக்கழகம் + கள் + இல் + இருந்து + கூட' → பல்கலைக்கழகங்களிலிருந்துகூட\\nAnswer with ONLY the combined Tamil form.\",\n \"expected\": item[\"form\"], \"root\": item[\"breakdown\"],\n })\n\n return cases\n\ntest_cases = generate_test_cases(data)\nprint(f\"Total test cases: {len(test_cases)}\")\nfor cat_key in CATEGORY_NAMES:\n count = sum(1 for tc in test_cases if tc[\"category\"] == cat_key)\n if count > 0:\n print(f\" {CATEGORY_NAMES[cat_key]}: {count}\")",
|
| 61 |
+
"metadata": {},
|
| 62 |
+
"execution_count": null,
|
| 63 |
+
"outputs": []
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"cell_type": "markdown",
|
| 67 |
+
"id": "f9y1w141jhi",
|
| 68 |
+
"source": "## Step 5: Scoring & Inference Functions",
|
| 69 |
+
"metadata": {}
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"cell_type": "code",
|
| 73 |
+
"id": "gixhah0g5h",
|
| 74 |
+
"source": "import torch\nfrom transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig\n\ndef normalize_tamil(text):\n text = unicodedata.normalize(\"NFC\", text).strip()\n text = re.sub(r'[\"\"\"\\'`\\.\\,\\!\\?\\;\\:\\(\\)\\[\\]\\{\\}]', '', text)\n return text.strip()\n\ndef extract_tamil_answer(response):\n response = response.strip()\n tamil_pattern = re.compile(r'[\\u0B80-\\u0BFF][\\u0B80-\\u0BFF\\s]*[\\u0B80-\\u0BFF]')\n single_tamil = re.compile(r'[\\u0B80-\\u0BFF]+')\n for line in response.split(\"\\n\"):\n line = line.strip()\n if not line: continue\n if any(line.startswith(p) for p in [\"Note:\", \"Explanation:\", \"The \", \"This \", \"Here\"]): continue\n match = tamil_pattern.search(line)\n if match: return normalize_tamil(match.group())\n match = single_tamil.search(line)\n if match: return normalize_tamil(match.group())\n match = tamil_pattern.search(response)\n if match: return normalize_tamil(match.group())\n match = single_tamil.search(response)\n if match: return normalize_tamil(match.group())\n return normalize_tamil(response)\n\ndef score_response(expected, predicted):\n exp_norm, pred_norm = normalize_tamil(expected), normalize_tamil(predicted)\n if not pred_norm: return 0.0\n if exp_norm == pred_norm: return 1.0\n if exp_norm in pred_norm: return 1.0\n if pred_norm in exp_norm and len(pred_norm) > 2: return 0.5\n return 0.0\n\ndef load_model(model_id):\n print(f\"\\nLoading {model_id}...\")\n tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)\n if tokenizer.pad_token is None:\n tokenizer.pad_token = tokenizer.eos_token\n model = AutoModelForCausalLM.from_pretrained(\n model_id, trust_remote_code=True, device_map=\"auto\",\n torch_dtype=torch.float16,\n quantization_config=BitsAndBytesConfig(\n load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16, bnb_4bit_quant_type=\"nf4\",\n ),\n )\n model.eval()\n print(f\" Loaded on {model.device}\")\n return model, tokenizer\n\ndef generate_response(model, 
tokenizer, prompt):\n messages = [\n {\"role\": \"system\", \"content\": SYSTEM_PROMPT},\n {\"role\": \"user\", \"content\": prompt},\n ]\n try:\n text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n except Exception:\n text = f\"{SYSTEM_PROMPT}\\n\\nUser: {prompt}\\nAssistant:\"\n inputs = tokenizer(text, return_tensors=\"pt\").to(model.device)\n with torch.no_grad():\n outputs = model.generate(\n **inputs, max_new_tokens=64, temperature=0.1, top_p=0.9,\n do_sample=True, repetition_penalty=1.1, pad_token_id=tokenizer.pad_token_id,\n )\n return tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[1]:], skip_special_tokens=True).strip()\n\ndef free_model(model, tokenizer):\n del model, tokenizer\n gc.collect()\n torch.cuda.empty_cache()\n print(\" GPU memory freed.\")\n\ndef run_evaluation(model, tokenizer, test_cases):\n results = []\n for i, tc in enumerate(test_cases):\n raw = generate_response(model, tokenizer, tc[\"prompt\"])\n predicted = extract_tamil_answer(raw)\n sc = score_response(tc[\"expected\"], predicted)\n results.append({\n \"category\": tc[\"category\"], \"sub_id\": tc[\"sub_id\"],\n \"expected\": tc[\"expected\"], \"predicted\": predicted,\n \"raw_response\": raw[:200], \"score\": sc,\n })\n if (i + 1) % 50 == 0 or i == 0:\n cat_so_far = [r for r in results if r[\"category\"] == tc[\"category\"]]\n cat_acc = sum(r[\"score\"] for r in cat_so_far) / len(cat_so_far) if cat_so_far else 0\n print(f\" [{i+1}/{len(test_cases)}] {tc['category']}: {cat_acc:.1%}\")\n return results\n\ndef generate_report(results, model_name):\n categories = {}\n for r in results:\n cat = r[\"category\"]\n if cat not in categories:\n categories[cat] = {\"total\": 0, \"exact\": 0, \"partial\": 0, \"wrong\": 0, \"score_sum\": 0}\n categories[cat][\"total\"] += 1\n categories[cat][\"score_sum\"] += r[\"score\"]\n if r[\"score\"] == 1.0: categories[cat][\"exact\"] += 1\n elif r[\"score\"] == 0.5: categories[cat][\"partial\"] += 1\n 
else: categories[cat][\"wrong\"] += 1\n\n print(f\"\\n{'='*80}\")\n print(f\"RESULTS: {model_name}\")\n print(f\"{'='*80}\")\n print(f\"{'Category':<40} {'Total':>6} {'Exact':>6} {'Partial':>8} {'Wrong':>6} {'Acc':>9}\")\n print(f\"{'-'*80}\")\n overall_score, overall_total, cat_scores = 0, 0, {}\n for cat_key in CATEGORY_NAMES:\n if cat_key in categories:\n c = categories[cat_key]\n acc = c[\"score_sum\"] / c[\"total\"] if c[\"total\"] > 0 else 0\n print(f\"{CATEGORY_NAMES[cat_key]:<40} {c['total']:>6} {c['exact']:>6} {c['partial']:>8} {c['wrong']:>6} {acc:>8.1%}\")\n overall_score += c[\"score_sum\"]; overall_total += c[\"total\"]\n cat_scores[cat_key] = round(acc * 100, 1)\n overall_acc = overall_score / overall_total if overall_total > 0 else 0\n print(f\"{'-'*80}\")\n print(f\"{'OVERALL':<40} {overall_total:>6} {'':>6} {'':>8} {'':>6} {overall_acc:>8.1%}\")\n print(f\"{'='*80}\")\n failures = [r for r in results if r[\"score\"] == 0.0]\n if failures:\n print(f\"\\nSample failures (first 10):\")\n for f in failures[:10]:\n print(f\" [{f['category']}] expected='{f['expected']}' got='{f['predicted']}'\")\n return {\"model\": model_name, \"overall_accuracy\": round(overall_acc * 100, 1),\n \"total_cases\": overall_total, \"category_scores\": cat_scores}\n\nprint(\"Functions ready.\")",
|
| 75 |
+
"metadata": {},
|
| 76 |
+
"execution_count": null,
|
| 77 |
+
"outputs": []
|
| 78 |
+
},
|
| 79 |
+
{
|
| 80 |
+
"cell_type": "markdown",
|
| 81 |
+
"id": "1upjcl4v6fm",
|
| 82 |
+
"source": "## Step 6: Run Tamil-Qwen 7B (Our Model)\nEstimated runtime: ~15–20 min on an RTX 5090.",
|
| 83 |
+
"metadata": {}
|
| 84 |
+
},
|
| 85 |
+
{
|
| 86 |
+
"cell_type": "code",
|
| 87 |
+
"id": "hidsobegji8",
|
| 88 |
+
"source": "model_1, tok_1 = load_model(\"Tamil-ai/tamil-qwen25-7b-instruct\")\nresults_qwen = run_evaluation(model_1, tok_1, test_cases)\nreport_qwen = generate_report(results_qwen, \"Tamil-ai/tamil-qwen25-7b-instruct\")\n\n# Save results\nwith open(\"tamil_qwen_7b_results.json\", \"w\", encoding=\"utf-8\") as f:\n json.dump(results_qwen, f, ensure_ascii=False, indent=2)\nprint(\"\\nSaved: tamil_qwen_7b_results.json\")\n\nfree_model(model_1, tok_1)",
|
| 89 |
+
"metadata": {},
|
| 90 |
+
"execution_count": null,
|
| 91 |
+
"outputs": []
|
| 92 |
+
},
|
| 93 |
+
{
|
| 94 |
+
"cell_type": "markdown",
|
| 95 |
+
"id": "fj37ldrjm89",
|
| 96 |
+
"source": "## Step 7: Run Tamil-Llama 13B (Top Tamil Open-Source)\nEstimated runtime: ~20–25 min on an RTX 5090.",
|
| 97 |
+
"metadata": {}
|
| 98 |
+
},
|
| 99 |
+
{
|
| 100 |
+
"cell_type": "code",
|
| 101 |
+
"id": "5w3ap482t8o",
|
| 102 |
+
"source": "model_2, tok_2 = load_model(\"abhinand/tamil-llama-13b-instruct-v0.1\")\nresults_llama = run_evaluation(model_2, tok_2, test_cases)\nreport_llama = generate_report(results_llama, \"abhinand/tamil-llama-13b-instruct-v0.1\")\n\n# Save results\nwith open(\"tamil_llama_13b_results.json\", \"w\", encoding=\"utf-8\") as f:\n json.dump(results_llama, f, ensure_ascii=False, indent=2)\nprint(\"\\nSaved: tamil_llama_13b_results.json\")\n\nfree_model(model_2, tok_2)",
|
| 103 |
+
"metadata": {},
|
| 104 |
+
"execution_count": null,
|
| 105 |
+
"outputs": []
|
| 106 |
+
},
|
| 107 |
+
{
|
| 108 |
+
"cell_type": "markdown",
|
| 109 |
+
"id": "lld5nwdhbt",
|
| 110 |
+
"source": "## Step 8: Comparison Table (All Models Including GPT-4o-mini)",
|
| 111 |
+
"metadata": {}
|
| 112 |
+
},
|
| 113 |
+
{
|
| 114 |
+
"cell_type": "code",
|
| 115 |
+
"id": "1omd5o1ow5n",
|
| 116 |
+
"source": "# GPT-4o-mini baseline (already benchmarked locally)\nreport_gpt = {\n \"model\": \"gpt-4o-mini\",\n \"overall_accuracy\": 50.7,\n \"total_cases\": 1030,\n \"category_scores\": {\n \"case_suffixes\": 82.1, \"plural_case\": 38.1, \"verb_conjugation\": 45.0,\n \"sandhi\": 42.0, \"honorific\": 30.0, \"negation\": 28.3,\n \"compound\": 62.0, \"conditional\": 68.3, \"novel\": 30.0,\n }\n}\n\nall_reports = [report_qwen, report_llama, report_gpt]\n\n# Print comparison\nprint(f\"\\n{'='*100}\")\nprint(\"TAMIL-MORPH BENCHMARK COMPARISON (1,030 test cases)\")\nprint(f\"{'='*100}\")\nheader = f\"{'Category':<35}\"\nfor r in all_reports:\n short = r['model'].split('/')[-1][:22]\n header += f\" {short:>22}\"\nprint(header)\nprint(\"-\" * 100)\nfor cat_key, cat_name in CATEGORY_NAMES.items():\n row = f\"{cat_name[:35]:<35}\"\n for report in all_reports:\n score = report[\"category_scores\"].get(cat_key, \"-\")\n if isinstance(score, (int, float)):\n row += f\" {score:>21.1f}%\"\n else:\n row += f\" {'N/A':>22}\"\n print(row)\nprint(\"-\" * 100)\nrow = f\"{'OVERALL':<35}\"\nfor report in all_reports:\n row += f\" {report['overall_accuracy']:>21.1f}%\"\nprint(row)\nprint(\"=\" * 100)\n\n# Save combined\nwith open(\"benchmark_comparison.json\", \"w\", encoding=\"utf-8\") as f:\n json.dump(all_reports, f, ensure_ascii=False, indent=2)\nprint(\"\\nSaved: benchmark_comparison.json\")",
|
| 117 |
+
"metadata": {},
|
| 118 |
+
"execution_count": null,
|
| 119 |
+
"outputs": []
|
| 120 |
+
}
|
| 121 |
+
],
|
| 122 |
+
"metadata": {
|
| 123 |
+
"kernelspec": {
|
| 124 |
+
"display_name": "Python 3",
|
| 125 |
+
"language": "python",
|
| 126 |
+
"name": "python3"
|
| 127 |
+
},
|
| 128 |
+
"language_info": {
|
| 129 |
+
"name": "python",
|
| 130 |
+
"version": "3.10.0"
|
| 131 |
+
}
|
| 132 |
+
},
|
| 133 |
+
"nbformat": 4,
|
| 134 |
+
"nbformat_minor": 5
|
| 135 |
+
}
|