mohanprakash462 committed on
Commit c5e2e67 · verified · 1 Parent(s): 3eed452

Upload kaggle_benchmark.ipynb with huggingface_hub

Files changed (1)
  1. kaggle_benchmark.ipynb +1 -1
kaggle_benchmark.ipynb CHANGED
@@ -41,7 +41,7 @@
  {
  "cell_type": "code",
  "id": "bess687mgr",
- "source": "# Cell 5: Scoring + Inference functions (Kaggle T4 x2 compatible)\nimport torch\nfrom transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig\n\ndef normalize_tamil(text):\n text = unicodedata.normalize(\"NFC\", text).strip()\n text = re.sub(r'[\"\"\"\\'`\\.\\,\\!\\?\\;\\:\\(\\)\\[\\]\\{\\}]', '', text)\n return text.strip()\n\ndef extract_tamil_answer(response):\n response = response.strip()\n tamil_pattern = re.compile(r'[\\u0B80-\\u0BFF][\\u0B80-\\u0BFF\\s]*[\\u0B80-\\u0BFF]')\n single_tamil = re.compile(r'[\\u0B80-\\u0BFF]+')\n for line in response.split(\"\\n\"):\n line = line.strip()\n if not line: continue\n if any(line.startswith(p) for p in [\"Note:\", \"Explanation:\", \"The \", \"This \", \"Here\"]): continue\n match = tamil_pattern.search(line)\n if match: return normalize_tamil(match.group())\n match = single_tamil.search(line)\n if match: return normalize_tamil(match.group())\n match = tamil_pattern.search(response)\n if match: return normalize_tamil(match.group())\n match = single_tamil.search(response)\n if match: return normalize_tamil(match.group())\n return normalize_tamil(response)\n\ndef score_response(expected, predicted):\n exp_norm, pred_norm = normalize_tamil(expected), normalize_tamil(predicted)\n if not pred_norm: return 0.0\n if exp_norm == pred_norm: return 1.0\n if exp_norm in pred_norm: return 1.0\n if pred_norm in exp_norm and len(pred_norm) > 2: return 0.5\n return 0.0\n\ndef load_model(model_id):\n \"\"\"Load model in 4-bit. Fits 7B on single T4, 14B across T4x2.\"\"\"\n print(f\"\\nLoading {model_id}...\")\n tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)\n if tokenizer.pad_token is None:\n tokenizer.pad_token = tokenizer.eos_token\n model = AutoModelForCausalLM.from_pretrained(\n model_id, trust_remote_code=True, device_map=\"auto\",\n torch_dtype=torch.float16,\n quantization_config=BitsAndBytesConfig(\n load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16, bnb_4bit_quant_type=\"nf4\",\n ),\n )\n model.eval()\n print(f\" Loaded! GPU memory: {torch.cuda.memory_allocated()/1e9:.1f}GB / {torch.cuda.get_device_properties(0).total_mem/1e9:.1f}GB\")\n return model, tokenizer\n\ndef load_peft_model(base_id, adapter_id):\n \"\"\"Load base model in 4-bit, then apply LoRA adapter.\"\"\"\n from peft import PeftModel\n print(f\"\\nLoading base: {base_id}...\")\n tokenizer = AutoTokenizer.from_pretrained(base_id, trust_remote_code=True)\n if tokenizer.pad_token is None:\n tokenizer.pad_token = tokenizer.eos_token\n base_model = AutoModelForCausalLM.from_pretrained(\n base_id, trust_remote_code=True, device_map=\"auto\",\n torch_dtype=torch.float16,\n quantization_config=BitsAndBytesConfig(\n load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16, bnb_4bit_quant_type=\"nf4\",\n ),\n )\n print(f\" Applying adapter: {adapter_id}...\")\n model = PeftModel.from_pretrained(base_model, adapter_id)\n model.eval()\n print(f\" Loaded! 
GPU memory: {torch.cuda.memory_allocated()/1e9:.1f}GB / {torch.cuda.get_device_properties(0).total_mem/1e9:.1f}GB\")\n return model, tokenizer\n\ndef generate_response(model, tokenizer, prompt):\n messages = [\n {\"role\": \"system\", \"content\": SYSTEM_PROMPT},\n {\"role\": \"user\", \"content\": prompt},\n ]\n try:\n text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n except Exception:\n text = f\"{SYSTEM_PROMPT}\\n\\nUser: {prompt}\\nAssistant:\"\n inputs = tokenizer(text, return_tensors=\"pt\").to(model.device)\n with torch.no_grad():\n outputs = model.generate(\n **inputs, max_new_tokens=64, temperature=0.1, top_p=0.9,\n do_sample=True, repetition_penalty=1.1, pad_token_id=tokenizer.pad_token_id,\n )\n return tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[1]:], skip_special_tokens=True).strip()\n\ndef free_model(model, tokenizer):\n del model, tokenizer\n gc.collect()\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n print(\" GPU memory freed.\")\n\ndef run_evaluation(model, tokenizer, test_cases, save_path=None):\n results = []\n for i, tc in enumerate(test_cases):\n raw = generate_response(model, tokenizer, tc[\"prompt\"])\n predicted = extract_tamil_answer(raw)\n sc = score_response(tc[\"expected\"], predicted)\n results.append({\n \"category\": tc[\"category\"], \"sub_id\": tc[\"sub_id\"],\n \"expected\": tc[\"expected\"], \"predicted\": predicted,\n \"raw_response\": raw[:200], \"score\": sc,\n })\n if (i + 1) % 50 == 0 or i == 0:\n cat_so_far = [r for r in results if r[\"category\"] == tc[\"category\"]]\n cat_acc = sum(r[\"score\"] for r in cat_so_far) / len(cat_so_far) if cat_so_far else 0\n print(f\" [{i+1}/{len(test_cases)}] {tc['category']}: {cat_acc:.1%}\")\n # Auto-save every 200 to protect against crashes\n if save_path and (i + 1) % 200 == 0:\n with open(save_path, \"w\", encoding=\"utf-8\") as f:\n json.dump(results, f, ensure_ascii=False, indent=2)\n if save_path:\n with open(save_path, \"w\", encoding=\"utf-8\") as f:\n json.dump(results, f, ensure_ascii=False, indent=2)\n return results\n\ndef generate_report(results, model_name):\n categories = {}\n for r in results:\n cat = r[\"category\"]\n if cat not in categories:\n categories[cat] = {\"total\": 0, \"exact\": 0, \"partial\": 0, \"wrong\": 0, \"score_sum\": 0}\n categories[cat][\"total\"] += 1\n categories[cat][\"score_sum\"] += r[\"score\"]\n if r[\"score\"] == 1.0: categories[cat][\"exact\"] += 1\n elif r[\"score\"] == 0.5: categories[cat][\"partial\"] += 1\n else: categories[cat][\"wrong\"] += 1\n print(f\"\\n{'='*80}\")\n print(f\"RESULTS: {model_name}\")\n print(f\"{'='*80}\")\n print(f\"{'Category':<40} {'Total':>6} {'Exact':>6} {'Partial':>8} {'Wrong':>6} {'Acc':>9}\")\n print(f\"{'-'*80}\")\n overall_score, overall_total, cat_scores = 0, 0, {}\n for cat_key in CATEGORY_NAMES:\n if cat_key in categories:\n c = categories[cat_key]\n acc = c[\"score_sum\"] / c[\"total\"] if c[\"total\"] > 0 else 0\n print(f\"{CATEGORY_NAMES[cat_key]:<40} {c['total']:>6} {c['exact']:>6} {c['partial']:>8} {c['wrong']:>6} {acc:>8.1%}\")\n overall_score += c[\"score_sum\"]; overall_total += c[\"total\"]\n cat_scores[cat_key] = round(acc * 100, 1)\n overall_acc = overall_score / overall_total if overall_total > 0 else 0\n print(f\"{'-'*80}\")\n print(f\"{'OVERALL':<40} {overall_total:>6} {'':>6} {'':>8} {'':>6} {overall_acc:>8.1%}\")\n print(f\"{'='*80}\")\n failures = [r for r in results if r[\"score\"] == 0.0]\n if failures:\n print(f\"\\nSample failures (first 
10):\")\n for f in failures[:10]:\n print(f\" [{f['category']}] expected='{f['expected']}' got='{f['predicted']}'\")\n return {\"model\": model_name, \"overall_accuracy\": round(overall_acc * 100, 1),\n \"total_cases\": overall_total, \"category_scores\": cat_scores}\n\nprint(\"All functions ready! GPU available:\", torch.cuda.is_available())\nif torch.cuda.is_available():\n for i in range(torch.cuda.device_count()):\n print(f\" GPU {i}: {torch.cuda.get_device_name(i)} ({torch.cuda.get_device_properties(i).total_mem/1e9:.1f}GB)\")",
+ "source": "# Cell 5: Scoring + Inference functions (Kaggle T4 x2 compatible)\nimport torch\nfrom transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig\n\ndef normalize_tamil(text):\n text = unicodedata.normalize(\"NFC\", text).strip()\n text = re.sub(r'[\"\"\"\\'`\\.\\,\\!\\?\\;\\:\\(\\)\\[\\]\\{\\}]', '', text)\n return text.strip()\n\ndef extract_tamil_answer(response):\n response = response.strip()\n tamil_pattern = re.compile(r'[\\u0B80-\\u0BFF][\\u0B80-\\u0BFF\\s]*[\\u0B80-\\u0BFF]')\n single_tamil = re.compile(r'[\\u0B80-\\u0BFF]+')\n for line in response.split(\"\\n\"):\n line = line.strip()\n if not line: continue\n if any(line.startswith(p) for p in [\"Note:\", \"Explanation:\", \"The \", \"This \", \"Here\"]): continue\n match = tamil_pattern.search(line)\n if match: return normalize_tamil(match.group())\n match = single_tamil.search(line)\n if match: return normalize_tamil(match.group())\n match = tamil_pattern.search(response)\n if match: return normalize_tamil(match.group())\n match = single_tamil.search(response)\n if match: return normalize_tamil(match.group())\n return normalize_tamil(response)\n\ndef score_response(expected, predicted):\n exp_norm, pred_norm = normalize_tamil(expected), normalize_tamil(predicted)\n if not pred_norm: return 0.0\n if exp_norm == pred_norm: return 1.0\n if exp_norm in pred_norm: return 1.0\n if pred_norm in exp_norm and len(pred_norm) > 2: return 0.5\n return 0.0\n\ndef _gpu_info():\n \"\"\"Safe GPU info string (works across PyTorch versions).\"\"\"\n try:\n alloc = torch.cuda.memory_allocated() / 1e9\n props = torch.cuda.get_device_properties(0)\n total = getattr(props, 'total_memory', None) or getattr(props, 'total_mem', 0)\n return f\"{alloc:.1f}GB / {total/1e9:.1f}GB\"\n except Exception:\n return \"unknown\"\n\ndef load_model(model_id):\n \"\"\"Load model in 4-bit. Fits 7B on single T4, 14B across T4x2.\"\"\"\n print(f\"\\nLoading {model_id}...\")\n tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)\n if tokenizer.pad_token is None:\n tokenizer.pad_token = tokenizer.eos_token\n model = AutoModelForCausalLM.from_pretrained(\n model_id, trust_remote_code=True, device_map=\"auto\",\n torch_dtype=torch.float16,\n quantization_config=BitsAndBytesConfig(\n load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16, bnb_4bit_quant_type=\"nf4\",\n ),\n )\n model.eval()\n print(f\" Loaded! GPU memory: {_gpu_info()}\")\n return model, tokenizer\n\ndef load_peft_model(base_id, adapter_id):\n \"\"\"Load base model in 4-bit, then apply LoRA adapter.\"\"\"\n from peft import PeftModel\n print(f\"\\nLoading base: {base_id}...\")\n tokenizer = AutoTokenizer.from_pretrained(base_id, trust_remote_code=True)\n if tokenizer.pad_token is None:\n tokenizer.pad_token = tokenizer.eos_token\n base_model = AutoModelForCausalLM.from_pretrained(\n base_id, trust_remote_code=True, device_map=\"auto\",\n torch_dtype=torch.float16,\n quantization_config=BitsAndBytesConfig(\n load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16, bnb_4bit_quant_type=\"nf4\",\n ),\n )\n print(f\" Applying adapter: {adapter_id}...\")\n model = PeftModel.from_pretrained(base_model, adapter_id)\n model.eval()\n print(f\" Loaded! GPU memory: {_gpu_info()}\")\n return model, tokenizer\n\ndef generate_response(model, tokenizer, prompt):\n messages = [\n {\"role\": \"system\", \"content\": SYSTEM_PROMPT},\n {\"role\": \"user\", \"content\": prompt},\n ]\n try:\n text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n except Exception:\n text = f\"{SYSTEM_PROMPT}\\n\\nUser: {prompt}\\nAssistant:\"\n inputs = tokenizer(text, return_tensors=\"pt\").to(model.device)\n with torch.no_grad():\n outputs = model.generate(\n **inputs, max_new_tokens=64, temperature=0.1, top_p=0.9,\n do_sample=True, repetition_penalty=1.1, pad_token_id=tokenizer.pad_token_id,\n )\n return tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[1]:], skip_special_tokens=True).strip()\n\ndef free_model(model, tokenizer):\n del model, tokenizer\n gc.collect()\n if torch.cuda.is_available():\n torch.cuda.empty_cache()\n print(\" GPU memory freed.\")\n\ndef run_evaluation(model, tokenizer, test_cases, save_path=None):\n results = []\n for i, tc in enumerate(test_cases):\n raw = generate_response(model, tokenizer, tc[\"prompt\"])\n predicted = extract_tamil_answer(raw)\n sc = score_response(tc[\"expected\"], predicted)\n results.append({\n \"category\": tc[\"category\"], \"sub_id\": tc[\"sub_id\"],\n \"expected\": tc[\"expected\"], \"predicted\": predicted,\n \"raw_response\": raw[:200], \"score\": sc,\n })\n if (i + 1) % 50 == 0 or i == 0:\n cat_so_far = [r for r in results if r[\"category\"] == tc[\"category\"]]\n cat_acc = sum(r[\"score\"] for r in cat_so_far) / len(cat_so_far) if cat_so_far else 0\n print(f\" [{i+1}/{len(test_cases)}] {tc['category']}: {cat_acc:.1%}\")\n # Auto-save every 200 to protect against crashes\n if save_path and (i + 1) % 200 == 0:\n with open(save_path, \"w\", encoding=\"utf-8\") as f:\n json.dump(results, f, ensure_ascii=False, indent=2)\n if save_path:\n with open(save_path, \"w\", encoding=\"utf-8\") as f:\n json.dump(results, f, ensure_ascii=False, indent=2)\n return results\n\ndef generate_report(results, model_name):\n categories = {}\n for r in results:\n cat = r[\"category\"]\n if cat not in categories:\n categories[cat] = {\"total\": 0, \"exact\": 0, \"partial\": 0, \"wrong\": 0, \"score_sum\": 0}\n categories[cat][\"total\"] += 1\n categories[cat][\"score_sum\"] += r[\"score\"]\n if r[\"score\"] == 1.0: categories[cat][\"exact\"] += 1\n elif r[\"score\"] == 0.5: categories[cat][\"partial\"] += 1\n else: categories[cat][\"wrong\"] += 1\n print(f\"\\n{'='*80}\")\n print(f\"RESULTS: {model_name}\")\n print(f\"{'='*80}\")\n print(f\"{'Category':<40} {'Total':>6} {'Exact':>6} {'Partial':>8} {'Wrong':>6} {'Acc':>9}\")\n print(f\"{'-'*80}\")\n overall_score, overall_total, cat_scores = 0, 0, {}\n for cat_key in CATEGORY_NAMES:\n if cat_key in categories:\n c = categories[cat_key]\n acc = c[\"score_sum\"] / c[\"total\"] if c[\"total\"] > 0 else 0\n print(f\"{CATEGORY_NAMES[cat_key]:<40} {c['total']:>6} {c['exact']:>6} {c['partial']:>8} {c['wrong']:>6} {acc:>8.1%}\")\n overall_score += c[\"score_sum\"]; overall_total += c[\"total\"]\n cat_scores[cat_key] = round(acc * 100, 1)\n overall_acc = overall_score / overall_total if overall_total > 0 else 0\n print(f\"{'-'*80}\")\n print(f\"{'OVERALL':<40} {overall_total:>6} {'':>6} {'':>8} {'':>6} {overall_acc:>8.1%}\")\n print(f\"{'='*80}\")\n failures = [r for r in results if r[\"score\"] == 0.0]\n if failures:\n print(f\"\nSample failures (first 10):\")\n for f in failures[:10]:\n print(f\" [{f['category']}] expected='{f['expected']}' got='{f['predicted']}'\")\n return {\"model\": model_name, \"overall_accuracy\": round(overall_acc * 100, 1),\n \"total_cases\": overall_total, \"category_scores\": cat_scores}\n\nprint(\"All functions ready! GPU available:\", torch.cuda.is_available())\nif torch.cuda.is_available():\n for i in range(torch.cuda.device_count()):\n print(f\" GPU {i}: {torch.cuda.get_device_name(i)} ({_gpu_info()})\")",
  "metadata": {},
  "execution_count": null,
  "outputs": []