"""
Test v6 Model on Real Email Benchmark.
Runs the model on 100 real emails from your MBOX
and measures accuracy per field.
Author: Ranjit Behera
"""
import json
import re
import subprocess
import sys
from collections import defaultdict
from pathlib import Path

MODEL_PATH = "models/base/phi3-finance-base"
ADAPTER_PATH = "models/adapters/finance-lora-v7"
BENCHMARK_FILE = "data/benchmark/real_emails_benchmark.json"
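# Expected benchmark format (inferred from how samples are used below): a JSON
# list of samples, each with 'id', 'text', and an 'expected_entities' dict
# mapping field names (amount, type, date, account, reference, bank, ...) to
# their auto-extracted values.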


def generate(prompt: str) -> str:
    """Generate response using mlx_lm.generate."""
    cmd = [
        sys.executable, "-m", "mlx_lm.generate",
        "--model", MODEL_PATH,
        "--adapter-path", ADAPTER_PATH,
        "--prompt", prompt,
        "--max-tokens", "200",
    ]
    try:
        result = subprocess.run(cmd, capture_output=True, text=True, timeout=120)
        return result.stdout
    except Exception as e:
        return f"Error: {e}"


def parse_json_from_output(output: str) -> dict:
    """Extract the first JSON object from model output."""
    try:
        # Match the first flat (non-nested) {...} block in the output.
        match = re.search(r'\{[^{}]+\}', output)
        if match:
            return json.loads(match.group())
    except json.JSONDecodeError:
        pass
    return {}
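# Illustrative behavior (values hypothetical):
#   parse_json_from_output('... {"amount": "45.00", "type": "debit"} ...')
#   -> {'amount': '45.00', 'type': 'debit'}
#   parse_json_from_output('no json here') -> {}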


def normalize_value(val: str) -> str:
    """Normalize a value for comparison."""
    if not val:
        return ''
    val = str(val).lower().strip()
    val = val.replace(',', '')
    # Trim trailing zeros only after a decimal point, so '45.00' and '45.0'
    # both become '45' while '100' stays '100'.
    if '.' in val:
        val = val.rstrip('0').rstrip('.')
    return val
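# Illustrative behavior (values hypothetical):
#   normalize_value('1,299.00') -> '1299'
#   normalize_value('Debit ')   -> 'debit'
# so formatting differences alone don't cause false mismatches.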


def run_real_benchmark(limit: int = 20):
    """Run benchmark on real emails."""
    print("=" * 70)
    print(f"🧪 REAL EMAIL BENCHMARK - {Path(ADAPTER_PATH).name}")
    print("=" * 70)
    print(f"Model: {MODEL_PATH}")
    print(f"Adapter: {ADAPTER_PATH}")
    print()

    # Load benchmark
    with open(BENCHMARK_FILE) as f:
        benchmark = json.load(f)
    # Filter for good candidates (both amount and bank were detected)
    good_samples = [
        s for s in benchmark
        if s['expected_entities'].get('amount')
        and s['expected_entities'].get('bank')
    ]
    if limit:
        good_samples = good_samples[:limit]

    print(f"Testing {len(good_samples)} real emails with auto-extracted labels...")
    print()

    # Track results
    field_stats = defaultdict(lambda: {'correct': 0, 'total': 0})
    bank_stats = defaultdict(lambda: {'correct': 0, 'total': 0})
    results = []

    for i, sample in enumerate(good_samples):
        text = sample['text']
        expected = sample['expected_entities']
        bank = expected.get('bank', 'unknown')

        # Create prompt (truncate the email body to keep prompts short)
        prompt = f"""Extract financial entities from this email:
{text[:500]}
Extract: amount, type, date, account, reference, merchant
Output JSON:"""

        # Generate
        output = generate(prompt)
        predicted = parse_json_from_output(output)

        # Compare fields
        sample_correct = 0
        sample_total = 0
        for field in ['amount', 'type', 'date', 'account', 'reference']:
            exp_val = normalize_value(expected.get(field, ''))
            pred_val = normalize_value(predicted.get(field, ''))
            if exp_val:  # Only count fields with an expected value
                field_stats[field]['total'] += 1
                sample_total += 1
                if exp_val == pred_val:
                    field_stats[field]['correct'] += 1
                    sample_correct += 1

        # Track bank accuracy: a sample counts as correct for its bank
        # only when every scored field matched
        bank_stats[bank]['total'] += 1
        if sample_total > 0 and sample_correct == sample_total:
            bank_stats[bank]['correct'] += 1

        results.append({
            'id': sample['id'],
            'expected': expected,
            'predicted': predicted,
            'accuracy': sample_correct / sample_total if sample_total > 0 else 0,
        })

        # Progress
        if (i + 1) % 5 == 0:
            print(f"  Processed {i + 1}/{len(good_samples)}...")

    # Print results
    print()
    print("=" * 70)
    print("📊 RESULTS BY FIELD (on REAL emails)")
    print("=" * 70)
    for field in ['amount', 'type', 'date', 'account', 'reference']:
        if field in field_stats:
            stats = field_stats[field]
            acc = stats['correct'] / stats['total'] * 100 if stats['total'] > 0 else 0
            status = "✅" if acc >= 90 else "⚠️" if acc >= 70 else "❌"
            print(f"  {field:12} {stats['correct']:3}/{stats['total']:3} = {acc:5.1f}% {status}")

    print()
    print("=" * 70)
    print("📊 RESULTS BY BANK")
    print("=" * 70)
    for bank in sorted(bank_stats.keys()):
        stats = bank_stats[bank]
        acc = stats['correct'] / stats['total'] * 100 if stats['total'] > 0 else 0
        status = "✅" if acc >= 80 else "⚠️" if acc >= 50 else "❌"
        print(f"  {bank.upper():12} {stats['correct']:3}/{stats['total']:3} = {acc:5.1f}% {status}")

    # Show some failures
    failures = [r for r in results if r['accuracy'] < 1.0][:3]
    if failures:
        print()
        print("=" * 70)
        print("❌ SAMPLE FAILURES (for debugging)")
        print("=" * 70)
        for f in failures:
            print(f"\n  ID {f['id']}:")
            print(f"    Expected:  {f['expected']}")
            print(f"    Predicted: {f['predicted']}")

    print()
    print("=" * 70)
    print("✅ Real Email Benchmark Complete!")
    print("=" * 70)


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--limit', type=int, default=15,
                        help='Number of samples to test')
    args = parser.parse_args()
    run_real_benchmark(limit=args.limit)
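# Usage (script filename assumed; run from the repo root with mlx-lm installed):
#   python test_real_email_benchmark.py --limit 20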