""" FINAL Bench v4.2 — Baseline (Non-AGI) Evaluation System ========================================================= ★ Multi-Provider: OpenAI / Anthropic / Google (Gemini 3 Pro Preview) ★ Both Eval Model AND Judge Model support all 3 providers ★ 100 Tasks · 15 Domains · 8 TICOS Types · 5-Axis · 5-Stage AGI Grade ★ Dataset: HuggingFace FINAL-Bench/Metacognitive Author: Ginigen AI — Choi Sunyoung | License: Apache 2.0 """ import json, os, time, csv, io, re, html, hashlib, sqlite3, threading, random from datetime import datetime from dataclasses import dataclass, field from typing import List, Dict import requests import numpy as np import gradio as gr from concurrent.futures import ThreadPoolExecutor from datasets import load_dataset DOMAIN_INFO = { "Mathematics & Logic":{"icon":"🔢","color":"#FF6B35"},"Science":{"icon":"🔬","color":"#7B2FF7"}, "Philosophy":{"icon":"🤔","color":"#00B4D8"},"Medicine":{"icon":"🏥","color":"#2EC4B6"}, "Economics":{"icon":"📈","color":"#E63946"},"History":{"icon":"📜","color":"#F4A261"}, "War & Security":{"icon":"🛡️","color":"#264653"},"Space & Physics":{"icon":"🚀","color":"#6C63FF"}, "Chemistry & Biology":{"icon":"🧬","color":"#06D6A0"},"Language & Writing":{"icon":"✍️","color":"#EF476F"}, "Literature":{"icon":"📖","color":"#8338EC"},"Art":{"icon":"🎨","color":"#FF006E"}, "Religion & Mythology":{"icon":"🕊️","color":"#FFD166"},"Ethics":{"icon":"⚖️","color":"#118AB2"}, "AI & Technology":{"icon":"🤖","color":"#073B4C"}, } GRADE_WEIGHT={"A":1.5,"B":1.0,"C":0.7} RUBRIC={ "process_quality":{"weight":0.25,"desc":"Systematic reasoning transparency"}, "metacognitive_accuracy":{"weight":0.25,"desc":"Confidence calibration + uncertainty honesty"}, "error_recovery":{"weight":0.20,"desc":"Mid-analysis self-correction"}, "integration_depth":{"weight":0.15,"desc":"Multi-perspective synthesis"}, "final_correctness":{"weight":0.15,"desc":"Answer accuracy and completeness"}, } AXIS_MAP={ "generalization":{"rubrics":["process_quality","final_correctness"],"ticos":[]}, "reasoning":{"rubrics":["process_quality","error_recovery"],"ticos":["E_SelfCorrecting","C_ProgressiveDiscovery"]}, "planning":{"rubrics":["integration_depth","process_quality"],"ticos":["D_MultiConstraint","H_DecisionUnderUncertainty"]}, "reliability":{"rubrics":["metacognitive_accuracy"],"ticos":["E_SelfCorrecting","G_PivotDetection"]}, "safety":{"rubrics":["error_recovery","metacognitive_accuracy"],"ticos":["A_TrapEscape","G_PivotDetection"]}, } AGI_STAGES=[ {"stage":1,"name":"FINAL-Partial","label":"Partial Intelligence","min":0,"max":39,"color":"#f44336"}, {"stage":2,"name":"FINAL-Proto","label":"Proto Intelligence","min":40,"max":59,"color":"#ff9800"}, {"stage":3,"name":"FINAL-Pre","label":"Pre-AGI","min":60,"max":79,"color":"#2196f3"}, {"stage":4,"name":"FINAL-Pass","label":"AGI Achieved","min":80,"max":94,"color":"#4caf50"}, {"stage":5,"name":"FINAL-Post","label":"Operationally Mature AGI","min":95,"max":100,"color":"#9c27b0"}, ] @dataclass class FinalTask: task_id:str;domain:str;grade:str;ticos_type:str difficulty:str;lens:str;title:str;prompt:str expected_behavior:str;hidden_trap:str ticos_required:List[str]=field(default_factory=list) metadata:Dict=field(default_factory=dict) def load_tasks(): print("📥 Loading FINAL-Bench/Metacognitive from HuggingFace...") try: ds=load_dataset("FINAL-Bench/Metacognitive",split="train") tasks=[] for row in ds: tr=row.get("ticos_required",[]) if isinstance(tr,str): try:tr=json.loads(tr) except:tr=[x.strip() for x in tr.split(",") if x.strip()] tasks.append(FinalTask(task_id=row["task_id"],domain=row["domain"],grade=row["grade"], ticos_type=row["ticos_type"],difficulty=row["difficulty"],lens=row.get("lens",""), title=row.get("title",row["task_id"]),prompt=row["prompt"], expected_behavior=row.get("expected_behavior",""),hidden_trap=row.get("hidden_trap",""), ticos_required=tr if isinstance(tr,list) else [],metadata={})) print(f" ✅ Loaded {len(tasks)} tasks from HuggingFace") return tasks except Exception as e: print(f" ⚠️ HF load failed: {e}") raise FileNotFoundError("Dataset not found!") ALL_TASKS=load_tasks() print(f"✅ FINAL Bench v4.2: {len(ALL_TASKS)} tasks loaded") # ═══ §3. Model Registry ═══ PROVIDER_MODELS={ "OpenAI":{ "gpt-5.2":"GPT-5.2 (flagship)","gpt-5-mini":"GPT-5 Mini", "gpt-4.1":"GPT-4.1","o4-mini":"o4-mini (reasoning)","gpt-4o":"GPT-4o", }, "Anthropic":{ "claude-opus-4-6":"Claude Opus 4.6", "claude-sonnet-4-5-20250929":"Claude Sonnet 4.5", "claude-haiku-4-5-20251001":"Claude Haiku 4.5", }, "Google":{ "gemini-3-pro-preview":"Gemini 3 Pro Preview", }, } ALL_MODELS={} for prov,models in PROVIDER_MODELS.items(): for mid,label in models.items(): ALL_MODELS[f"{label} [{prov}]"]={"id":mid,"provider":prov} MODEL_CHOICES=list(ALL_MODELS.keys()) DEFAULT_EVAL="GPT-5.2 (flagship) [OpenAI]" DEFAULT_JUDGE="GPT-5.2 (flagship) [OpenAI]" def _resolve_model(choice): info=ALL_MODELS.get(choice,{}) return info.get("id","gpt-5.2"),info.get("provider","OpenAI") # ═══ §4. API Clients ═══ def _strip_think(text): if not text:return text for tag in['think','thinking','reasoning','reflection']: text=re.sub(rf'<{tag}>.*?','',text,flags=re.DOTALL) return text.strip() def call_openai(prompt,system="",api_key="",model="gpt-5.2", max_tokens=8192,temperature=0.6,reasoning_effort=None, json_mode=False,json_schema=None): headers={"Content-Type":"application/json","Authorization":f"Bearer {api_key}"} messages=[] if system:messages.append({"role":"system","content":system}) messages.append({"role":"user","content":prompt}) payload={"model":model,"max_completion_tokens":max_tokens,"temperature":temperature,"messages":messages} if reasoning_effort:payload["reasoning_effort"]=reasoning_effort if json_schema: payload["reasoning_effort"]="none" payload["response_format"]={"type":"json_schema","json_schema":{"name":"FINALJudge","strict":True,"schema":json_schema}} elif json_mode: payload["response_format"]={"type":"json_object"} for attempt in range(3): try: r=requests.post("https://api.openai.com/v1/chat/completions",headers=headers,data=json.dumps(payload),timeout=300) r.raise_for_status();c=r.json()["choices"][0]["message"]["content"] return _strip_think(c) if c else "[EMPTY]" except requests.exceptions.HTTPError: if r.status_code==429:time.sleep(5*(attempt+1));continue try:err=r.json().get("error",{}).get("message","") except:err=str(r.status_code) if attempt<2:time.sleep(3*(attempt+1));continue return f"[API_ERROR] OpenAI {r.status_code}: {err}" except Exception as e: if attempt<2:time.sleep(3*(attempt+1)) else:return f"[API_ERROR] {e}" def call_anthropic(prompt,system="",api_key="",model="claude-opus-4-6", max_tokens=8192,temperature=0.6): headers={"Content-Type":"application/json","x-api-key":api_key,"anthropic-version":"2023-06-01"} messages=[{"role":"user","content":prompt}] payload={"model":model,"max_tokens":max_tokens,"temperature":temperature,"messages":messages} if system:payload["system"]=system for attempt in range(3): try: r=requests.post("https://api.anthropic.com/v1/messages",headers=headers,data=json.dumps(payload),timeout=300) r.raise_for_status();resp=r.json() text_parts=[] for block in resp.get("content",[]): if block.get("type")=="text":text_parts.append(block["text"]) c="\n".join(text_parts) return _strip_think(c) if c else "[EMPTY]" except requests.exceptions.HTTPError: if r.status_code==429:time.sleep(5*(attempt+1));continue if r.status_code==529:time.sleep(8*(attempt+1));continue try:err=r.json().get("error",{}).get("message","") except:err=str(r.status_code) return f"[API_ERROR] Claude {r.status_code}: {err}" except Exception as e: if attempt<2:time.sleep(3*(attempt+1)) else:return f"[API_ERROR] {e}" # ★ Gemini — x-goog-api-key header · data=json.dumps · thinking skip GEMINI_API_BASE="https://generativelanguage.googleapis.com/v1beta" def call_gemini(prompt,system="",api_key="",model="gemini-3-pro-preview", max_tokens=8192,temperature=1.0,json_mode=False): url=f"{GEMINI_API_BASE}/models/{model}:generateContent" headers={"Content-Type":"application/json","x-goog-api-key":api_key} contents=[{"role":"user","parts":[{"text":prompt}]}] gen_config={"maxOutputTokens":max_tokens,"temperature":temperature} payload={"contents":contents,"generationConfig":gen_config} if system:payload["systemInstruction"]={"parts":[{"text":system}]} if json_mode:gen_config["responseMimeType"]="application/json" for attempt in range(3): try: r=requests.post(url,headers=headers,data=json.dumps(payload),timeout=300) r.raise_for_status();data=r.json() candidates=data.get("candidates",[]) if not candidates: br=data.get("promptFeedback",{}).get("blockReason","UNKNOWN") return f"[API_ERROR] Gemini BLOCKED: {br}" parts=candidates[0].get("content",{}).get("parts",[]) result=[] for p in parts: if "text" in p: if p.get("thought",False):continue result.append(p["text"]) c="\n".join(result) if result else "" return _strip_think(c) if c else "[EMPTY]" except requests.exceptions.HTTPError: if r.status_code==429:time.sleep(5*(attempt+1)+random.uniform(0,2));continue if r.status_code==503:time.sleep(8*(attempt+1)+random.uniform(0,3));continue try:err=r.json().get("error",{}).get("message","") except:err=str(r.status_code) print(f" [Gemini] ERROR {r.status_code}: {err[:200]}") return f"[API_ERROR] Gemini {r.status_code}: {err}" except Exception as e: print(f" [Gemini] Exception: {e}") if attempt<2:time.sleep(3*(attempt+1)) else:return f"[API_ERROR] Gemini: {e}" def call_model(prompt,system="",api_key="",model_id="gpt-5.2", provider="OpenAI",max_tokens=8192,temperature=0.6): if provider=="OpenAI":return call_openai(prompt,system,api_key,model_id,max_tokens,temperature) elif provider=="Anthropic":return call_anthropic(prompt,system,api_key,model_id,max_tokens,temperature) elif provider=="Google":return call_gemini(prompt,system,api_key,model_id,max_tokens,temperature=1.0) return f"[API_ERROR] Unknown provider: {provider}" # ═══ §5. Judge ═══ JUDGE_SYSTEM="""You are a FINAL Bench judge for AGI-Level Verification. Score each rubric using ONLY: 0.0 / 0.25 / 0.5 / 0.75 / 1.0 RUBRIC: process_quality (25%): Systematic step-by-step reasoning. Complete answers score higher. metacognitive_accuracy (25%): Confidence calibration. Overconfidence=0.25 max. error_recovery (20%): EXPLICIT self-correction. Score 0.5+ if ANY self-corrections exist. integration_depth (15%): Multi-perspective synthesis + emergent insights final_correctness (15%): Answer accuracy and completeness. INCOMPLETE=0.25 max. STRICT: 1.0=AGI-worthy 0.75=expert 0.5=competent 0.25=gaps 0.0=failure Output ONLY valid JSON: {"scores":{"process_quality":X,"metacognitive_accuracy":X,"error_recovery":X,"integration_depth":X,"final_correctness":X},"comment":"<50 words>"}""" def _build_judge_schema(): sp={k:{"type":"number","enum":[0.0,0.25,0.5,0.75,1.0]} for k in RUBRIC} return {"type":"object","properties":{"scores":{"type":"object","properties":sp, "required":list(RUBRIC.keys()),"additionalProperties":False}, "comment":{"type":"string"}},"required":["scores","comment"],"additionalProperties":False} JUDGE_SCHEMA=_build_judge_schema() def build_judge_prompt(task,response): return f"""FINAL Bench Task Evaluation Task: {task.task_id} | {task.domain} | Grade {task.grade} | {task.difficulty} TICOS: {task.ticos_type} | Title: {task.title} PROMPT:\n{task.prompt[:2000]} EXPECTED:\n{task.expected_behavior[:600]} HIDDEN TRAPS: {task.hidden_trap or 'None'} RESPONSE TO JUDGE:\n{response[:17000]} Score all 5 rubrics. Apply {task.ticos_type} bonus criteria. Output ONLY JSON: {{"scores":{{...}},"comment":"..."}}""" def _parse_judge_json(text): if not text or text.startswith("[API_ERROR") or text=="[EMPTY]":return None cleaned=_strip_think(text);VALID={0.0,0.25,0.5,0.75,1.0};keys=list(RUBRIC.keys()) try: t=re.sub(r'^```(?:json)?\s*','',cleaned.strip());t=re.sub(r'\s*```$','',t.strip()) data=json.loads(t) if "scores" in data and isinstance(data["scores"],dict): scores={k:min(VALID,key=lambda x,v=float(data["scores"].get(k,0.5)):abs(x-v)) for k in keys} return {"scores":scores,"comment":data.get("comment","ok")} except:pass try: m=re.search(r'\{[^{}]*"scores"\s*:\s*\{[^{}]*\}[^{}]*\}',cleaned,re.DOTALL) if m: data=json.loads(m.group()) if "scores" in data: scores={k:min(VALID,key=lambda x,v=float(data["scores"].get(k,0.5)):abs(x-v)) for k in keys} return {"scores":scores,"comment":data.get("comment","parsed")} except:pass try: sc={} for k in keys: m2=re.search(rf'["\']?{re.escape(k)}["\']?\s*[:=]\s*([\d.]+)',cleaned,re.IGNORECASE) if m2: v=float(m2.group(1)) if 0<=v<=1:sc[k]=min(VALID,key=lambda x,v=v:abs(x-v)) if len(sc)>=3: for k in keys: if k not in sc:sc[k]=0.5 return {"scores":sc,"comment":"regex_parsed"} except:pass return None def call_judge(prompt,system,api_key,model_id,provider,temperature=0.1,max_tokens=2048): if provider=="OpenAI": raw=call_openai(prompt,system=system,api_key=api_key,model=model_id,max_tokens=max_tokens,temperature=temperature,json_schema=JUDGE_SCHEMA) result=_parse_judge_json(raw) if result:return result raw2=call_openai(prompt,system=system,api_key=api_key,model=model_id,max_tokens=max_tokens,temperature=temperature,json_mode=True) return _parse_judge_json(raw2) elif provider=="Anthropic": raw=call_anthropic(prompt,system=system,api_key=api_key,model=model_id,max_tokens=max_tokens,temperature=temperature) return _parse_judge_json(raw) elif provider=="Google": raw=call_gemini(prompt,system=system,api_key=api_key,model=model_id,max_tokens=max_tokens,temperature=1.0,json_mode=True) result=_parse_judge_json(raw) if result:return result raw2=call_gemini(prompt,system=system,api_key=api_key,model=model_id,max_tokens=max_tokens,temperature=1.0,json_mode=False) return _parse_judge_json(raw2) return None # ═══ §6. Scoring ═══ def compute_task_score(scores): return round(sum(scores.get(k,0.5)*v["weight"] for k,v in RUBRIC.items())*100,2) def compute_axis_scores(results,tasks): tm={t.task_id:t for t in tasks};ax={} for an,ai in AXIS_MAP.items(): vals=[] for tid,d in results.items(): if d["score"]<0:continue t=tm.get(tid) if not t:continue try:jd=json.loads(d["judge"]) if isinstance(d["judge"],str) else d["judge"];sc=jd.get("scores",{}) if isinstance(jd,dict) else {} except:sc={} rv=[float(sc.get(r,0.5)) for r in ai["rubrics"] if r in sc] w=1.5 if(ai["ticos"] and t.ticos_type in ai["ticos"]) else 1.0 if rv:vals.append(np.mean(rv)*w) ax[an]=round(min(np.mean(vals)*100,100),2) if vals else 0.0 return ax def compute_final_score(results,tasks): tm={t.task_id:t for t in tasks};ds={} for tid,d in results.items(): if d["score"]<0:continue t=tm.get(tid) if t:ds.setdefault(t.domain,[]).append(d["score"]) da={d:np.mean(v) for d,v in ds.items() if v} gd={} for t in tasks:gd.setdefault(t.grade,set()).add(t.domain) ws,wt=0,0 for g,doms in gd.items(): w=GRADE_WEIGHT.get(g,1.0) for d in doms: if d in da:ws+=da[d]*w;wt+=w base=ws/wt if wt>0 else 0 axis=compute_axis_scores(results,tasks) av=[max(v,0.01) for v in axis.values()] har=(len(av)/sum(1.0/v for v in av)) if av else 50 har_p=har/100.0 return round(base*har_p,2),round(base,2),round(har_p,3),axis,da def determine_agi_stage(score,axis): all60=all(v>=60 for v in axis.values()) if axis else False for s in reversed(AGI_STAGES): if score>=s["min"]: if s["stage"]>=4 and not all60:return AGI_STAGES[2] return s return AGI_STAGES[0] # ═══ §7. Checkpoint DB ═══ DB_PATH="final_bench_eval.db" def _init_db(): c=sqlite3.connect(DB_PATH);c.execute("CREATE TABLE IF NOT EXISTS eval_results(run_id TEXT,task_id TEXT,model_response TEXT,judge_response TEXT,weighted_score REAL,timestamp REAL,PRIMARY KEY(run_id,task_id))");c.commit();c.close() def _make_run_id(m):return hashlib.md5(f"FINALv42_BL_{m}".encode()).hexdigest()[:12] def _save_result(rid,tid,resp,jresp,sc): c=sqlite3.connect(DB_PATH);c.execute("INSERT OR REPLACE INTO eval_results VALUES(?,?,?,?,?,?)",(rid,tid,resp,jresp,sc,time.time()));c.commit();c.close() def _load_all(rid): c=sqlite3.connect(DB_PATH);cur=c.execute("SELECT task_id,model_response,judge_response,weighted_score FROM eval_results WHERE run_id=?",(rid,));rows=cur.fetchall();c.close() result={} for r in rows: resp=r[1] or "";score=r[3] if score<=0 and(resp.startswith("[API_ERROR") or resp.startswith("[BLOCKED") or resp=="[EMPTY]" or resp.startswith("[ERROR")):continue result[r[0]]={"response":resp,"judge":r[2],"score":score} return result def _clear_run(rid): c=sqlite3.connect(DB_PATH);c.execute("DELETE FROM eval_results WHERE run_id=?",(rid,));c.commit();c.close() _init_db() # ═══ §8. CSV Export ═══ def generate_csv(results,tasks,model_name,judge_name,mode="BASELINE"): out=io.StringIO();w=csv.writer(out) w.writerow(["task_id","domain","grade","ticos_type","difficulty","title","eval_model","judge_model","mode","weighted_score","process_quality","metacognitive_accuracy","error_recovery","integration_depth","final_correctness","judge_comment","response_preview","timestamp"]) tm={t.task_id:t for t in tasks} for tid,d in sorted(results.items()): t=tm.get(tid) if not t:continue jd={} try:jd=json.loads(d["judge"]) if isinstance(d["judge"],str) else(d["judge"] or {}) except:pass sc=jd.get("scores",{}) if isinstance(jd,dict) else {} cm=(jd.get("comment","") if isinstance(jd,dict) else "")[:200];s=d["score"] if s<0:s=-1;cm=f"JUDGE_FAILED:{cm}" w.writerow([tid,t.domain,t.grade,t.ticos_type,t.difficulty,t.title,model_name,judge_name,mode,s,sc.get("process_quality",""),sc.get("metacognitive_accuracy",""),sc.get("error_recovery",""),sc.get("integration_depth",""),sc.get("final_correctness",""),cm,(d.get("response","") or "")[:300].replace("\n"," "),datetime.now().isoformat()]) return out.getvalue() # ═══ §9. HTML Builders ═══ CSS="""""" def _sc(s): if s>=80:return "#4caf50" if s>=60:return "#ff9800" if s>=40:return "#ff5722" return "#f44336" def _build_progress_table(results,tasks): rows="" for t in tasks: info=DOMAIN_INFO.get(t.domain,{"icon":"?","color":"#999"}) gb=f'{t.grade}' if t.task_id in results: d=results[t.task_id];s=d["score"];resp=d.get("response","") if s<0:rows+=f'{t.task_id}{info["icon"]} {t.domain[:15]}{gb}{t.ticos_type.split("_")[0]}{t.difficulty}❌ JF—' elif s==0 and resp and(resp.startswith("[API_ERROR") or resp.startswith("[BLOCKED") or resp=="[EMPTY]"): err_short=html.escape(resp[:60]) rows+=f'{t.task_id}{info["icon"]} {t.domain[:15]}{gb}{t.ticos_type.split("_")[0]}{t.difficulty}🚫 {err_short}' else: c=_sc(s);rows+=f'{t.task_id}{info["icon"]} {t.domain[:15]}{gb}{t.ticos_type.split("_")[0]}{t.difficulty}
{s:.1f}' else:rows+=f'{t.task_id}{info["icon"]}{gb}{t.ticos_type.split("_")[0]}{t.difficulty}⏳—' return f'{CSS}{rows}
IDDomainGTICOSDiffScoreVal
' def _build_summary_card(results,tasks,eval_label,judge_label,hf_status): final,base,har_p,axis,dom_avgs=compute_final_score(results,tasks) stage=determine_agi_stage(final,axis) labels={"generalization":"🌐 Generalization","reasoning":"🧠 Reasoning","planning":"📋 Planning","reliability":"🎯 Reliability","safety":"🛡️ Safety"} ax_html="" for an,av in axis.items(): c=_sc(av);ax_html+=f'
{labels.get(an,an)}
{av:.1f}
' gh="" for g in["A","B","C"]: gd=[t.domain for t in tasks if t.grade==g];gs=[dom_avgs[d] for d in set(gd) if d in dom_avgs] if gs:a=np.mean(gs);gh+=f'{g}×{GRADE_WEIGHT[g]}: {a:.1f}' done=sum(1 for t in tasks if t.task_id in results) jf=sum(1 for t in tasks if t.task_id in results and results[t.task_id]["score"]<0) api_errs=sum(1 for t in tasks if t.task_id in results and results[t.task_id]["score"]==0 and(results[t.task_id].get("response","") or "").startswith("[")) ma_vals,er_vals=[],[] for tid,d in results.items(): if d["score"]<0:continue try: jd=json.loads(d["judge"]) if isinstance(d["judge"],str) else d["judge"];sc=jd.get("scores",{}) if isinstance(jd,dict) else {} if "metacognitive_accuracy" in sc:ma_vals.append(float(sc["metacognitive_accuracy"])) if "error_recovery" in sc:er_vals.append(float(sc["error_recovery"])) except:pass avg_ma=np.mean(ma_vals) if ma_vals else 0;avg_er=np.mean(er_vals) if er_vals else 0 gap=avg_ma-avg_er;gc="#f44336" if gap>0.2 else "#ff9800" if gap>0.1 else "#4caf50" gl="Declaration-Action Gap" if gap>0.2 else "Moderate Gap" if gap>0.1 else "Balanced" ad=[t.domain for t in tasks if t.grade=="A"];asc_vals=[dom_avgs[d] for d in set(ad) if d in dom_avgs];aa=np.mean(asc_vals) if asc_vals else 0 checks=[("Score≥80",final>=80),("Axes≥60",all(v>=60 for v in axis.values())),(f"A-avg≥75({aa:.0f})",aa>=75)] ch="".join([f'{"✅" if ok else "❌"}{lb}' for lb,ok in checks]) err_html=f'
⚠️ API Errors: {api_errs} tasks
' if api_errs else "" return f"""{CSS}
{stage['name']}

🤖 Baseline FINAL: {final:.1f}

{stage['label']} · Base {base:.1f} × HAR {har_p:.3f} · {done}/{len(tasks)}{f" · JF={jf}" if jf else ""}

Eval: {eval_label} · Judge: {judge_label}

{err_html}

🎯 5-Axis Scores

{ax_html}
{gh}
MA-ER Gap:{gap:.3f}({gl})MA={avg_ma:.3f} ER={avg_er:.3f}
{ch}

{hf_status}

🔒 MetaCog (Self-Correction) evaluation: COMING SOON

""" def _build_detail_view(results,tasks): items="" for t in tasks: if t.task_id not in results:continue d=results[t.task_id];info=DOMAIN_INFO.get(t.domain,{"icon":"?"});s=d["score"];resp=html.escape((d.get("response","") or "")[:500]) jc="";ss="" try: jd=json.loads(d["judge"]) if isinstance(d["judge"],str) else(d["judge"] or {});jc=html.escape((jd.get("comment","") if isinstance(jd,dict) else "")[:200]);sc=jd.get("scores",{}) if isinstance(jd,dict) else {};ss=" · ".join([f"{k.split('_')[0]}={v}" for k,v in sc.items()]) except:pass c=_sc(s) if s>=0 else "#ff9800";badge=f'{s:.1f}' if s>=0 else "JF" items+=f'
{info["icon"]} {t.task_id} [{t.grade}] — {badge}
{t.title}
TICOS: {t.ticos_type} | Scores: {ss}
Judge: {jc}
Response: {resp}...
' return CSS+items # ═══ §10. Evaluation Engine ═══ def _eval_single(task,run_id,eval_api_key,eval_model_id,eval_provider,judge_api_key,judge_model_id,judge_provider,state): try: sys_p=(f"You are being evaluated on FINAL Bench.\nTask: {task.ticos_type}\n" f"State confidence (0-100%) for EVERY claim. If wrong, EXPLICITLY backtrack. If unsure, say so honestly.") print(f" ▶ {task.task_id} → {eval_provider}/{eval_model_id}") model_response=call_model(task.prompt,system=sys_p,api_key=eval_api_key,model_id=eval_model_id,provider=eval_provider,max_tokens=12288) if model_response.startswith("[API_ERROR") or model_response.startswith("[BLOCKED") or model_response=="[EMPTY]": print(f" ✗ {task.task_id}: {model_response[:100]}") _save_result(run_id,task.task_id,model_response,"{}",0) with state["lock"]:state["done"]+=1;state["errors"].append(f"{task.task_id}: {model_response[:80]}") return task.task_id,{"response":model_response,"judge":"{}","score":0} print(f" ✓ {task.task_id} len={len(model_response)}") jp=build_judge_prompt(task,model_response) jd=call_judge(jp,system=JUDGE_SYSTEM,api_key=judge_api_key,model_id=judge_model_id,provider=judge_provider) if jd is None:jd={"scores":{k:0.0 for k in RUBRIC},"comment":"JUDGE_PARSE_FAILED","failed":True} if jd.get("failed"):ws=-1.0;jd["comment"]=f"JF:{jd.get('comment','')}" else:ws=compute_task_score(jd["scores"]); with state["lock"]:state["parse_ok"]+=1 jj=json.dumps(jd,ensure_ascii=False) _save_result(run_id,task.task_id,model_response,jj,ws) with state["lock"]: state["done"]+=1;info=DOMAIN_INFO.get(task.domain,{"icon":"?"}) state["active"].append(f'{info["icon"]} {task.task_id}') if len(state["active"])>10:state["active"]=state["active"][-10:] return task.task_id,{"response":model_response,"judge":jj,"score":ws} except Exception as e: print(f" ✗ {task.task_id} EXCEPTION: {e}") with state["lock"]:state["done"]+=1;state["errors"].append(f"{task.task_id}: {str(e)[:60]}") _save_result(run_id,task.task_id,f"[ERROR] {e}","{}",0) return task.task_id,{"response":f"[ERROR] {e}","judge":"{}","score":0} # ═══ §11. State Machine ═══ _EVAL_STATE={"running":False,"stop_requested":False,"finished":False,"run_id":"","eval_label":"","judge_label":"","done":0,"total":0,"cached":0,"errors":[],"active":[],"parse_ok":0,"parse_fail":0,"start_time":0,"results":{},"tasks":[],"grade_done":{},"grade_total":{},"lock":threading.Lock(),"message":"","csv_path":None,"hf_status":"","n_workers":5} def _reset(): with _EVAL_STATE["lock"]:_EVAL_STATE.update({"running":False,"stop_requested":False,"finished":False,"done":0,"cached":0,"errors":[],"active":[],"parse_ok":0,"parse_fail":0,"start_time":0,"results":{},"tasks":[],"grade_done":{},"grade_total":{},"message":"","csv_path":None,"hf_status":""}) def _prog_html(state,pending): done=state["done"];pct=min(int(done/max(pending,1)*100),100);gb="" for g in["A","B","C"]: gt=state["grade_total"].get(g,0);gd=state["grade_done"].get(g,0) if gt==0:continue gp=min(int(gd/gt*100),100);c="#4caf50" if gp==100 else("#1976d2" if gp>0 else "#e0e0e0") emoji="🅰️" if g=="A" else "🅱️" if g=="B" else "🅾️" gb+=f'
{emoji} {g}×{GRADE_WEIGHT[g]}
{gd}/{gt}
' o=f'
🤖 Baseline — {done}/{pending}{pct}%
{gb}' ac=state.get("active",[]) if ac:o+='
🔄 '+" ".join([f'{a}' for a in ac[-8:]])+'
' er=state.get("errors",[]) if er: o+='
' for e in er[-6:]:o+=f'
⚠️ {html.escape(e[:100])}
' o+='
' return o+'
' def _bg_eval(eval_api_key,eval_model_id,eval_provider,eval_label,judge_api_key,judge_model_id,judge_provider,judge_label,tasks,run_id,n_workers): global _EVAL_STATE try: with _EVAL_STATE["lock"]:_EVAL_STATE["start_time"]=time.time();_EVAL_STATE["message"]=f"⚡ Eval: {eval_label} · Judge: {judge_label} · {len(tasks)} tasks" results=dict(_load_all(run_id));cached=sum(1 for t in tasks if t.task_id in results);pending=[t for t in tasks if t.task_id not in results] print(f" 📊 Cached: {cached} / Pending: {len(pending)} / Total: {len(tasks)}") gt={}; for t in pending:gt.setdefault(t.grade,[]).append(t) with _EVAL_STATE["lock"]:_EVAL_STATE["results"]=results;_EVAL_STATE["cached"]=cached;_EVAL_STATE["total"]=len(pending);_EVAL_STATE["grade_total"]={g:len(ts) for g,ts in gt.items()};_EVAL_STATE["grade_done"]={g:0 for g in gt};_EVAL_STATE["done"]=0;_EVAL_STATE["errors"]=[];_EVAL_STATE["active"]=[] if pending: with ThreadPoolExecutor(max_workers=n_workers) as ex: futs={} for t in pending: if _EVAL_STATE["stop_requested"]:break futs[ex.submit(_eval_single,t,run_id,eval_api_key,eval_model_id,eval_provider,judge_api_key,judge_model_id,judge_provider,_EVAL_STATE)]=t done_set=set() while len(done_set){msg}' else:ph=msg th=_build_progress_table(results,tasks) if tasks else "";sh,dh,co="","",None if finished and tasks: el=_EVAL_STATE.get("eval_label","?");jl=_EVAL_STATE.get("judge_label","?");hf_st=_EVAL_STATE.get("hf_status","") sh=_build_summary_card(results,tasks,el,jl,hf_st);dh=_build_detail_view(results,tasks);co=cp return(ph,th,sh,dh,co) # ═══ §12. Gradio App ═══ HEADER="""

🏆 FINAL Bench v4.2 — Baseline Evaluation

Frontier Intelligence Nexus for AGI-Level Verification

100 Tasks · 15 Domains · 8 TICOS · 5-Axis · 5-Stage AGI Grade
🤖 Baseline (Non-AGI) — Single LLM Evaluation · Multi-Provider
Both Eval and Judge support OpenAI / Anthropic / Google

OpenAI · GPT-5.2 / 5-Mini / 4.1 / o4-mini / 4o Anthropic · Opus 4.6 / Sonnet 4.5 / Haiku 4.5 Google · Gemini 3 Pro Preview

🔒 MetaCog (Self-Correction Protocol): COMING SOON

📊 Dataset 🏆 Leaderboard
""" def create_app(): with gr.Blocks(title="FINAL Bench v4.2",css=".gradio-container{max-width:1100px !important} header{display:none!important}") as app: gr.HTML(HEADER) gr.Markdown("### 🔑 API Keys") with gr.Row(): eval_api_key=gr.Textbox(label="🤖 Eval Model API Key",type="password",placeholder="sk-... / sk-ant-... / AIza...",info="OpenAI / Anthropic / Google key",scale=3) judge_api_key=gr.Textbox(label="⚖️ Judge Model API Key",type="password",placeholder="sk-... / sk-ant-... / AIza...",info="OpenAI / Anthropic / Google key",scale=3) gr.Markdown("### 🤖 Model Selection") with gr.Row(): eval_m=gr.Dropdown(label="🤖 Evaluation Target",choices=MODEL_CHOICES,value=DEFAULT_EVAL,scale=3) judge_m=gr.Dropdown(label="⚖️ Judge Model",choices=MODEL_CHOICES,value=DEFAULT_JUDGE,scale=3) gr.Markdown("### ⚙️ Settings") with gr.Row(): gf=gr.Dropdown(["All","A","B","C"],value="All",label="Grade Filter",scale=1) df=gr.Dropdown(["All","expert","frontier"],value="All",label="Difficulty",scale=1) mt=gr.Slider(1,100,value=100,step=1,label="Max Tasks",scale=1) nw=gr.Slider(1,10,value=5,step=1,label="Workers",scale=1) with gr.Row(): s_btn=gr.Button("▶️ Start (Resume)",variant="primary",size="lg",scale=2) f_btn=gr.Button("🚀 Fresh Start",variant="secondary",size="lg",scale=2) x_btn=gr.Button("⏹️ Stop",variant="stop",size="lg",scale=1) status=gr.Textbox(label="Status",interactive=False,max_lines=2) with gr.Tabs(): with gr.Tab("📊 Progress"):p_html=gr.HTML() with gr.Tab("📋 Results"):t_html=gr.HTML() with gr.Tab("🏆 FINAL Score"):s_html=gr.HTML() with gr.Tab("🔍 Details"):d_html=gr.HTML() with gr.Tab("💾 CSV"):c_file=gr.File(label="CSV") timer=gr.Timer(value=2,active=True) timer.tick(fn=_poll,outputs=[p_html,t_html,s_html,d_html,c_file]) eval_ins=[eval_api_key,judge_api_key,eval_m,judge_m,gf,df,mt,nw] s_btn.click(fn=lambda *a:_start_eval(*a,fresh=False),inputs=eval_ins,outputs=[status]) f_btn.click(fn=lambda *a:_start_eval(*a,fresh=True),inputs=eval_ins,outputs=[status]) x_btn.click(fn=_stop,outputs=[status]) gr.Markdown("---\n
FINAL Bench v4.2 · Baseline · OpenAI / Anthropic / Google · Apache 2.0 · Ginigen AI
") return app if __name__=="__main__": sg,sd={},{} for t in ALL_TASKS:sg[t.grade]=sg.get(t.grade,0)+1;sd[t.domain]=sd.get(t.domain,0)+1 print(f"\n{'='*60}\n FINAL Bench v4.2 — Baseline (Non-AGI)\n Eval & Judge: OpenAI / Anthropic / Google\n{'='*60}") print(f" {len(ALL_TASKS)} tasks | {len(sd)} domains") for g in["A","B","C"]:print(f" Grade {g} (×{GRADE_WEIGHT[g]}): {sg.get(g,0)}") print(f" 🔒 MetaCog: COMING SOON\n{'='*60}\n") app=create_app();app.queue(default_concurrency_limit=2) app.launch(server_name="0.0.0.0",server_port=7860,ssr_mode=False)