import os
import json
import re
import uuid
import io
import csv
import random
from datetime import datetime

import openai
import gradio as gr
import numpy as np
import google.generativeai as genai

# -------------------------
# Config
# -------------------------
PHYSICIAN_COMPLETION_MODES = {"Group 1": 1, "Group 2": 2, "Group 3": 3}

DATASET_FILES = {
    "regular": os.path.join(os.path.dirname(__file__), "data", "oss_eval.jsonl"),
    "hard": os.path.join(os.path.dirname(__file__), "data", "hard_2025-05-08-21-00-10.jsonl"),
    "consensus": os.path.join(os.path.dirname(__file__), "data", "consensus_2025-05-09-20-00-46.jsonl"),
}

CANDIDATE_MODELS = [
    "gpt-4.1",
    "gpt-4o-mini",
    "gpt-5-chat-latest",
]

GRADER_MODEL = "gpt-4o-mini"

openai.api_key = os.getenv("OPENAI_API_KEY")
genai.configure(api_key=os.getenv("GEMINI_API_KEY"))

# Models that only support the default temperature
MODEL_DEFAULT_TEMP = ["o4-mini"]


# -------------------------
# Helper to read JSONL
# -------------------------
def read_jsonl(file_path):
    """Read a JSONL file into a list of dicts, one dict per line."""
    data = []
    with open(file_path, "r", encoding="utf-8") as f:
        for line in f:
            data.append(json.loads(line))
    return data


# -------------------------
# Evaluation class
# -------------------------
class HealthBenchEval:
    def __init__(self, dataset_file, num_examples=None, seed=None):
        self.all_data = read_jsonl(dataset_file)
        if seed is not None and num_examples:
            # Seeded random subsample for reproducible runs
            random.seed(seed)
            self.indices = random.sample(
                range(len(self.all_data)), min(num_examples, len(self.all_data))
            )
            self.dataset = [self.all_data[i] for i in self.indices]
        elif num_examples:
            # No seed: take the first N examples (bounded by the dataset size)
            n = min(num_examples, len(self.all_data))
            self.indices = list(range(n))
            self.dataset = self.all_data[:n]
        else:
            # Full dataset
            self.indices = list(range(len(self.all_data)))
            self.dataset = self.all_data
        self.scores = []
        self.htmls = ""
        self.sample_records = []
        self.seed = seed
        self.eval_id = str(uuid.uuid4())

    def score_with_grader(self, prompt_text, completion_text, example_index):
        prompt = f"""
You are a grader model. Score the following completion from 0.0 to 1.0.

Prompt:
{prompt_text}

Completion:
{completion_text}

Return only a number between 0 and 1.
"""
        try:
            resp = openai.chat.completions.create(
                model=GRADER_MODEL,
                messages=[{"role": "user", "content": prompt}],
                max_completion_tokens=50,
            )
            score_text = resp.choices[0].message.content.strip()
            # Extract the first number in [0, 1] from the grader's reply
            match = re.search(r"0(?:\.\d+)?|1(?:\.0+)?", score_text)
            score = float(match.group(0)) if match else 0.0
            return max(0.0, min(1.0, score))
        except Exception as e:
            print(f"Grader error: {e}")
            return 0.0

    def generate_with_candidate(self, candidate_model, system_prompt, prompt_text,
                                example_index, max_tokens=1024):
        for attempt in range(3):
            try:
                if candidate_model.startswith("gemini"):
                    # Gemini takes a single text prompt; fold the system prompt in
                    model = genai.GenerativeModel(candidate_model)
                    full_prompt = ""
                    if system_prompt:
                        full_prompt += f"System: {system_prompt}\n"
                    full_prompt += f"User: {prompt_text}"
                    response = model.generate_content(
                        full_prompt,
                        generation_config={"max_output_tokens": max_tokens, "temperature": 0.7},
                    )
                    completion = response.text if response.text else "[EMPTY GEMINI OUTPUT]"
                else:
                    messages = []
                    if system_prompt:
                        messages.append({"role": "system", "content": system_prompt})
                    messages.append({"role": "user", "content": prompt_text})
                    if candidate_model in MODEL_DEFAULT_TEMP:
                        # These models reject a non-default temperature
                        resp = openai.chat.completions.create(
                            model=candidate_model,
                            messages=messages,
                            max_completion_tokens=max_tokens,
                        )
                    else:
                        resp = openai.chat.completions.create(
                            model=candidate_model,
                            messages=messages,
                            temperature=0.7,
                            max_completion_tokens=max_tokens,
                        )
                    completion = resp.choices[0].message.content
                return completion.strip() if hasattr(completion, "strip") else completion
            except Exception as e:
                print(f"[ERROR] Candidate model {candidate_model} failed at dataset index "
                      f"{example_index} (attempt {attempt + 1}/3)")
                print(f"Prompt text: {prompt_text[:200]}...")
                print(f"Error: {e}")
                if attempt == 2:
                    return f"[ERROR after 3 retries: {str(e)}]"
    def __call__(self, candidate_model, system_prompt, eval_subset=""):
        """Run the candidate over the selected rows, grade each completion, and
        accumulate scores, per-sample records, and an HTML report."""
        html_lines = ["<h3>Evaluation Report</h3>", "<ol>"]
        running_total = 0.0
        timestamp = datetime.now().isoformat()
        for pos, example in enumerate(self.dataset):
            # Assumes each row stores its prompt under "prompt", either as a
            # plain string or as a HealthBench-style list of chat messages.
            raw_prompt = example.get("prompt", "")
            if isinstance(raw_prompt, list):
                prompt_text = "\n".join(m.get("content", "") for m in raw_prompt)
            else:
                prompt_text = str(raw_prompt)

            dataset_index = self.indices[pos]
            completion = self.generate_with_candidate(
                candidate_model, system_prompt, prompt_text, dataset_index
            )
            score = self.score_with_grader(prompt_text, completion, dataset_index)
            self.scores.append(score)
            running_total += score

            # One record per sample, matching the CSV/history schema below
            self.sample_records.append({
                "eval_id": self.eval_id,
                "timestamp": timestamp,
                "candidate_model": candidate_model,
                "system_prompt": system_prompt,
                "eval_subset": eval_subset,
                "seed": self.seed,
                "dataset_index": dataset_index,
                "prompt_text": prompt_text,
                "completion_text": completion,
                "score": score,
                "cumulative_total": running_total,
                "cumulative_avg": running_total / len(self.scores),
            })
            html_lines.append(
                f"<li><b>Row {dataset_index}</b> (score {score:.2f})<br>"
                f"<i>Prompt:</i> {prompt_text[:300]}<br>"
                f"<i>Completion:</i> {completion[:300]}</li>"
            )
        html_lines.append("</ol>")
        self.htmls = "\n".join(html_lines)
        return self


# -------------------------
# Evaluation history helpers
# -------------------------
RUN_FIELDS = [
    ("eval_id", "Eval ID"),
    ("timestamp", "Timestamp"),
    ("candidate_model", "Candidate Model"),
    ("system_prompt", "System Prompt"),
    ("eval_subset", "Eval Subset"),
    ("seed", "Seed"),
    ("dataset_index", "Dataset Row"),
    ("prompt_text", "Prompt Text"),
    ("completion_text", "Completion Text"),
    ("score", "Score"),
    ("cumulative_total", "Cumulative Total"),
    ("cumulative_avg", "Cumulative Avg"),
]


def generate_runs_html(runs):
    """Render the accumulated per-sample records as an HTML table."""
    if not runs:
        return "<p>No evaluations yet.</p>"
    header = "".join(f"<th>{label}</th>" for _, label in RUN_FIELDS)
    body_rows = []
    for run in runs:
        cells = "".join(f"<td>{run.get(field, '')}</td>" for field, _ in RUN_FIELDS)
        body_rows.append(f"<tr>{cells}</tr>")
    runs_html = f"<table border='1'><tr>{header}</tr>{''.join(body_rows)}</table>"
" return runs_html def generate_csv(runs): if not runs: return None output = io.StringIO() fieldnames = ["eval_id", "timestamp", "candidate_model", "system_prompt", "eval_subset", "seed", "dataset_index", "prompt_text", "completion_text", "score", "cumulative_total", "cumulative_avg"] writer = csv.DictWriter(output, fieldnames=fieldnames) writer.writeheader() for run in runs: writer.writerow(run) csv_data = output.getvalue() output.close() return csv_data def prepare_download(runs): csv_data = generate_csv(runs) if not csv_data: return None filename = f"eval_results_{datetime.now().strftime('%Y%m%d_%H%M%S')}.csv" filepath = os.path.join("/tmp", filename) with open(filepath, "w", encoding="utf-8") as f: f.write(csv_data) return filepath # ------------------------- # Gradio UI function # ------------------------- def run_eval_ui(candidate_model, system_prompt, eval_subset, num_examples, seed, runs): dataset_file = DATASET_FILES.get(eval_subset) if not dataset_file: return "Invalid dataset
", {}, generate_runs_html(runs), runs seed_val = int(seed) if seed else None num_val = int(num_examples) if num_examples else None eval_obj = HealthBenchEval(dataset_file, num_examples=num_val, seed=seed_val) result = eval_obj(candidate_model, system_prompt, eval_subset=eval_subset) runs.extend(result.sample_records) runs_html = generate_runs_html(runs) metrics = { "eval_id": result.eval_id, "mean_score": float(np.mean(result.scores)) if result.scores else 0.0, "std_score": float(np.std(result.scores)) if result.scores else 0.0, "n_samples": len(result.scores), "seed": seed_val } return result.htmls, metrics, runs_html, runs def clear_runs(): return "No evaluations yet.
", [] # ------------------------- # Gradio UI # ------------------------- def ui(): with gr.Blocks(title="HealthBench OpenAI + Gemini Evaluation") as demo: gr.Markdown("## HealthBench Evaluation (OpenAI + Gemini API-based)") with gr.Row(): candidate_model = gr.Dropdown( label="Candidate model", choices=CANDIDATE_MODELS, value="gpt-4o-mini", ) eval_subset = gr.Dropdown( label="Eval subset", choices=list(DATASET_FILES.keys()), value="regular" ) num_examples = gr.Number(label="# examples (leave blank for all)", value=1, precision=0) seed = gr.Textbox(label="Random Seed (optional)", placeholder="Enter a seed for reproducibility") system_prompt = gr.Textbox( label="System Prompt (optional)", placeholder="Enter a system prompt here for the candidate model", lines=3 ) run_btn = gr.Button("Run evaluation") output_html = gr.HTML(label="Evaluation Report") output_metrics = gr.JSON(label="Metrics JSON") output_all_runs = gr.HTML(label="Evaluation History") session_runs = gr.State([]) with gr.Row(): clear_btn = gr.Button("Clear History") download_btn = gr.DownloadButton( label="Download CSV", variant="secondary" ) run_btn.click( fn=run_eval_ui, inputs=[candidate_model, system_prompt, eval_subset, num_examples, seed, session_runs], outputs=[output_html, output_metrics, output_all_runs, session_runs] ) clear_btn.click( fn=clear_runs, inputs=[], outputs=[output_all_runs, session_runs] ) download_btn.click( fn=prepare_download, inputs=[session_runs], outputs=[download_btn] ) return demo if __name__ == "__main__": demo = ui() demo.queue(max_size=5) demo.launch()