| """ConStellaration Progress Tracker. |
| |
| Read a single results.jsonl from the tracker dataset, render it in the same |
| leaderboard pattern as the official Space. No submit / evaluate paths. |
| """ |
| import json |
| import os |
| from pathlib import Path |
|
|
| import gradio as gr |
| import pandas as pd |
| from gradio_leaderboard import Leaderboard |
| from huggingface_hub import hf_hub_download |
|
|
| RESULTS_REPO = "phanerozoic/constellaration-tracker-results" |
| RESULTS_FILE = "results.jsonl" |
|
|
|
|
def make_user_link(name: str) -> str:
    """Render *name* as an HTML anchor pointing at its Hugging Face profile.

    The dotted-underline styling matches the official leaderboard Space so
    the two tables look alike.
    """
    profile_url = f"https://huggingface.co/{name}"
    css = (
        "color: var(--link-text-color); text-decoration: underline;"
        "text-decoration-style: dotted;"
    )
    return f'<a target="_blank" href="{profile_url}" style="{css}">{name}</a>'
|
|
|
|
def make_bundle_link(path: str) -> str:
    """Render a dataset file *path* as an HTML "bundle" anchor.

    Empty or missing paths render as an empty string so the table cell
    stays blank.
    """
    if not path:
        return ""
    href = f"https://huggingface.co/datasets/{RESULTS_REPO}/blob/main/{path}"
    css = (
        "color: var(--link-text-color); text-decoration: underline;"
        "text-decoration-style: dotted;"
    )
    return f'<a target="_blank" href="{href}" style="{css}">bundle</a>'
|
|
|
|
def load_results_df() -> pd.DataFrame:
    """Download the tracker's ``results.jsonl`` and shape it for display.

    Returns:
        A DataFrame with (at most) the columns ``submission time``,
        ``problem type``, ``user``, ``score``, ``bundle size`` and
        ``bundle`` (the last two rendered as HTML links), sorted by
        ``score`` descending. On any download/parse failure an *empty*
        DataFrame with that same schema is returned so the UI renders a
        blank table instead of crashing.
    """
    display_cols = ["submission time", "problem type", "user", "score", "bundle size", "bundle"]
    try:
        local = hf_hub_download(
            repo_id=RESULTS_REPO,
            repo_type="dataset",
            filename=RESULTS_FILE,
            force_download=True,  # always pick up the latest revision
        )
        # results.jsonl is UTF-8; be explicit so decoding is platform-independent.
        text = Path(local).read_text(encoding="utf-8")
        records = [json.loads(line) for line in text.splitlines() if line.strip()]
    except Exception as e:
        # Deliberate best-effort: any failure (network, auth, bad JSON)
        # degrades to an empty table rather than breaking the Space.
        records = [{"error": str(e)}]
    # Guard the shape of the first record: a malformed JSONL line can parse
    # to a non-dict, and `"error" in records[0]` would then raise TypeError.
    if not records or not isinstance(records[0], dict) or "error" in records[0]:
        return pd.DataFrame(columns=display_cols)
    df = pd.DataFrame(records)
    df = df.rename(columns={
        "submission_time": "submission time",
        "problem_type": "problem type",
        "bundle_size": "bundle size",
        "bundle_filename": "bundle",
    })
    if "user" in df.columns:
        df["user"] = df["user"].apply(make_user_link).astype(str)
    if "bundle" in df.columns:
        df["bundle"] = df["bundle"].apply(make_bundle_link).astype(str)
    # Keep only the display columns that actually exist, in display order.
    df = df[[c for c in display_cols if c in df.columns]]
    if "score" in df.columns:
        df = df.sort_values("score", ascending=False).reset_index(drop=True)
    return df
|
|
|
|
def gradio_interface() -> gr.Blocks:
    """Build the read-only tracker UI: a leaderboard tab plus an About tab.

    Returns:
        The assembled ``gr.Blocks`` demo (not yet launched).
    """
    with gr.Blocks() as demo:
        gr.Markdown("## ConStellaration Progress Tracker")
        gr.Markdown(
            "Unofficial fallback view of locally-evaluated ConStellaration "
            "benchmark submissions. The "
            "[official leaderboard](https://huggingface.co/spaces/proxima-fusion/constellaration-bench) "
            "remains the authoritative source for ranked submissions; this Space exists "
            "so progress remains visible during periods when the official Space is down "
            "or its result-write step is delayed."
        )
        with gr.Tabs(elem_classes="tab-buttons"):
            with gr.TabItem("🛰️ Tracker"):
                Leaderboard(
                    # Pass the function itself, not its result: `every` only
                    # re-polls when `value` is callable, so the previous
                    # `value=load_results_df()` rendered a one-shot snapshot
                    # and the 120-second refresh never fired.
                    value=load_results_df,
                    datatype=["date", "str", "html", "number", "number", "html"],
                    select_columns=["submission time", "problem type", "user", "score", "bundle size", "bundle"],
                    search_columns=["submission time", "score", "user", "problem type"],
                    filter_columns=["problem type"],
                    every=120,
                    render=True,
                )
                gr.Markdown(
                    "Scores are computed by running the standard `constellaration.problems` "
                    "evaluation locally. For `geometrical` and `simple_to_build`, scores are "
                    "bounded between 0.0 and 1.0. For `mhd_stable`, the score is unbounded "
                    "hypervolume of the feasible Pareto front in (-L_grad_B, aspect_ratio) "
                    "space with reference point (1.0, 20.0)."
                )

            with gr.TabItem("❔ About"):
                gr.Markdown(
                    """
### Purpose

The official [ConStellaration leaderboard](https://huggingface.co/spaces/proxima-fusion/constellaration-bench)
relies on a server-side scoring pipeline. When that pipeline is down or
delayed, submitted boundaries do not appear on the ranking even though
they have been uploaded. This Space mirrors what the official table
would show using local evaluations of the same scoring code.

### How scoring works

Every entry shown here is produced by running
`constellaration.problems.<problem>().evaluate(boundary)` on the same
boundary file that was (or will be) submitted to the official Space.
The scoring code is the public reference implementation shipped in the
[`constellaration`](https://github.com/proximafusion/constellaration)
package; no surrogate or approximation is used.

### What this Space is not

- Not an official Proxima Fusion artifact.
- Not a substitute for the official leaderboard when it is up.
- Not a submission interface; new entries are added by writing to the
  backing dataset `phanerozoic/constellaration-tracker-results`.

### Dataset

[`phanerozoic/constellaration-tracker-results`](https://huggingface.co/datasets/phanerozoic/constellaration-tracker-results)
is a public dataset with a single `results.jsonl` file. Each line is one
locally-evaluated record.
"""
                )

    return demo
|
|
|
|
if __name__ == "__main__":
    # Script entry point: build the read-only tracker UI and serve it.
    demo = gradio_interface()
    demo.launch()
|
|