Spaces:
Runtime error
Runtime error
Commit
·
ed67886
1
Parent(s):
40b70c8
wip
Browse files- app.py +5 -0
- src/datamodel/data.py +4 -0
app.py
CHANGED
|
@@ -29,15 +29,18 @@ from src.display.css_html_js import custom_css
|
|
| 29 |
# Precision
|
| 30 |
# )
|
| 31 |
from src.envs import API, REPO_ID, TOKEN, CODE_PROBLEMS_REPO, SUBMISSIONS_REPO, RESULTS_REPO
|
|
|
|
| 32 |
# from src.populate import get_evaluation_queue_df, get_leaderboard_df
|
| 33 |
from src.submission.submit import add_new_solutions
|
| 34 |
|
|
|
|
| 35 |
|
| 36 |
def restart_space():
|
| 37 |
API.restart_space(repo_id=REPO_ID)
|
| 38 |
|
| 39 |
lbdb = F1Data(cp_ds_name=CODE_PROBLEMS_REPO, sub_ds_name=SUBMISSIONS_REPO, res_ds_name=RESULTS_REPO)
|
| 40 |
|
|
|
|
| 41 |
|
| 42 |
# (
|
| 43 |
# finished_eval_queue_df,
|
|
@@ -87,9 +90,11 @@ with demo:
|
|
| 87 |
# leaderboard = init_leaderboard(LEADERBOARD_DF)
|
| 88 |
|
| 89 |
with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
|
|
|
|
| 90 |
gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
|
| 91 |
|
| 92 |
with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
|
|
|
|
| 93 |
with gr.Column():
|
| 94 |
with gr.Row():
|
| 95 |
gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
|
|
|
|
| 29 |
# Precision
|
| 30 |
# )
|
| 31 |
from src.envs import API, REPO_ID, TOKEN, CODE_PROBLEMS_REPO, SUBMISSIONS_REPO, RESULTS_REPO
|
| 32 |
+
from src.logger import get_logger
|
| 33 |
# from src.populate import get_evaluation_queue_df, get_leaderboard_df
|
| 34 |
from src.submission.submit import add_new_solutions
|
| 35 |
|
| 36 |
+
logger = get_logger(__name__)
|
| 37 |
|
| 38 |
def restart_space():
|
| 39 |
API.restart_space(repo_id=REPO_ID)
|
| 40 |
|
| 41 |
lbdb = F1Data(cp_ds_name=CODE_PROBLEMS_REPO, sub_ds_name=SUBMISSIONS_REPO, res_ds_name=RESULTS_REPO)
|
| 42 |
|
| 43 |
+
logger.info("Initialized LBDB")
|
| 44 |
|
| 45 |
# (
|
| 46 |
# finished_eval_queue_df,
|
|
|
|
| 90 |
# leaderboard = init_leaderboard(LEADERBOARD_DF)
|
| 91 |
|
| 92 |
with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
|
| 93 |
+
logger.info("Tab about")
|
| 94 |
gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
|
| 95 |
|
| 96 |
with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
|
| 97 |
+
logger.info("Tab submission")
|
| 98 |
with gr.Column():
|
| 99 |
with gr.Row():
|
| 100 |
gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
|
src/datamodel/data.py
CHANGED
|
@@ -1,4 +1,5 @@
|
|
| 1 |
import functools
|
|
|
|
| 2 |
|
| 3 |
from datasets import load_dataset
|
| 4 |
|
|
@@ -20,5 +21,8 @@ class F1Data:
|
|
| 20 |
|
| 21 |
def initialize(self):
|
| 22 |
logger.info("Initialize F1Data TOKEN='%s'", TOKEN)
|
|
|
|
| 23 |
cp_ds = load_dataset(self.cp_dataset_name, split="hard", token=TOKEN)
|
|
|
|
| 24 |
self.code_problems: dict[str, str] = {r["formula_name"]: r["code_problem"]["problem_description"] for r in cp_ds}
|
|
|
|
|
|
| 1 |
import functools
|
| 2 |
+
import time
|
| 3 |
|
| 4 |
from datasets import load_dataset
|
| 5 |
|
|
|
|
| 21 |
|
| 22 |
def initialize(self):
|
| 23 |
logger.info("Initialize F1Data TOKEN='%s'", TOKEN)
|
| 24 |
+
start_time = time.monotonic()
|
| 25 |
cp_ds = load_dataset(self.cp_dataset_name, split="hard", token=TOKEN)
|
| 26 |
+
logger.info("Loaded code-problems dataset from %s in %f sec", self.cp_dataset_name, time.monotonic() - start_time)
|
| 27 |
self.code_problems: dict[str, str] = {r["formula_name"]: r["code_problem"]["problem_description"] for r in cp_ds}
|
| 28 |
+
logger.info("Code problems info: %s", self.code_problems)
|