Spaces:
Runtime error
Runtime error
j_yoon.song
committed on
Commit
·
754aba7
1
Parent(s):
1f16be9
minor
Browse files- app.py +2 -6
- src/about.py +3 -3
app.py
CHANGED
|
@@ -24,7 +24,6 @@ from src.display.utils import (
|
|
| 24 |
EVAL_COLS,
|
| 25 |
EVAL_TYPES,
|
| 26 |
AutoEvalColumn,
|
| 27 |
-
ModelType,
|
| 28 |
fields,
|
| 29 |
WeightType,
|
| 30 |
Precision
|
|
@@ -53,9 +52,6 @@ try:
|
|
| 53 |
except Exception:
|
| 54 |
restart_space()
|
| 55 |
|
| 56 |
-
|
| 57 |
-
# LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
|
| 58 |
-
|
| 59 |
(
|
| 60 |
finished_eval_queue_df,
|
| 61 |
running_eval_queue_df,
|
|
@@ -172,7 +168,7 @@ with demo:
|
|
| 172 |
gr.Markdown(EVALUATION_QUEUE_TEXT_OPTION1, elem_classes="markdown-text")
|
| 173 |
|
| 174 |
with gr.Row():
|
| 175 |
-
gr.Markdown("
|
| 176 |
|
| 177 |
with gr.Row():
|
| 178 |
with gr.Column():
|
|
@@ -227,7 +223,7 @@ with demo:
|
|
| 227 |
gr.Markdown(EVALUATION_QUEUE_TEXT_OPTION2, elem_classes="markdown-text")
|
| 228 |
|
| 229 |
with gr.Row():
|
| 230 |
-
gr.Markdown("
|
| 231 |
|
| 232 |
with gr.Row():
|
| 233 |
with gr.Column():
|
|
|
|
| 24 |
EVAL_COLS,
|
| 25 |
EVAL_TYPES,
|
| 26 |
AutoEvalColumn,
|
|
|
|
| 27 |
fields,
|
| 28 |
WeightType,
|
| 29 |
Precision
|
|
|
|
| 52 |
except Exception:
|
| 53 |
restart_space()
|
| 54 |
|
|
|
|
|
|
|
|
|
|
| 55 |
(
|
| 56 |
finished_eval_queue_df,
|
| 57 |
running_eval_queue_df,
|
|
|
|
| 168 |
gr.Markdown(EVALUATION_QUEUE_TEXT_OPTION1, elem_classes="markdown-text")
|
| 169 |
|
| 170 |
with gr.Row():
|
| 171 |
+
gr.Markdown("## ✉️✨ Submit your model here! (vLLM inference is possible)", elem_classes="markdown-text")
|
| 172 |
|
| 173 |
with gr.Row():
|
| 174 |
with gr.Column():
|
|
|
|
| 223 |
gr.Markdown(EVALUATION_QUEUE_TEXT_OPTION2, elem_classes="markdown-text")
|
| 224 |
|
| 225 |
with gr.Row():
|
| 226 |
+
gr.Markdown("## ✉️✨ Submit your model here! (vLLM inference is impossible)", elem_classes="markdown-text")
|
| 227 |
|
| 228 |
with gr.Row():
|
| 229 |
with gr.Column():
|
src/about.py
CHANGED
|
@@ -65,17 +65,17 @@ If everything is done, check you can launch the EleutherAIHarness on your model
|
|
| 65 |
"""
|
| 66 |
|
| 67 |
EVALUATION_QUEUE_TEXT_OPTION1 = """
|
| 68 |
-
|
| 69 |
Fill the information including model name, vLLM version, sampling hyperparameters.
|
| 70 |
"""
|
| 71 |
|
| 72 |
EVALUATION_QUEUE_TEXT_OPTION2 = """
|
| 73 |
-
|
| 74 |
Fill the information same with Option 1 and code snippets of model loading, inference, and termination.
|
| 75 |
"""
|
| 76 |
|
| 77 |
EVALUATION_QUEUE_TEXT_OPTION3 = """
|
| 78 |
-
|
| 79 |
If Option 1 & 2 is unavailable, make [PR](https://huggingface.co/spaces/coms1580/test_space/discussions?status=open&type=pull_request&sort=recently-created&new_pr=true) with [ADD_MODEL] prefix with contents as follows:
|
| 80 |
|
| 81 |
```
|
|
|
|
| 65 |
"""
|
| 66 |
|
| 67 |
EVALUATION_QUEUE_TEXT_OPTION1 = """
|
| 68 |
+
# (Option 1) Submit HF model where vLLM inference is available
|
| 69 |
Fill the information including model name, vLLM version, sampling hyperparameters.
|
| 70 |
"""
|
| 71 |
|
| 72 |
EVALUATION_QUEUE_TEXT_OPTION2 = """
|
| 73 |
+
# (Option 2) Submit HF model where vLLM inference is unavailable
|
| 74 |
Fill the information same with Option 1 and code snippets of model loading, inference, and termination.
|
| 75 |
"""
|
| 76 |
|
| 77 |
EVALUATION_QUEUE_TEXT_OPTION3 = """
|
| 78 |
+
# (Option 3) Pull Request
|
| 79 |
If Option 1 & 2 is unavailable, make [PR](https://huggingface.co/spaces/coms1580/test_space/discussions?status=open&type=pull_request&sort=recently-created&new_pr=true) with [ADD_MODEL] prefix with contents as follows:
|
| 80 |
|
| 81 |
```
|