j_yoon.song commited on
Commit
f6237c7
·
1 Parent(s): 8a48b81
Files changed (3) hide show
  1. app.py +43 -51
  2. src/about.py +11 -5
  3. src/submission/submit.py +16 -15
app.py CHANGED
@@ -170,41 +170,6 @@ with demo:
170
  with gr.Row():
171
  gr.Markdown(EVALUATION_QUEUE_TEXT_OPTION1, elem_classes="markdown-text")
172
 
173
- # with gr.Column():
174
- # with gr.Accordion(
175
- # f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
176
- # open=False,
177
- # ):
178
- # with gr.Row():
179
- # finished_eval_table = gr.components.Dataframe(
180
- # value=finished_eval_queue_df,
181
- # headers=EVAL_COLS,
182
- # datatype=EVAL_TYPES,
183
- # row_count=5,
184
- # )
185
- # with gr.Accordion(
186
- # f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
187
- # open=False,
188
- # ):
189
- # with gr.Row():
190
- # running_eval_table = gr.components.Dataframe(
191
- # value=running_eval_queue_df,
192
- # headers=EVAL_COLS,
193
- # datatype=EVAL_TYPES,
194
- # row_count=5,
195
- # )
196
-
197
- # with gr.Accordion(
198
- # f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
199
- # open=False,
200
- # ):
201
- # with gr.Row():
202
- # pending_eval_table = gr.components.Dataframe(
203
- # value=pending_eval_queue_df,
204
- # headers=EVAL_COLS,
205
- # datatype=EVAL_TYPES,
206
- # row_count=5,
207
- # )
208
  with gr.Row():
209
  gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
210
 
@@ -212,15 +177,6 @@ with demo:
212
  with gr.Column():
213
  model_name_textbox = gr.Textbox(label="Model name")
214
  revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
215
- model_type = gr.Dropdown(
216
- choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
217
- label="Model type",
218
- multiselect=False,
219
- value=None,
220
- interactive=True,
221
- )
222
-
223
- with gr.Column():
224
  precision = gr.Dropdown(
225
  choices=[i.value.name for i in Precision if i != Precision.Unknown],
226
  label="Precision",
@@ -228,14 +184,50 @@ with demo:
228
  value="float16",
229
  interactive=True,
230
  )
231
- weight_type = gr.Dropdown(
232
- choices=[i.value.name for i in WeightType],
233
- label="Weights type",
 
234
  multiselect=False,
235
- value="Original",
236
  interactive=True,
237
  )
238
- base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
239
 
240
  submit_button = gr.Button("Submit Eval")
241
  submission_result = gr.Markdown()
@@ -246,8 +238,8 @@ with demo:
246
  base_model_name_textbox,
247
  revision_name_textbox,
248
  precision,
249
- weight_type,
250
- model_type,
251
  ],
252
  submission_result,
253
  )
 
170
  with gr.Row():
171
  gr.Markdown(EVALUATION_QUEUE_TEXT_OPTION1, elem_classes="markdown-text")
172
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
173
  with gr.Row():
174
  gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
175
 
 
177
  with gr.Column():
178
  model_name_textbox = gr.Textbox(label="Model name")
179
  revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
 
 
 
 
 
 
 
 
 
180
  precision = gr.Dropdown(
181
  choices=[i.value.name for i in Precision if i != Precision.Unknown],
182
  label="Precision",
 
184
  value="float16",
185
  interactive=True,
186
  )
187
+ base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
188
+ vllm_version_type = gr.Dropdown(
189
+ choices=["v0.6.0", "v0.6.1.post1", "v0.6.1.post2", "v0.6.1", "v0.6.2", "v0.6.3.post1", "v0.6.4", "v0.6.4.post1", "v0.6.5"],
190
+ label="Model type",
191
  multiselect=False,
192
+ value=None,
193
  interactive=True,
194
  )
195
+ with gr.Column():
196
+ temperature_textbox = gr.Textbox(label="Sampling Temperature (default: 1.0)")
197
+ top_p_textbox = gr.Textbox(label="Top-p (default: 1.0)")
198
+ top_k_textbox = gr.Textbox(label="Top-k (default: -1)")
199
+ presence_penalty_textbox = gr.Textbox(label="Presence penalty (default: 0.0)")
200
+ frequency_penalty_textbox = gr.Textbox(label="Repetition penalty (default: 0.0)")
201
+ repetition_penalty_textbox = gr.Textbox(label="Repetition penalty (default: 1.0)")
202
+
203
+ # with gr.Row():
204
+ # with gr.Column():
205
+ # model_name_textbox = gr.Textbox(label="Model name")
206
+ # revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
207
+ # model_type = gr.Dropdown(
208
+ # choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
209
+ # label="Model type",
210
+ # multiselect=False,
211
+ # value=None,
212
+ # interactive=True,
213
+ # )
214
+
215
+ # with gr.Column():
216
+ # precision = gr.Dropdown(
217
+ # choices=[i.value.name for i in Precision if i != Precision.Unknown],
218
+ # label="Precision",
219
+ # multiselect=False,
220
+ # value="float16",
221
+ # interactive=True,
222
+ # )
223
+ # weight_type = gr.Dropdown(
224
+ # choices=[i.value.name for i in WeightType],
225
+ # label="Weights type",
226
+ # multiselect=False,
227
+ # value="Original",
228
+ # interactive=True,
229
+ # )
230
+ # base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
231
 
232
  submit_button = gr.Button("Submit Eval")
233
  submission_result = gr.Markdown()
 
238
  base_model_name_textbox,
239
  revision_name_textbox,
240
  precision,
241
+ # weight_type,
242
+ # model_type,
243
  ],
244
  submission_result,
245
  )
src/about.py CHANGED
@@ -65,12 +65,18 @@ If everything is done, check you can launch the EleutherAIHarness on your model
65
  """
66
 
67
  EVALUATION_QUEUE_TEXT_OPTION1 = """
68
- ## (Option 1) Direct Submission
69
- We encourage to fill "✉️✨ Submit your model here!" section below to submit your model.
70
  """
71
- EVALUATION_QUEUE_TEXT_OPTION4 = """
72
- ## (Option 4) Pull Request
73
- If Option 1-3 is unavailable, make [PR](https://huggingface.co/spaces/coms1580/test_space/discussions?status=open&type=pull_request&sort=recently-created&new_pr=true) with [ADD_MODEL] prefix with contents as follows:
 
 
 
 
 
 
74
 
75
  ```
76
  ### Open-weight models:
 
65
  """
66
 
67
  EVALUATION_QUEUE_TEXT_OPTION1 = """
68
+ ## (Option 1) Submit HF model where vLLM inference is available
69
+ Fill in the information, including model name, vLLM version, and sampling hyperparameters.
70
  """
71
+
72
+ EVALUATION_QUEUE_TEXT_OPTION2 = """
73
+ ## (Option 2) Submit HF model where vLLM inference is unavailable
74
+ Fill in the same information as Option 1, plus code snippets for model loading, inference, and termination.
75
+ """
76
+
77
+ EVALUATION_QUEUE_TEXT_OPTION3 = """
78
+ ## (Option 3) Pull Request
79
+ If Options 1 & 2 are unavailable, open a [PR](https://huggingface.co/spaces/coms1580/test_space/discussions?status=open&type=pull_request&sort=recently-created&new_pr=true) with the [ADD_MODEL] prefix and contents as follows:
80
 
81
  ```
82
  ### Open-weight models:
src/submission/submit.py CHANGED
@@ -19,8 +19,8 @@ def add_new_eval(
19
  base_model: str,
20
  revision: str,
21
  precision: str,
22
- weight_type: str,
23
- model_type: str,
24
  ):
25
  global REQUESTED_MODELS
26
  global USERS_TO_SUBMISSION_DATES
@@ -36,23 +36,23 @@ def add_new_eval(
36
  precision = precision.split(" ")[0]
37
  current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
38
 
39
- if model_type is None or model_type == "":
40
- return styled_error("Please select a model type.")
41
 
42
  # Does the model actually exist?
43
  if revision == "":
44
  revision = "main"
45
 
46
  # Is the model on the hub?
47
- if weight_type in ["Delta", "Adapter"]:
48
- base_model_on_hub, error, _ = is_model_on_hub(model_name=base_model, revision=revision, token=TOKEN, test_tokenizer=True)
49
- if not base_model_on_hub:
50
- return styled_error(f'Base model "{base_model}" {error}')
51
 
52
- if not weight_type == "Adapter":
53
- model_on_hub, error, _ = is_model_on_hub(model_name=model, revision=revision, token=TOKEN, test_tokenizer=True)
54
- if not model_on_hub:
55
- return styled_error(f'Model "{model}" {error}')
56
 
57
  # Is the model info correctly filled?
58
  try:
@@ -80,10 +80,10 @@ def add_new_eval(
80
  "base_model": base_model,
81
  "revision": revision,
82
  "precision": precision,
83
- "weight_type": weight_type,
84
  "status": "PENDING",
85
  "submitted_time": current_time,
86
- "model_type": model_type,
87
  "likes": model_info.likes,
88
  "params": model_size,
89
  "license": license,
@@ -97,7 +97,8 @@ def add_new_eval(
97
  print("Creating eval file")
98
  OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
99
  os.makedirs(OUT_DIR, exist_ok=True)
100
- out_path = f"{OUT_DIR}/{model_path}_eval_request_False_{precision}_{weight_type}.json"
 
101
 
102
  with open(out_path, "w") as f:
103
  f.write(json.dumps(eval_entry))
 
19
  base_model: str,
20
  revision: str,
21
  precision: str,
22
+ # weight_type: str,
23
+ # model_type: str,
24
  ):
25
  global REQUESTED_MODELS
26
  global USERS_TO_SUBMISSION_DATES
 
36
  precision = precision.split(" ")[0]
37
  current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
38
 
39
+ # if model_type is None or model_type == "":
40
+ # return styled_error("Please select a model type.")
41
 
42
  # Does the model actually exist?
43
  if revision == "":
44
  revision = "main"
45
 
46
  # Is the model on the hub?
47
+ # if weight_type in ["Delta", "Adapter"]:
48
+ # base_model_on_hub, error, _ = is_model_on_hub(model_name=base_model, revision=revision, token=TOKEN, test_tokenizer=True)
49
+ # if not base_model_on_hub:
50
+ # return styled_error(f'Base model "{base_model}" {error}')
51
 
52
+ # if not weight_type == "Adapter":
53
+ # model_on_hub, error, _ = is_model_on_hub(model_name=model, revision=revision, token=TOKEN, test_tokenizer=True)
54
+ # if not model_on_hub:
55
+ # return styled_error(f'Model "{model}" {error}')
56
 
57
  # Is the model info correctly filled?
58
  try:
 
80
  "base_model": base_model,
81
  "revision": revision,
82
  "precision": precision,
83
+ # "weight_type": weight_type,
84
  "status": "PENDING",
85
  "submitted_time": current_time,
86
+ # "model_type": model_type,
87
  "likes": model_info.likes,
88
  "params": model_size,
89
  "license": license,
 
97
  print("Creating eval file")
98
  OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
99
  os.makedirs(OUT_DIR, exist_ok=True)
100
+ # out_path = f"{OUT_DIR}/{model_path}_eval_request_False_{precision}_{weight_type}.json"
101
+ out_path = f"{OUT_DIR}/{model_path}_eval_request_False_{precision}.json"
102
 
103
  with open(out_path, "w") as f:
104
  f.write(json.dumps(eval_entry))