import json
import os
import sys
from datetime import datetime, timezone
import requests
from src.display.formatting import styled_error, styled_message, styled_warning
from src.envs import API, settings
from src.submission.check_validity import (
already_submitted_models,
check_model_card,
get_model_size,
is_model_on_hub,
)
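# datetime.UTC was added in Python 3.11; fall back to timezone.utc on older interpreters.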
if sys.version_info < (3, 11):
UTC = timezone.utc
else:
from datetime import UTC
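# Lazily populated cache of "model_revision_precision" keys for already-submitted models.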
REQUESTED_MODELS: set[str] | None = None
def add_new_submit(
model: str,
base_model: str,
revision: str | None,
precision: str,
weight_type: str,
model_type: str,
json_str: str,
commit_message: str,
user_id: str,
):
"""
Submit a new evaluation request.
Args:
model: Model name (e.g., "org/model_name")
base_model: Base model name (for delta or adapter weights)
revision: Model revision/commit (defaults to "main" if empty)
precision: Model precision (e.g., "float16", "bfloat16")
weight_type: Weight type (e.g., "Original", "Delta", "Adapter")
model_type: Model type (e.g., "pretrained", "fine-tuned")
json_str: JSON string containing config and results
commit_message: Optional commit message
user_id: Submitter's HuggingFace user ID/username (from OAuth)
"""
global REQUESTED_MODELS
    if REQUESTED_MODELS is None:
REQUESTED_MODELS, _ = already_submitted_models(settings.EVAL_REQUESTS_PATH.as_posix())
    # Validate required fields before any Hub lookups
    if not model or not model.strip():
        return styled_error("Model name is required.")
    if not user_id or not user_id.strip():
        return styled_error("User ID/username is required. Please make sure you are logged in.")
    # Keep only the first token of the precision string (drops any UI suffix)
    if " " in precision:
        precision = precision.split(" ")[0]
    # Normalize an empty revision to None; "main" is substituted where a concrete ref is needed
    revision = revision or None
# Is the model on the hub?
if weight_type in ["Delta", "Adapter"]:
base_model_on_hub, error, _ = is_model_on_hub(
model_name=base_model,
revision=revision or "main",
token=settings.HF_TOKEN.get_secret_value(),
test_tokenizer=True,
)
if not base_model_on_hub:
return styled_error(f'Base model "{base_model}" {error}')
    if weight_type != "Adapter":
model_on_hub, error, _ = is_model_on_hub(
model_name=model,
revision=revision or "main",
token=settings.HF_TOKEN.get_secret_value(),
test_tokenizer=True,
)
if not model_on_hub:
return styled_error(f'Model "{model}" {error}')
# Is the model info correctly filled?
try:
model_info = API.model_info(repo_id=model, revision=revision)
except Exception:
return styled_error("Could not get your model information. Please fill it up properly.")
# Were the model card and license filled?
    try:
        _ = model_info.cardData["license"]  # presence check only; the value is unused here
    except Exception:
        return styled_error("Please select a license for your model.")
# Get current UTC time for submit_time
current_time = datetime.now(UTC).strftime("%Y-%m-%dT%H:%M:%SZ")
# Parse the evaluation results JSON (json_str contains config and results)
try:
eval_results = json.loads(json_str)
except json.JSONDecodeError:
return styled_error("Invalid evaluation results JSON format.")
# Organize all fields into a comprehensive JSON structure for the content field
# This will be the complete JSON that gets uploaded as a file
model_type = model_type.rpartition(":")[2].strip() # "⭕ : instruction-tuned" -> "instruction-tuned"
complete_submission_content = {
"user_id": user_id,
"model_id": model,
"base_model": base_model or "",
"model_sha": revision,
"model_dtype": precision,
"weight_type": weight_type,
"model_type": model_type or "",
"submit_time": current_time,
"commit_message": commit_message,
# Include the evaluation results (config and results)
"config": eval_results.get("config", {}),
"results": eval_results.get("results", {}),
}
# Convert the complete submission content to JSON string for the content field
complete_content_json_str = json.dumps(complete_submission_content, indent=2, ensure_ascii=False)
# Request JSON for the API call - includes all fields separately
request_json = {
"username": user_id,
"model_id": model,
"base_model": base_model or "",
"model_sha": revision,
"model_dtype": precision,
"weight_type": weight_type,
"model_type": model_type or "",
"content": complete_content_json_str, # Complete JSON with all fields
"submit_time": current_time,
"commit_message": commit_message,
}
# Check for duplicate submission
if f"{model}_{revision}_{precision}" in REQUESTED_MODELS:
return styled_warning("This model has been already submitted.")
try:
        response = requests.post(
            url=f"http://localhost:{settings.BACKEND_PORT}/api/v1/hf/community/submit/",
            json=request_json,  # send the payload as a JSON body
            headers={"Content-Type": "application/json"},
            timeout=30,
        )
        print("response: ", response)  # log the response object for debugging
if response.status_code == 200:
data = response.json()
print("returned data: ", data)
if data.get("code") == 0:
return styled_message(
"Your request has been submitted to the evaluation queue!\nPlease wait for the model to show in the PENDING list."
)
return styled_error("Submission unsuccessful.")
    except Exception as e:
        print("submission request failed: ", e)  # log the failure for debugging
        return styled_error("Submission unsuccessful.")
def add_new_eval(
model: str,
base_model: str,
revision: str,
precision: str,
weight_type: str,
model_type: str,
):
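    """
    Create an eval request file locally and upload it to the evaluation queue repo.

    Unlike add_new_submit, this writes the request JSON under EVAL_REQUESTS_PATH and
    pushes it to QUEUE_REPO_ID rather than posting to the backend API.
    """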
global REQUESTED_MODELS
    if REQUESTED_MODELS is None:
REQUESTED_MODELS, _ = already_submitted_models(settings.EVAL_REQUESTS_PATH.as_posix())
user_name = ""
model_path = model
if "/" in model:
user_name = model.split("/")[0]
model_path = model.split("/")[1]
precision = precision.split(" ")[0]
current_time = datetime.now(UTC).strftime("%Y-%m-%dT%H:%M:%SZ")
if model_type is None or model_type == "":
return styled_error("Please select a model type.")
# Does the model actually exist?
if revision == "":
revision = "main"
# Is the model on the hub?
if weight_type in ["Delta", "Adapter"]:
base_model_on_hub, error, _ = is_model_on_hub(
model_name=base_model, revision=revision, token=settings.HF_TOKEN.get_secret_value(), test_tokenizer=True
)
if not base_model_on_hub:
return styled_error(f'Base model "{base_model}" {error}')
    if weight_type != "Adapter":
model_on_hub, error, _ = is_model_on_hub(
model_name=model, revision=revision, token=settings.HF_TOKEN.get_secret_value(), test_tokenizer=True
)
if not model_on_hub:
return styled_error(f'Model "{model}" {error}')
# Is the model info correctly filled?
try:
model_info = API.model_info(repo_id=model, revision=revision)
except Exception:
return styled_error("Could not get your model information. Please fill it up properly.")
model_size = get_model_size(model_info=model_info, precision=precision)
# Were the model card and license filled?
    try:
        model_license = model_info.cardData["license"]  # renamed to avoid shadowing the builtin
    except Exception:
        return styled_error("Please select a license for your model.")
modelcard_OK, error_msg = check_model_card(model)
if not modelcard_OK:
return styled_error(error_msg)
# Seems good, creating the eval
print("Adding new eval")
eval_entry = {
"model": model,
"base_model": base_model,
"revision": revision,
"precision": precision,
"weight_type": weight_type,
"status": "PENDING",
"submitted_time": current_time,
"model_type": model_type,
"likes": model_info.likes,
"params": model_size,
"license": license,
"private": False,
}
# Check for duplicate submission
if f"{model}_{revision}_{precision}" in REQUESTED_MODELS:
        return styled_warning("This model has already been submitted.")
print("Creating eval file")
OUT_DIR = f"{settings.EVAL_REQUESTS_PATH}/{user_name}"
os.makedirs(OUT_DIR, exist_ok=True)
out_path = f"{OUT_DIR}/{model_path}_eval_request_False_{precision}_{weight_type}.json"
with open(out_path, "w") as f:
f.write(json.dumps(eval_entry))
print("Uploading eval file")
API.upload_file(
path_or_fileobj=out_path,
path_in_repo=out_path.split("eval-queue/")[1],
repo_id=settings.QUEUE_REPO_ID,
repo_type="dataset",
commit_message=f"Add {model} to eval queue",
)
# Remove the local file
os.remove(out_path)
return styled_message(
"Your request has been submitted to the evaluation queue!\nPlease wait for up to an hour for the model to show in the PENDING list."
)