Paul Gavrikov committed on
Commit
6c3527c
·
1 Parent(s): c10f6fe

initial test

Browse files
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ # track ground truth file with Git LFS (note: LFS stores the file out-of-band but does NOT hide it from repository visitors)
37
+ ground_truth.json filter=lfs diff=lfs merge=lfs -text
__pycache__/eval_utils.cpython-311.pyc ADDED
Binary file (526 Bytes). View file
 
app.py ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
import json
import os
import uuid
from eval_utils import evaluate_submission

# Path to the private answer key used for scoring (loaded server-side only).
GROUND_TRUTH_PATH = "ground_truth.json"
# Directory where accepted submissions are archived as JSON files.
SUBMISSION_DIR = "submissions"
RATE_LIMIT_SECONDS = 60  # basic rate limit

# Ensure the archive directory exists before the app starts serving.
os.makedirs(SUBMISSION_DIR, exist_ok=True)
# Maps username -> timestamp of that user's most recent accepted submission.
# NOTE(review): in-memory only — resets on every restart and is not shared
# across worker processes; confirm this is acceptable for the deployment.
user_last_submission = {}
13
+
14
def submit(file, identifier, request: gr.Request):
    """Score an uploaded prediction file and archive the submission.

    Parameters
    ----------
    file : file-like value delivered by the gradio File component.
    identifier : str
        Free-form label chosen by the submitter (e.g. model/prompt name).
    request : gr.Request
        Injected by gradio; supplies the authenticated Hugging Face username.

    Returns
    -------
    str
        A status message shown in the UI (rate-limit notice, error, or score).
    """
    from time import time  # imported lazily, as in the original module
    import re

    username = request.username  # Hugging Face username from the auth layer

    # Basic per-user rate limiting against the in-memory timestamp map.
    now = time()
    last = user_last_submission.get(username)
    if last is not None and now - last < RATE_LIMIT_SECONDS:
        return f"Please wait before submitting again (rate limit: {RATE_LIMIT_SECONDS}s)."

    # Read submission; a malformed upload must not crash the handler.
    try:
        submission_data = json.load(file)
    except (json.JSONDecodeError, UnicodeDecodeError):
        return "Invalid submission: the uploaded file is not valid JSON."

    # Load private ground truth (not exposed to users)
    with open(GROUND_TRUTH_PATH, "r") as f:
        ground_truth = json.load(f)

    # Evaluate
    score = evaluate_submission(submission_data, ground_truth)

    # Save submission. Both `username` and `identifier` are user-controlled,
    # so strip anything path-hostile (e.g. "../", separators) before they are
    # embedded in a filename.
    safe_user = re.sub(r"[^A-Za-z0-9_.-]", "_", str(username))
    safe_ident = re.sub(r"[^A-Za-z0-9_.-]", "_", str(identifier))
    submission_id = str(uuid.uuid4())
    out_path = os.path.join(
        SUBMISSION_DIR, f"{submission_id}_{safe_user}_{safe_ident}.json"
    )
    with open(out_path, "w") as f:
        json.dump({
            "username": username,
            "identifier": identifier,
            "score": score,
            "timestamp": now,
        }, f)

    user_last_submission[username] = now
    return f"Submission accepted. Score: {score:.4f}"
47
+
48
# UI: minimal submission portal wired to submit().
# NOTE(review): `auth=True` is passed to gr.Blocks here — confirm against the
# gradio version in use; authentication is conventionally configured via
# demo.launch(auth=...), and submit() relies on request.username being set by
# an authenticated session.
with gr.Blocks(auth=True) as demo:
    gr.Markdown("# 🚀 ML Challenge Submission Portal")
    identifier = gr.Text(label="Your submission identifier (e.g., 'o5.1 SuperAGI MegaPrompt')")
    file = gr.File(label="Upload your prediction file (.json)", file_types=[".json"])
    submit_btn = gr.Button("Submit")
    output = gr.Textbox()

    # Only the two component values are wired as inputs; gradio injects the
    # gr.Request argument of submit() automatically.
    submit_btn.click(fn=submit, inputs=[file, identifier], outputs=output)

demo.launch()
eval_utils.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
def evaluate_submission(preds, ground_truth):
    """Return the exact-match accuracy of *preds* against *ground_truth*.

    Parameters
    ----------
    preds : dict
        Mapping of example key -> predicted label.
    ground_truth : dict
        Mapping of example key -> reference label.

    Returns
    -------
    float
        Fraction in [0, 1] of ground-truth keys whose prediction is present
        and equal. Keys missing from *preds* count as wrong; extra keys in
        *preds* are ignored. Returns 0.0 for an empty ground truth instead
        of raising ZeroDivisionError.
    """
    total = len(ground_truth)
    # Guard the empty case: the original divided by len(ground_truth)
    # unconditionally and crashed on an empty answer key.
    if total == 0:
        return 0.0

    # `key in preds` is checked explicitly (rather than preds.get) so that a
    # ground-truth value of None never matches a *missing* prediction.
    correct = sum(
        1 for key in ground_truth if key in preds and preds[key] == ground_truth[key]
    )
    return correct / total
ground_truth.json ADDED
File without changes
requirements.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ gradio