"""
Sample submission script for Task 23: Property Inference.
Auxiliary dataset: https://huggingface.co/datasets/SprintML/Property_Inference
Download auxiliary.npz from there and place it in the same directory as this script.
Steps:
1. Query the predict API for each model using auxiliary probe data
2. Compute a confidence score per model (higher = more likely World A)
3. Save predictions to submission.csv
4. Submit submission.csv to the hackathon platform
Submission format (submission.csv):
model_id,score
0,0.73
1,0.41
...
Where score is a float in [0, 1]:
- Higher score = more likely World A (70% male training data)
- Lower score = more likely World B (50% male training data)
- 0.5 = uncertain (this is the default for missing model_ids)
All 200 model_ids should be included. Missing ones default to 0.5.
API response format:
{
"model_id": 0,
"batch_size": 100,
"labels": [[4, 7], [2, 9], ...], # top-2 predicted classes per record
"probs": [[0.52, 0.21], [0.44, 0.18], ...] # top-2 softmax probabilities per record
}
API rate limits:
- Per model: 2 minute cooldown after a successful query
- Failed requests: 2 minute cooldown
- Max batch size: 500 records per request
"""
import csv
import json
import os
import numpy as np
import requests
# ── Configuration ──────────────────────────────────────────────────────────────
BASE_URL = "http://35.192.205.84:80"
API_KEY = "YOUR_API_KEY_HERE"
TASK_ID = "23-property-inference"

# Paths (relative to this script)
HERE = os.path.dirname(os.path.abspath(__file__))

# Model ids to query. Load inside a context manager so the file handle is
# closed deterministically (the previous `json.load(open(...))` leaked it).
with open(os.path.join(HERE, "model_ids.json")) as _f:
    MODEL_IDS = json.load(_f)

AUXILIARY_NPZ = np.load(os.path.join(HERE, "auxiliary.npz"))
PROBE_FEATURES = AUXILIARY_NPZ["features"].tolist()  # shape (10000, 10), already normalized

OUTPUT_CSV = "submission.csv"
HEADERS = {"X-API-Key": API_KEY, "Content-Type": "application/json"}
# ── Query API ──────────────────────────────────────────────────────────────────
def query_model(model_id: int, features: list) -> dict:
    """
    Send one batch of probe features to the predict endpoint for `model_id`.

    Args:
        model_id: id of the model to query.
        features: list of probe feature vectors (one list per record).

    Returns:
        dict with keys:
            labels: list[list[int]]   -- top-2 predicted classes per record
            probs:  list[list[float]] -- top-2 softmax probabilities per record

    Raises:
        requests.exceptions.HTTPError: on any non-2xx response.
    """
    url = f"{BASE_URL}/23-property-inference/predict"
    payload = {"model_id": model_id, "features": features}
    response = requests.post(url, headers=HEADERS, json=payload, timeout=30)
    response.raise_for_status()
    body = response.json()
    return {"labels": body["labels"], "probs": body["probs"]}
# ── Score computation (replace with your own method) ───────────────────────────
def compute_score(labels: list, probs: list) -> float:
    """
    Estimate how likely a model belongs to World A, as a score in [0, 1].

    Args:
        labels: list[list[int]] -- top-2 predicted class indices per record.
        probs: list[list[float]] -- top-2 softmax probabilities per record.

    Returns:
        float clipped to [0.001, 0.999]; higher means more likely World A.

    NOTE: this baseline ignores its inputs and draws one uniform random
    score -- substitute a real property-inference statistic here.
    """
    raw = np.random.uniform(0, 1)
    clipped = np.clip(float(raw), 0.001, 0.999)
    return float(clipped)
# ── Main ───────────────────────────────────────────────────────────────────────
def main():
    """Query every model, score it, write submission.csv, and submit it."""
    predictions = _collect_predictions()
    _write_submission(predictions)
    _submit()


def _collect_predictions() -> dict:
    """Query each model id and map it to a score; rate-limited ids are skipped."""
    predictions = {}
    # Use first 100 probe records (max batch size is 500)
    probe_batch = PROBE_FEATURES[:100]
    for model_id in MODEL_IDS:
        print(f"Querying model {model_id}...")
        try:
            # Keep the try body minimal: only the network call can raise
            # HTTPError (compute_score was previously inside this block
            # for no reason).
            result = query_model(model_id, probe_batch)
        except requests.exceptions.HTTPError as e:
            # e.response can be None on a raised HTTPError; guard before
            # dereferencing status_code.
            if e.response is not None and e.response.status_code == 429:
                print(f" Rate limited on model {model_id} — skipping (will default to 0.5)")
                continue
            raise
        score = compute_score(result["labels"], result["probs"])
        predictions[model_id] = score
        print(f" score={score:.4f}")
    return predictions


def _write_submission(predictions: dict) -> None:
    """Write one CSV row per model id; ids missing from `predictions` get 0.5."""
    with open(OUTPUT_CSV, "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerow(["model_id", "score"])
        for model_id in MODEL_IDS:
            score = predictions.get(model_id, 0.5)
            writer.writerow([model_id, round(score, 6)])
    print(f"\nSaved {len(predictions)} predictions to {OUTPUT_CSV}")
    print(f"Missing models (defaulted to 0.5): {len(MODEL_IDS) - len(predictions)}")


def _submit() -> None:
    """Upload the generated CSV to the hackathon submission endpoint."""
    print("\nSubmitting...")
    with open(OUTPUT_CSV, "rb") as f:
        resp = requests.post(
            f"{BASE_URL}/submit/{TASK_ID}",
            headers={"X-API-Key": API_KEY},
            files={"file": (OUTPUT_CSV, f, "text/csv")},
            timeout=120,
        )
    print("Response:", resp.json())


if __name__ == "__main__":
    main()