| """ |
| Sample submission script for Task 23: Property Inference. |
| |
| Auxiliary dataset: https://huggingface.co/datasets/SprintML/Property_Inference |
| Download texas_auxiliary.npz from there and place it in the same directory as this script. |
| |
| Steps: |
| 1. Query the predict API for each model using auxiliary probe data |
| 2. Compute a confidence score per model (higher = more likely World A) |
| 3. Save predictions to submission.csv |
| 4. Submit submission.csv to the hackathon platform |
| |
| Submission format (submission.csv): |
| model_id,score |
| 0,0.73 |
| 1,0.41 |
| ... |
| |
| Where score is a float in [0, 1]: |
| - Higher score = more likely World A (70% male training data) |
| - Lower score = more likely World B (50% male training data) |
| - 0.5 = uncertain (this is the default for missing model_ids) |
| - Do not submit scores of exactly 0 or 1 |
| |
| All 140 model_ids should be included. Missing ones default to 0.5. |
| |
| API rate limits: |
| - Per model: 15 minute cooldown after a successful query |
| - Failed requests: 2 minute cooldown |
| - Max batch size: 100 records per request |
| """ |
|
|
| import csv |
| import json |
| import os |
|
|
| import numpy as np |
| import requests |
|
|
| |
# --- API configuration: fill in your key before running. ---
BASE_URL = "http://35.192.205.84:80"
API_KEY = "YOUR_API_KEY_HERE"
TASK_ID = "23-property-inference"


# --- Load model ids and the auxiliary probe data shipped next to this script. ---
HERE = os.path.dirname(os.path.abspath(__file__))
# Use a context manager so the JSON file handle is closed promptly
# (json.load(open(...)) leaves the file open until GC).
with open(os.path.join(HERE, "model_ids.json")) as _f:
    MODEL_IDS = json.load(_f)
AUXILIARY_NPZ = np.load(os.path.join(HERE, "texas_auxiliary.npz"))

# Probe records sent to the predict API (see query_model).
PROBE_FEATURES = AUXILIARY_NPZ["features"].tolist()
OUTPUT_CSV = "submission.csv"

HEADERS = {"X-API-Key": API_KEY, "Content-Type": "application/json"}
|
|
| |
def query_model(model_id: int, features: list) -> list:
    """Query the predict API for one model and return its predicted labels.

    Args:
        model_id: id of the target model on the server.
        features: batch of probe records (the API allows at most 100 per request).

    Returns:
        List of predicted labels, one per record in ``features``.

    Raises:
        requests.exceptions.HTTPError: on any non-2xx response (e.g. 429 when
            rate limited — see the cooldown rules in the module docstring).
    """
    resp = requests.post(
        # Build the path from TASK_ID instead of duplicating the task string here.
        f"{BASE_URL}/{TASK_ID}/predict",
        headers=HEADERS,
        json={"model_id": model_id, "features": features},
        timeout=30,
    )
    resp.raise_for_status()
    return resp.json()["labels"]
|
|
|
|
| |
def compute_score(labels_a: list, labels_b: list = None) -> float:
    """
    Compute a confidence score in [0, 1] that a model belongs to World A.

    NOTE: this baseline is a random placeholder — it ignores both label lists
    and draws a uniform score. Replace it with a real property inference
    method (e.g. comparing label distributions across queries, which is what
    the optional ``labels_b`` parameter is provided for).

    Args:
        labels_a: labels returned by the first query (currently unused).
        labels_b: labels from an optional second query (currently unused).

    Returns:
        float in [0, 1] — higher means more likely World A. Clipped to
        [0.001, 0.999] because exact 0/1 submissions are disallowed.
    """
    score = float(np.random.uniform(0, 1))
    return float(np.clip(score, 0.001, 0.999))
|
|
|
|
| |
def main():
    """Query every model, score it, write submission.csv, and submit it.

    Models that hit the rate limit are skipped and default to the neutral
    score of 0.5 in the written CSV.
    """
    predictions = {}

    # One probe batch; the API caps each request at 100 records.
    probe_batch = PROBE_FEATURES[:100]

    for model_id in MODEL_IDS:
        print(f"Querying model {model_id}...")
        try:
            labels = query_model(model_id, probe_batch)
            score = compute_score(labels)
        except requests.exceptions.HTTPError as e:
            if e.response.status_code == 429:
                # Rate limited: leave this model unscored; it defaults to 0.5 below.
                # (Message previously contained a mis-encoded dash.)
                print(f"  Rate limited on model {model_id} — skipping (will default to 0.5)")
                continue
            else:
                raise
        predictions[model_id] = score

    # Write one row per model id; unscored models get the neutral 0.5.
    with open(OUTPUT_CSV, "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerow(["model_id", "score"])
        for model_id in MODEL_IDS:
            score = predictions.get(model_id, 0.5)
            writer.writerow([model_id, round(score, 6)])

    print(f"\nSaved {len(predictions)} predictions to {OUTPUT_CSV}")
    print(f"Missing models (defaulted to 0.5): {len(MODEL_IDS) - len(predictions)}")

    # Upload the CSV to the hackathon platform.
    print("\nSubmitting...")
    with open(OUTPUT_CSV, "rb") as f:
        resp = requests.post(
            f"{BASE_URL}/submit/{TASK_ID}",
            headers={"X-API-Key": API_KEY},
            files={"file": (OUTPUT_CSV, f, "text/csv")},
            timeout=120,
        )
    print("Response:", resp.json())
|
|
|
|
# Script entry point: run the full query / score / submit pipeline.
if __name__ == "__main__":
    main()
|
|