File size: 4,796 Bytes
fc6d86b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
"""
Sample submission script for Task 23: Property Inference.

Auxiliary dataset: https://huggingface.co/datasets/SprintML/Property_Inference
Download texas_auxiliary.npz from there and place it in the same directory as this script.

Steps:
  1. Query the predict API for each model using auxiliary probe data
  2. Compute a confidence score per model (higher = more likely World A)
  3. Save predictions to submission.csv
  4. Submit submission.csv to the hackathon platform

Submission format (submission.csv):
    model_id,score
    0,0.73
    1,0.41
    ...

Where score is a float in [0, 1]:
    - Higher score = more likely World A (70% male training data)
    - Lower score  = more likely World B (50% male training data)
    - 0.5 = uncertain (this is the default for missing model_ids)
    - Do not submit scores of exactly 0 or 1

All 140 model_ids should be included. Missing ones default to 0.5.

API rate limits:
    - Per model: 15 minute cooldown after a successful query
    - Failed requests: 2 minute cooldown
    - Max batch size: 100 records per request
"""

import csv
import json
import os

import numpy as np
import requests

# ── Configuration ──────────────────────────────────────────────────────────────
BASE_URL = "http://35.192.205.84:80"
API_KEY  = "YOUR_API_KEY_HERE"
TASK_ID  = "23-property-inference"

# Paths (relative to this script)
HERE = os.path.dirname(os.path.abspath(__file__))

# Load the model ids; use a context manager so the file handle is closed
# promptly instead of being leaked by json.load(open(...)).
with open(os.path.join(HERE, "model_ids.json")) as _f:
    MODEL_IDS = json.load(_f)

# Auxiliary probe data (download texas_auxiliary.npz — see module docstring).
AUXILIARY_NPZ = np.load(os.path.join(HERE, "texas_auxiliary.npz"))

PROBE_FEATURES = AUXILIARY_NPZ["features"].tolist()  # shape (10000, 10), already normalized
OUTPUT_CSV     = "submission.csv"

HEADERS = {"X-API-Key": API_KEY, "Content-Type": "application/json"}

# ── Query API ──────────────────────────────────────────────────────────────────
def query_model(model_id: int, features: list) -> list:
    """Send one batch of probe features to the predict endpoint.

    Args:
        model_id: id of the target model to query.
        features: batch of probe feature vectors (max 100 per request).

    Returns:
        The list of predicted labels from the API response.

    Raises:
        requests.exceptions.HTTPError: on a non-2xx response (e.g. 429
            when rate limited).
    """
    url = f"{BASE_URL}/23-property-inference/predict"
    payload = {"model_id": model_id, "features": features}
    response = requests.post(url, headers=HEADERS, json=payload, timeout=30)
    response.raise_for_status()
    body = response.json()
    return body["labels"]


# ── Score computation (replace with your own method) ───────────────────────────
def compute_score(labels_a: list, labels_b: list = None) -> float:
    """
    Compute a confidence score in [0, 1] that a model belongs to World A.

    NOTE: this baseline is a pure placeholder — it ignores both label lists
    and returns a uniformly random score. Replace it with a real property
    inference method (e.g. comparing the predicted label distribution against
    reference distributions from World A / World B shadow models).

    Args:
        labels_a: predicted labels from the first probe query (unused here).
        labels_b: optional labels from a second probe query (unused here).

    Returns:
        float in [0.001, 0.999] — higher means more likely World A. Clipped
        away from the endpoints because the task forbids scores of exactly
        0 or 1.
    """
    score = float(np.random.uniform(0, 1))
    return float(np.clip(score, 0.001, 0.999))


# ── Main ───────────────────────────────────────────────────────────────────────
def main():
    predictions = {}

    # Use first 100 probe records (max batch size)
    probe_batch = PROBE_FEATURES[:100]

    for model_id in MODEL_IDS:
        print(f"Querying model {model_id}...")
        try:
            labels = query_model(model_id, probe_batch)
            score = compute_score(labels)
        except requests.exceptions.HTTPError as e:
            if e.response.status_code == 429:
                print(f"  Rate limited on model {model_id} β€” skipping (will default to 0.5)")
                continue
            else:
                raise
        predictions[model_id] = score

    # Write CSV
    with open(OUTPUT_CSV, "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerow(["model_id", "score"])
        for model_id in MODEL_IDS:
            score = predictions.get(model_id, 0.5)
            writer.writerow([model_id, round(score, 6)])

    print(f"\nSaved {len(predictions)} predictions to {OUTPUT_CSV}")
    print(f"Missing models (defaulted to 0.5): {len(MODEL_IDS) - len(predictions)}")

    # Submit
    print("\nSubmitting...")
    with open(OUTPUT_CSV, "rb") as f:
        resp = requests.post(
            f"{BASE_URL}/submit/{TASK_ID}",
            headers={"X-API-Key": API_KEY},
            files={"file": (OUTPUT_CSV, f, "text/csv")},
            timeout=120,
        )
    print("Response:", resp.json())


if __name__ == "__main__":
    main()