IvoHoese committed on
Commit
8a85974
·
verified ·
1 Parent(s): 41e77e0

Upload task_template.py

Browse files
Files changed (1) hide show
  1. task_template.py +259 -0
task_template.py ADDED
@@ -0,0 +1,259 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import pandas as pd
3
+ import requests
4
+ import sys
5
+ import torchvision.models as models
6
+ import os
7
+
8
+ from transformers import AutoTokenizer, PreTrainedModel
9
+ from peft import PeftModel
10
+ from hf_olmo import OLMoForCausalLM
11
+
12
# --------------------------------
# DATASET
# --------------------------------

"""
Dataset contents:

- 100 subsets of text data, each subset stored under the key "subset_{i}" where i ranges from 0 to 99.
Each subset is a dictionary with:
-"sentences": List of 100 sentences in the subset
-"input_ids": Tensor of tokenized input IDs for the sentences, has shape (100, MAX_LENGTH)
-"attention_mask": Tensor of attention masks for the tokenized inputs, has shape (100, MAX_LENGTH)
-"labels": Tensor of true labels for the sentences in the subset, has shape (100)
-"subset_id": Integer ID of the subset (from 0 to 99)
"""

# Load the dataset.
# NOTE(review): torch.load unpickles arbitrary Python objects -- only load
# this file from a trusted source. On torch >= 2.6 the new default
# weights_only=True may reject the non-tensor entries (e.g. the "sentences"
# lists); pass weights_only=False explicitly if loading fails there.
dataset = torch.load("subsets_dataset.pt")

# Example: accessing one subset by its string key.
subset_0 = dataset["subset_0"]

# Quick sanity-print of the subset's structure described above.
print("Subset 0 keys:", subset_0.keys())
print("Subset ID:", subset_0["subset_id"])
print("Labels shape:", subset_0["labels"].shape)
print("First sentence:", subset_0["sentences"][:1])
print("First 10 labels:", subset_0["labels"][:10])
39
+
40
# --------------------------------
# QUERYING THE CLASSIFIER
# --------------------------------

# The code below loads the fine-tuned LLM so it can be queried with sentences.

# IMPORTANT: adapter_config.json and adapter_model.safetensors must be put in a folder named "LORA"
# Sorry for the inconvenience!

# Run on GPU when one is available, otherwise fall back to CPU.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")

BASE_MODEL = "allenai/OLMo-1B"

# Tokenizer for the OLMo base model; trust_remote_code is required because
# the model's code lives in the hub repository (hf_olmo).
tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL, trust_remote_code=True)

# If no pad token is defined, reuse the EOS token so batch padding works.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
59
+
60
# Compatibility shims: the code below neutralizes transformers' weight-tying
# bookkeeping (presumably to work around an hf_olmo incompatibility --
# NOTE(review): confirm against the transformers version in use).

def _skip_tied_weight_init_marking(self):
    """No-op stand-in for PreTrainedModel.mark_tied_weights_as_initialized."""
    return

PreTrainedModel.mark_tied_weights_as_initialized = _skip_tied_weight_init_marking

def _no_tied_weight_keys(self):
    """Report that the model has no tied weights at all."""
    return {}

PreTrainedModel.all_tied_weights_keys = property(_no_tied_weight_keys)
OLMoForCausalLM.all_tied_weights_keys = property(_no_tied_weight_keys)

def _skip_tie_weights(self, *args, **kwargs):
    """No-op stand-in for tie_weights."""
    return

OLMoForCausalLM.tie_weights = _skip_tie_weights
77
+
78
# Load the base model, then attach the LoRA adapter from the "LORA" folder.
# Half precision is only used on GPU; CPU inference stays in float32.
_dtype = torch.float16 if DEVICE.type == "cuda" else torch.float32
base_model = OLMoForCausalLM.from_pretrained(
    BASE_MODEL, trust_remote_code=True, torch_dtype=_dtype
)

model = PeftModel.from_pretrained(base_model, "LORA")

model.config.use_cache = False  # disable the KV cache; not needed here
model.eval()  # inference mode: disables dropout etc.
model.to(DEVICE)
93
+
94
# Example: classify every sentence of subset 0 in one batched forward pass.
subset = dataset["subset_0"]

input_ids = subset["input_ids"].to(DEVICE)
attention_mask = subset["attention_mask"].to(DEVICE)

with torch.no_grad():
    outputs = model(
        input_ids=input_ids,
        attention_mask=attention_mask,
        return_dict=True,
    )

# Take the logits at the last non-padded position of each sequence
# (assumes right padding, so that position is mask_sum - 1 -- TODO confirm).
seq_last = attention_mask.sum(dim=1) - 1
rows = torch.arange(outputs.logits.size(0), device=outputs.logits.device)
final_logits = outputs.logits[rows, seq_last]

# Token ids of the two class words; the leading space matters for BPE-style
# tokenizers.
pos_id = tokenizer.encode(" positive", add_special_tokens=False)[0]
neg_id = tokenizer.encode(" negative", add_special_tokens=False)[0]

# Column 0 = "negative" logit, column 1 = "positive" logit.
subset_logits = final_logits[:, [neg_id, pos_id]]

print(f"Logits shape: {subset_logits.shape}")  # should be (100, 2)
print(f"First 10 logits: {subset_logits[:10]}")
118
+
119
# --------------------------------
# SUBMISSION FORMAT
# --------------------------------

"""
The submission must be a .csv file with the following format:

-"subset_id": ID of the subset (from 0 to 99)
-"membership": Membership score for each subset (float)
"""

# Example Submission: one random membership score per subset.
subset_ids = list(range(len(dataset)))
membership_scores = torch.rand(len(dataset)).tolist()
submission_df = pd.DataFrame({
    "subset_id": subset_ids,
    "membership": membership_scores,
})
# index=False keeps pandas' row index out of the CSV, so the file contains
# exactly the two required columns.
submission_df.to_csv("example_submission.csv", index=False)
139
+
140
+ # --------------------------------
141
+ # SUBMISSION PROCESS
142
+ # --------------------------------
143
+
144
+ """
145
+ Example submission script for the LLM Set Membership Inference Task.
146
+
147
+ Submission Requirements (read carefully to avoid automatic rejection):
148
+
149
+ 1. CSV FORMAT
150
+ ----------------
151
+ - The file **must be a CSV** with extension `.csv`.
152
+ - It must contain **exactly two columns**, named:
153
+ subset_id, membership
154
+ → Column names must match exactly (lowercase, no extra spaces).
155
+ → Column order does not matter, but both must be present.
156
+
157
+ 2. ROW COUNT AND IDENTIFIERS
158
+ -------------------------------
159
+ - Your file must contain **exactly 100 rows**.
160
+ - Each row corresponds to one unique `subset_id` in the range **0–99** (inclusive).
161
+ - Every subset_id must appear **exactly once**.
162
+ - Do **not** add, remove, or rename any IDs.
163
+ - Do **not** include duplicates or missing entries.
164
+ - The evaluator checks:
165
+ subset_id.min() == 0
166
+ subset_id.max() == 99
167
+ subset_id.unique().size == 100
168
+
169
+ 3. MEMBERSHIP SCORES
170
+ ----------------------
171
+ - The `membership` column must contain **numeric values** representing your model’s predicted confidence
172
+ that the corresponding subset is a **member** of the training set.
173
+
174
+ Examples of valid membership values:
175
+ - Probabilities: values in [0.0, 1.0]
176
+ - Raw model scores: any finite numeric values (will be ranked for TPR@FPR=0.05)
177
+
178
+ - Do **not** submit string labels like "yes"/"no" or "member"/"non-member".
179
+ - The evaluator converts your `membership` column to numeric using `pd.to_numeric()`.
180
+ → Any non-numeric, NaN, or infinite entries will cause automatic rejection.
181
+
182
+ 4. TECHNICAL LIMITS
183
+ ----------------------
184
+ - Maximum file size: **20 MB**
185
+ - Encoding: UTF-8 recommended.
186
+ - Avoid extra columns, blank lines, or formulas.
187
+ - Ensure all values are numeric and finite.
188
+ - Supported data types: int, float (e.g., float32, float64)
189
+
190
+ 5. VALIDATION SUMMARY
191
+ ------------------------
192
+ Your submission will fail if:
193
+ - Columns don’t match exactly ("subset_id", "membership")
194
+ - Row count differs from 100
195
+ - Any subset_id is missing, duplicated, or outside [0, 99]
196
+ - Any membership value is NaN, Inf, or non-numeric
197
+ - File is too large or not a valid CSV
198
+
199
+ Two key metrics are computed:
200
+ 1. **ROC-AUC (Area Under the ROC Curve)** — measures overall discriminative ability.
201
+ 2. **TPR@FPR=0.05** — true positive rate when the false positive rate is at 5%.
202
+
203
+ """
204
+
205
# Grading-server endpoint and credentials.
BASE_URL = "http://35.192.205.84:80"
API_KEY = "YOUR_API_KEY_HERE"  # replace with your actual API key

# Task identifier and the CSV file to upload.
TASK_ID = "14-llm-dataset-inference"
FILE_PATH = "Your-Submission-File.csv"  # replace with your actual file path

# Safety switch: nothing is uploaded until this is set to True.
SUBMIT = False
212
+
213
def die(msg):
    """Print *msg* to stderr and terminate the process with exit status 1."""
    # The original wrapped msg in a redundant f-string; print it directly.
    print(msg, file=sys.stderr)
    sys.exit(1)
216
+
217
# Upload FILE_PATH to the grading server when SUBMIT is enabled.
if SUBMIT:
    if not os.path.isfile(FILE_PATH):
        die(f"File not found: {FILE_PATH}")

    try:
        with open(FILE_PATH, "rb") as f:
            files = {
                # (fieldname) -> (filename, fileobj, content_type)
                # "text/csv" is the proper MIME type (the original sent the
                # invalid type "csv").
                "file": (os.path.basename(FILE_PATH), f, "text/csv"),
            }
            resp = requests.post(
                f"{BASE_URL}/submit/{TASK_ID}",
                headers={"X-API-Key": API_KEY},
                files=files,
                timeout=(10, 120),  # (connect timeout, read timeout)
            )

        # Decode the response body even on non-2xx, for helpful output.
        try:
            body = resp.json()
        except Exception:
            body = {"raw_text": resp.text}

        if resp.status_code == 413:
            die("Upload rejected: file too large (HTTP 413). Reduce size and try again.")

        # Raise on any other HTTP error status.
        resp.raise_for_status()

        submission_id = body.get("submission_id")
        print("Successfully submitted.")
        print("Server response:", body)
        if submission_id:
            print(f"Submission ID: {submission_id}")

    except requests.exceptions.RequestException as e:
        # Network failure or HTTP error: show whatever the server said, then
        # exit non-zero so callers/scripts can detect the failure.
        detail = getattr(e, "response", None)
        print(f"Submission error: {e}")
        if detail is not None:
            try:
                print("Server response:", detail.json())
            except Exception:
                print("Server response (text):", detail.text)
        sys.exit(1)
259
+