import os
import sys

import pandas as pd
import requests
import torch
from hf_olmo import OLMoForCausalLM
from peft import PeftModel
from transformers import AutoTokenizer, PreTrainedModel

""" |
|
|
Dataset contents: |
|
|
|
|
|
- 100 subsets of text data, each subset stored under the key "subset_{i}" where i ranges from 0 to 99. |
|
|
Each subset is a dictionary with: |
|
|
-"sentences": List of 100 sentences in the subset |
|
|
-"input_ids": Tensor of tokenized input IDs for the sentences, has shape (100, MAX_LENGTH) |
|
|
-"attention_mask": Tensor of attention masks for the tokenized inputs, has shape (100, MAX_LENGTH) |
|
|
-"labels": Tensor of true labels for the sentences in the subset, has shape (100) |
|
|
-"subset_id": Integer ID of the subset (from 0 to 99) |
|
|
""" |
|
|
|
|
|
|
|
|
# Load the dataset of subsets. On PyTorch >= 2.6, torch.load defaults to
# weights_only=True; pass weights_only=False if loading this pickled dict fails.
dataset = torch.load("subsets_dataset.pt")

# Inspect the first subset.
subset_0 = dataset["subset_0"]

print("Subset 0 keys:", subset_0.keys())
print("Subset ID:", subset_0["subset_id"])
print("Labels shape:", subset_0["labels"].shape)
print("First sentence:", subset_0["sentences"][:1])
print("First 10 labels:", subset_0["labels"][:10])


DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")

BASE_MODEL = "allenai/OLMo-1B"

tokenizer = AutoTokenizer.from_pretrained(
    BASE_MODEL,
    trust_remote_code=True,
)
# OLMo's tokenizer ships without a pad token; reuse EOS for padding.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token


# Compatibility shims: hf_olmo's OLMoForCausalLM predates the weight-tying
# hooks in recent transformers releases, so disable the tied-weights machinery
# before loading. None of these patches touches the model weights themselves.
def _no_op_mark_tied_weights_as_initialized(self):
    return


PreTrainedModel.mark_tied_weights_as_initialized = (
    _no_op_mark_tied_weights_as_initialized
)


def _empty_tied_weights_keys(self):
    return {}


PreTrainedModel.all_tied_weights_keys = property(_empty_tied_weights_keys)
OLMoForCausalLM.all_tied_weights_keys = property(_empty_tied_weights_keys)


def _no_op_tie_weights(self, *args, **kwargs):
    return


OLMoForCausalLM.tie_weights = _no_op_tie_weights

# Load the base model in half precision on GPU, full precision on CPU.
base_model = OLMoForCausalLM.from_pretrained(
    BASE_MODEL,
    trust_remote_code=True,
    torch_dtype=torch.float16 if DEVICE.type == "cuda" else torch.float32,
)

# Attach the fine-tuned LoRA adapter stored in the local "LORA" directory.
model = PeftModel.from_pretrained(
    base_model,
    "LORA",
)

# No generation happens below, so the KV cache is unnecessary.
model.config.use_cache = False

model.eval()
model.to(DEVICE)

# Run the model on subset 0 as a demonstration.
subset = dataset["subset_0"]

input_ids = subset["input_ids"].to(DEVICE)
attention_mask = subset["attention_mask"].to(DEVICE)

with torch.no_grad():
    outputs = model(
        input_ids=input_ids,
        attention_mask=attention_mask,
        return_dict=True,
    )

# Grab the logits at each sentence's final non-padding position: with a
# left-aligned attention mask, the last real token sits at index sum(mask) - 1.
logits = outputs.logits
last_idx = attention_mask.sum(dim=1) - 1
batch_idx = torch.arange(logits.size(0), device=logits.device)
final_logits = logits[batch_idx, last_idx]

# Token IDs of the two answer words the fine-tuned model is expected to emit.
pos_id = tokenizer.encode(" positive", add_special_tokens=False)[0]
neg_id = tokenizer.encode(" negative", add_special_tokens=False)[0]

# Per-sentence logits over [negative, positive].
subset_logits = final_logits[:, [neg_id, pos_id]]

print(f"Logits shape: {subset_logits.shape}")
print(f"First 10 logits: {subset_logits[:10]}")
""" |
|
|
The submission must be a .csv file with the following format: |
|
|
|
|
|
-"subset_id": ID of the subset (from 0 to 99) |
|
|
-"membership": Membership score for each image (float) |
|
|
""" |
|
|
|
|
|
|
|
|
|
|
|
# Build a placeholder submission with random membership scores.
subset_ids = list(range(len(dataset)))
membership_scores = torch.rand(len(dataset)).tolist()
submission_df = pd.DataFrame({
    "subset_id": subset_ids,
    "membership": membership_scores,
})
submission_df.to_csv("example_submission.csv", index=False)

""" |
|
|
Example submission script for the LLM Set Membership Inference Task. |
|
|
|
|
|
Submission Requirements (read carefully to avoid automatic rejection): |
|
|
|
|
|
1. CSV FORMAT |
|
|
---------------- |
|
|
- The file **must be a CSV** with extension `.csv`. |
|
|
- It must contain **exactly two columns**, named: |
|
|
subset_id, membership |
|
|
→ Column names must match exactly (lowercase, no extra spaces). |
|
|
→ Column order does not matter, but both must be present. |
|
|
|
|
|
2. ROW COUNT AND IDENTIFIERS |
|
|
------------------------------- |
|
|
- Your file must contain **exactly 100 rows**. |
|
|
- Each row corresponds to one unique `subset_id` in the range **0–99** (inclusive). |
|
|
- Every subset_id must appear **exactly once**. |
|
|
- Do **not** add, remove, or rename any IDs. |
|
|
- Do **not** include duplicates or missing entries. |
|
|
- The evaluator checks: |
|
|
subset_id.min() == 0 |
|
|
subset_id.max() == 99 |
|
|
subset_id.unique().size == 100 |
|
|
|
|
|
3. MEMBERSHIP SCORES |
|
|
---------------------- |
|
|
- The `membership` column must contain **numeric values** representing your model’s predicted confidence |
|
|
that the corresponding subset is a **member** of the training set. |
|
|
|
|
|
Examples of valid membership values: |
|
|
- Probabilities: values in [0.0, 1.0] |
|
|
- Raw model scores: any finite numeric values (will be ranked for TPR@FPR=0.05) |
|
|
|
|
|
- Do **not** submit string labels like "yes"/"no" or "member"/"non-member". |
|
|
- The evaluator converts your `membership` column to numeric using `pd.to_numeric()`. |
|
|
→ Any non-numeric, NaN, or infinite entries will cause automatic rejection. |
|
|
|
|
|
4. TECHNICAL LIMITS |
|
|
---------------------- |
|
|
- Maximum file size: **20 MB** |
|
|
- Encoding: UTF-8 recommended. |
|
|
- Avoid extra columns, blank lines, or formulas. |
|
|
- Ensure all values are numeric and finite. |
|
|
- Supported data types: int, float (e.g., float32, float64) |
|
|
|
|
|
5. VALIDATION SUMMARY |
|
|
------------------------ |
|
|
Your submission will fail if: |
|
|
- Columns don’t match exactly ("subset_id", "membership") |
|
|
- Row count differs from 100 |
|
|
- Any subset_id is missing, duplicated, or outside [0, 99] |
|
|
- Any membership value is NaN, Inf, or non-numeric |
|
|
- File is too large or not a valid CSV |
|
|
|
|
|
Two key metrics are computed: |
|
|
1. **ROC-AUC (Area Under the ROC Curve)** — measures overall discriminative ability. |
|
|
2. **TPR@FPR=0.05** — true positive rate when the false positive rate is at 5%. |
|
|
|
|
|
""" |

BASE_URL = "http://35.192.205.84:80"
API_KEY = "YOUR_API_KEY_HERE"

TASK_ID = "14-llm-dataset-inference"
FILE_PATH = "Your-Submission-File.csv"

# Flip to True to upload FILE_PATH to the evaluation server.
SUBMIT = False


def die(msg):
    print(msg, file=sys.stderr)
    sys.exit(1)


if SUBMIT:
    if not os.path.isfile(FILE_PATH):
        die(f"File not found: {FILE_PATH}")

    try:
        # Stream the CSV as a multipart upload; the server authenticates via
        # the X-API-Key header.
        with open(FILE_PATH, "rb") as f:
            files = {
                "file": (os.path.basename(FILE_PATH), f, "text/csv"),
            }
            resp = requests.post(
                f"{BASE_URL}/submit/{TASK_ID}",
                headers={"X-API-Key": API_KEY},
                files=files,
                timeout=(10, 120),  # (connect, read) timeouts in seconds
            )

        try:
            body = resp.json()
        except Exception:
            body = {"raw_text": resp.text}

        if resp.status_code == 413:
            die("Upload rejected: file too large (HTTP 413). Reduce size and try again.")

        resp.raise_for_status()

        submission_id = body.get("submission_id")
        print("Successfully submitted.")
        print("Server response:", body)
        if submission_id:
            print(f"Submission ID: {submission_id}")

    except requests.exceptions.RequestException as e:
        detail = getattr(e, "response", None)
        print(f"Submission error: {e}")
        if detail is not None:
            try:
                print("Server response:", detail.json())
            except Exception:
                print("Server response (text):", detail.text)
        sys.exit(1)