File size: 13,011 Bytes
7b9478f 46b248b 7b9478f 46b248b 7b9478f 46b248b 7b9478f |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 |
"""
Memorization Mining
Utilities for finding memorized sequences from training data.
Based on: "From Memorization to Reasoning in the Spectrum of Loss Curvature"
"""
import torch
import torch.nn as nn
from torch import Tensor
from typing import Optional, Iterator
from dataclasses import dataclass
import json
from tqdm import tqdm
import random
@dataclass
class MemorizedSequence:
    """A (prefix, suffix) pair that a model may reproduce verbatim.

    Holds both the human-readable text and the token ids so callers can
    work at either level without re-tokenizing.
    """
    # Human-readable text forms
    prefix: str
    suffix: str
    # Token ids for prefix/suffix under the model's tokenizer
    prefix_ids: list[int]
    suffix_ids: list[int]
    source: str = ""  # Dataset source

    # Field names serialized by to_dict / consumed by from_dict
    _FIELDS = ("prefix", "suffix", "prefix_ids", "suffix_ids", "source")

    def to_dict(self) -> dict:
        """Return a plain-dict form suitable for JSON serialization."""
        return {name: getattr(self, name) for name in self._FIELDS}

    @classmethod
    def from_dict(cls, d: dict) -> "MemorizedSequence":
        """Inverse of `to_dict`: rebuild an instance from its dict form."""
        return cls(**d)
def sample_sequences(
    dataset_name: str,
    tokenizer,
    num_sequences: int = 10000,
    prefix_len: int = 64,
    suffix_len: int = 48,
    min_text_tokens: Optional[int] = None,
    seed: int = 42,
    streaming: bool = True,
    dataset_config: Optional[str] = "en",  # Default to English for C4
    text_column: str = "text",
) -> list[MemorizedSequence]:
    """
    Sample candidate (prefix, suffix) pairs from a dataset.

    The sequences are drawn by:
    1. Streaming text from the dataset
    2. Tokenizing each text
    3. Extracting one random window of (prefix_len + suffix_len) tokens

    Args:
        dataset_name: HuggingFace dataset name
        tokenizer: Tokenizer for the model
        num_sequences: Number of sequences to sample
        prefix_len: Length of prefix in tokens
        suffix_len: Length of suffix in tokens
        min_text_tokens: Minimum tokens in text to be considered;
            defaults to prefix_len + suffix_len + 10
        seed: Random seed
        streaming: Use streaming mode
        dataset_config: Dataset configuration/subset
        text_column: Name of the text column

    Returns:
        List of MemorizedSequence candidates (not yet verified as memorized)
    """
    from datasets import load_dataset

    random.seed(seed)
    total_len = prefix_len + suffix_len
    # Require some slack beyond one window so a random start exists.
    min_text_tokens = min_text_tokens or total_len + 10

    # Load dataset (some datasets, e.g. C4, require a config name)
    if dataset_config:
        ds = load_dataset(dataset_name, name=dataset_config, split="train", streaming=streaming)
    else:
        ds = load_dataset(dataset_name, split="train", streaming=streaming)
    if streaming:
        # Buffered shuffle; NOTE(review): non-streaming datasets are read in
        # corpus order — confirm that is acceptable for non-streaming use.
        ds = ds.shuffle(buffer_size=10000, seed=seed)

    sequences: list[MemorizedSequence] = []
    seen_prefixes: set[tuple[int, ...]] = set()  # For deduplication
    pbar = tqdm(total=num_sequences, desc="Sampling sequences")
    try:
        for example in ds:
            if len(sequences) >= num_sequences:
                break
            # Get text; skip missing/empty entries
            text = example.get(text_column)
            if not text:
                continue
            # Tokenize without special tokens so windows are pure content
            tokens = tokenizer.encode(text, add_special_tokens=False)
            if len(tokens) < min_text_tokens:
                continue
            # Sample a single random window per document
            max_start = len(tokens) - total_len
            if max_start <= 0:
                continue
            start_idx = random.randint(0, max_start)
            prefix_ids = tokens[start_idx:start_idx + prefix_len]
            suffix_ids = tokens[start_idx + prefix_len:start_idx + total_len]
            # Deduplicate by exact prefix token ids
            prefix_tuple = tuple(prefix_ids)
            if prefix_tuple in seen_prefixes:
                continue
            seen_prefixes.add(prefix_tuple)
            # Decode back to text so candidates are human-readable
            prefix = tokenizer.decode(prefix_ids)
            suffix = tokenizer.decode(suffix_ids)
            sequences.append(MemorizedSequence(
                prefix=prefix,
                suffix=suffix,
                prefix_ids=prefix_ids,
                suffix_ids=suffix_ids,
                source=dataset_name,
            ))
            pbar.update(1)
    finally:
        # Always release the progress bar, even if iteration raises
        # (e.g. a network error from a streaming dataset).
        pbar.close()
    return sequences
@torch.no_grad()
def check_memorization_batch(
    model: nn.Module,
    tokenizer,
    sequences: list[MemorizedSequence],
    strict: bool = True,
) -> list[tuple[MemorizedSequence, bool, float]]:
    """
    Check if a batch of sequences is memorized.

    Greedily generates a continuation for each prefix and compares it
    token-by-token against the stored suffix.

    Args:
        model: Language model
        tokenizer: Tokenizer (must support batched padding)
        sequences: List of sequences to check
        strict: If True, require exact match; if False, use overlap threshold

    Returns:
        List of (sequence, is_memorized, overlap_score) tuples
    """
    # Guard the empty batch: max() below would raise ValueError on [].
    if not sequences:
        return []
    model.eval()
    device = next(model.parameters()).device

    # Prepare batch: generate up to the longest suffix in the batch
    prefixes = [seq.prefix for seq in sequences]
    suffix_lengths = [len(seq.suffix_ids) for seq in sequences]
    max_suffix_len = max(suffix_lengths)

    # Tokenize prefixes as one padded batch
    encoded = tokenizer(
        prefixes,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )
    input_ids = encoded["input_ids"].to(device)
    attention_mask = encoded["attention_mask"].to(device)

    # Greedy generation (deterministic, so memorization is well-defined)
    from .evaluate import generate_greedy
    generated = generate_greedy(
        model, input_ids, max_suffix_len,
        attention_mask=attention_mask,
        pad_token_id=tokenizer.pad_token_id,
    )

    results = []
    for seq, gen_ids in zip(sequences, generated):
        # Compare only as many generated tokens as this sequence's suffix has
        gen_list = gen_ids.tolist()[:len(seq.suffix_ids)]
        target_list = seq.suffix_ids
        # Exact verbatim reproduction
        is_exact = gen_list == target_list
        # Fraction of positions where the generation matches the target
        matches = sum(g == t for g, t in zip(gen_list, target_list))
        overlap = matches / len(target_list) if target_list else 0
        # Strict mode needs a verbatim match; otherwise 75% token overlap
        is_memorized = is_exact if strict else (overlap >= 0.75)
        results.append((seq, is_memorized, overlap))
    return results
def filter_memorized(
    model: nn.Module,
    tokenizer,
    candidates: list[MemorizedSequence],
    batch_size: int = 8,
    strict: bool = True,
    progress_bar: bool = True,
) -> list[MemorizedSequence]:
    """
    Filter candidates to keep only memorized sequences.

    Args:
        model: Language model
        tokenizer: Tokenizer
        candidates: List of candidate sequences
        batch_size: Batch size for inference
        strict: Require exact match
        progress_bar: Show progress bar

    Returns:
        List of memorized sequences
    """
    kept: list[MemorizedSequence] = []
    starts = range(0, len(candidates), batch_size)
    if progress_bar:
        starts = tqdm(starts, desc="Filtering memorized")
    for start in starts:
        # Slicing past the end is safe, so no explicit min() is needed
        chunk = candidates[start:start + batch_size]
        checked = check_memorization_batch(model, tokenizer, chunk, strict)
        kept.extend(seq for seq, is_mem, _overlap in checked if is_mem)
    return kept
def mine_memorized_sequences(
    model: nn.Module,
    tokenizer,
    dataset_name: str = "allenai/c4",
    target_count: int = 1000,
    max_candidates: int = 50000,
    prefix_len: int = 64,
    suffix_len: int = 48,
    batch_size: int = 8,
    seed: int = 42,
    dataset_config: Optional[str] = "en",  # Default to English for C4
    strict: bool = True,
) -> list[MemorizedSequence]:
    """
    Mine memorized sequences from training data.

    This is the main pipeline for finding sequences that the model
    has memorized verbatim: sample candidate windows from the dataset,
    then keep only those the model regenerates from their prefix.

    Args:
        model: Language model
        tokenizer: Tokenizer
        dataset_name: HuggingFace dataset name (should be training data)
        target_count: Desired number of memorized sequences
        max_candidates: Maximum candidates to sample
        prefix_len: Prefix length in tokens
        suffix_len: Suffix length in tokens
        batch_size: Batch size for inference
        seed: Random seed
        dataset_config: Dataset configuration
        strict: Require exact match

    Returns:
        List of memorized sequences (at most target_count)
    """
    print(f"Mining memorized sequences from {dataset_name}")
    print(f"Target: {target_count} memorized, max candidates: {max_candidates}")

    # Step 1: sample candidate (prefix, suffix) windows from the dataset
    print("\nStep 1: Sampling candidates...")
    candidates = sample_sequences(
        dataset_name=dataset_name,
        tokenizer=tokenizer,
        num_sequences=max_candidates,
        prefix_len=prefix_len,
        suffix_len=suffix_len,
        seed=seed,
        dataset_config=dataset_config,
    )
    print(f"Sampled {len(candidates)} candidates")

    # Step 2: keep only candidates the model actually regenerates
    print("\nStep 2: Filtering for memorized sequences...")
    memorized = filter_memorized(
        model=model,
        tokenizer=tokenizer,
        candidates=candidates,
        batch_size=batch_size,
        strict=strict,
    )

    # Guard against ZeroDivisionError when no candidates were sampled
    if candidates:
        print(f"\nFound {len(memorized)} memorized sequences "
              f"({len(memorized)/len(candidates)*100:.1f}% of candidates)")
    else:
        print("\nFound 0 memorized sequences (no candidates sampled)")

    # Randomly subsample if we found more than needed (reproducible via seed)
    if len(memorized) > target_count:
        random.seed(seed)
        memorized = random.sample(memorized, target_count)
        print(f"Truncated to {target_count} sequences")
    return memorized
def save_sequences(sequences: list[MemorizedSequence], path: str) -> None:
    """Serialize sequences to a JSON file at `path`."""
    with open(path, "w") as f:
        json.dump([seq.to_dict() for seq in sequences], f, indent=2)
    print(f"Saved {len(sequences)} sequences to {path}")
def load_sequences(path: str) -> list[MemorizedSequence]:
    """Load sequences previously written by `save_sequences`."""
    with open(path, "r") as f:
        records = json.load(f)
    sequences = [MemorizedSequence.from_dict(rec) for rec in records]
    print(f"Loaded {len(sequences)} sequences from {path}")
    return sequences
def split_sequences(
    sequences: list[MemorizedSequence],
    train_ratio: float = 0.8,
    seed: int = 42,
) -> tuple[list[MemorizedSequence], list[MemorizedSequence]]:
    """
    Split sequences into train and validation sets.

    The input list is left untouched; a shuffled copy is partitioned.

    Args:
        sequences: List of sequences
        train_ratio: Fraction for training set
        seed: Random seed

    Returns:
        Tuple of (train_sequences, val_sequences)
    """
    random.seed(seed)
    pool = list(sequences)  # shuffle a copy, never the caller's list
    random.shuffle(pool)
    cut = int(len(pool) * train_ratio)
    return pool[:cut], pool[cut:]
def get_prefixes_and_suffixes(
    sequences: list[MemorizedSequence],
) -> tuple[list[str], list[str]]:
    """
    Extract prefix and suffix strings from sequences.

    Useful for passing to evaluation functions.
    """
    return (
        [item.prefix for item in sequences],
        [item.suffix for item in sequences],
    )
# Historical quotes dataset (for out-of-distribution testing)
# Each entry is a (prefix, suffix) pair; suffixes keep their leading
# space/punctuation so prefix + suffix concatenates to the original text.
HISTORICAL_QUOTES = [
    # Famous quotes that models often memorize
    ("To be, or not to be, that is the question:", " Whether 'tis nobler in the mind to suffer"),
    ("Four score and seven years ago our fathers brought forth", " on this continent, a new nation, conceived in Liberty"),
    ("I have a dream that one day this nation will rise up", " and live out the true meaning of its creed"),
    ("Ask not what your country can do for you", " — ask what you can do for your country"),
    ("The only thing we have to fear is", " fear itself"),
    ("In the beginning God created", " the heavens and the earth"),
    ("It was the best of times, it was the worst of times,", " it was the age of wisdom, it was the age of foolishness"),
    ("Call me Ishmael. Some years ago—never mind how long precisely", "—having little or no money in my purse"),
    ("All happy families are alike; each unhappy family is", " unhappy in its own way"),
    ("It is a truth universally acknowledged, that a single man in possession", " of a good fortune, must be in want of a wife"),
]
def create_quotes_dataset(
    tokenizer,
    additional_quotes: Optional[list[tuple[str, str]]] = None,
) -> list[MemorizedSequence]:
    """
    Create a dataset of historical quotes for OOD memorization testing.

    Args:
        tokenizer: Tokenizer
        additional_quotes: Additional (prefix, suffix) tuples to include

    Returns:
        List of MemorizedSequence objects
    """
    all_quotes = list(HISTORICAL_QUOTES)
    if additional_quotes:
        all_quotes.extend(additional_quotes)

    result: list[MemorizedSequence] = []
    for prefix_text, suffix_text in all_quotes:
        result.append(MemorizedSequence(
            prefix=prefix_text,
            suffix=suffix_text,
            prefix_ids=tokenizer.encode(prefix_text, add_special_tokens=False),
            suffix_ids=tokenizer.encode(suffix_text, add_special_tokens=False),
            source="historical_quotes",
        ))
    return result
|