"""
lossy.py — Lossy Compression Backend
Simulates what real LLMs do to text under recursive compression:
- Drop modal operators ("must" → removed or softened)
- Paraphrase (swap words for synonyms)
- Add conversational filler ("Got it!", "Sure thing!")
- Lose specific quantities ($100 → "the amount", Friday → "soon")
This is NOT a real compressor. It's a DETERMINISTIC SIMULATION
of the drift patterns observed in live LLM testing (Meta Llama,
GPT-4, Claude — see empirical data in paper Section 6).
Why this exists:
- Extractive backend is too faithful (doesn't show the gap)
- BART requires 2GB+ model download
- API backends require credentials
- This runs anywhere, instantly, and shows the conservation law
The drift patterns are seeded for reproducibility.
Same input → same output → same lineage chain.
"""
import re
import random
import hashlib
from typing import List, Tuple
from .compression import CompressionBackend
# ---------------------------------------------------------------------------
# Drift patterns observed in real LLM testing
# ---------------------------------------------------------------------------

# Modal softening: strong modals → weak/removed.
# Each key maps to candidate replacements; the empty string '' means the
# modal is deleted outright when it is chosen.
MODAL_DRIFT = {
    'must': ['should', 'could', 'might want to', ''],
    'shall': ['will', 'should', 'might', ''],
    'cannot': ['probably shouldn\'t', 'might not want to', 'shouldn\'t', ''],
    'shall not': ['probably shouldn\'t', 'might want to avoid', ''],
    'must not': ['should avoid', 'probably shouldn\'t', ''],
    'required to': ['expected to', 'encouraged to', 'asked to', ''],
    'prohibited from': ['discouraged from', 'asked not to', ''],
    'forbidden to': ['discouraged from', 'asked not to', ''],
    'always': ['usually', 'often', 'typically', 'generally'],
    'never': ['rarely', 'seldom', 'not usually', 'typically don\'t'],
}

# Quantity erosion: specific numbers → vague references.
# List of (compiled_pattern, candidate_replacements) pairs; patterns are
# compiled once at import time and searched case-insensitively where marked.
QUANTITY_DRIFT = [
    (re.compile(r'\$\d[\d,]*'), ['the payment', 'the amount', 'the fee']),
    (re.compile(r'\b\d+\s*(?:days?|hours?|minutes?|months?|years?|weeks?)\b', re.I),
     ['the timeframe', 'the period', 'a while']),
    (re.compile(r'\b(?:Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday)\b', re.I),
     ['soon', 'by the deadline', 'on time']),
    (re.compile(r'\b(?:January|February|March|April|May|June|July|August|September|October|November|December)\s+\d{1,2}(?:st|nd|rd|th)?\b', re.I),
     ['by the deadline', 'on time', 'as scheduled']),
    (re.compile(r'\b\d{1,3}(?:,\d{3})*\b'), ['several', 'many', 'a number of']),
]

# Conversational filler (LLMs love adding these) — prepended to the output
# by LossyBackend._add_conversational_filler.
FILLER = [
    "Got it. ",
    "Sure thing. ",
    "Understood. ",
    "Makes sense. ",
    "Right. ",
    "OK so ",
    "Basically, ",
    "In other words, ",
    "To summarize, ",
    "The key point is ",
]

# Sentence padding (LLMs expand with these) — occasionally appended after
# the text (30% chance when filler is added).
PADDING = [
    " That's important to keep in mind.",
    " Just wanted to make sure that's clear.",
    " Let me know if you have questions.",
    " Hope that helps!",
    " Pretty straightforward.",
    " Nothing too complicated here.",
]
class LossyBackend(CompressionBackend):
    """
    Deterministic lossy compression simulating real LLM drift.

    Drift intensity increases with each call to ``compress`` (simulating
    recursive degradation). The RNG seed is derived from the input text
    hash plus the call count, so the same input at the same depth always
    produces the same output.

    Parameters:
        drift_rate: 0.0 (no drift) to 1.0 (maximum drift).
            Controls probability of each drift operation.
        add_filler: Whether to add conversational filler.
    """

    def __init__(self, drift_rate: float = 0.4, add_filler: bool = True):
        self._drift_rate = drift_rate
        self._add_filler = add_filler
        # Number of compress() calls so far; acts as the recursion depth
        # and raises the effective drift rate on every call.
        self._call_count = 0

    @property
    def name(self) -> str:
        return f'lossy(drift={self._drift_rate})'

    def reset(self):
        """Reset call counter (for new signal)."""
        self._call_count = 0

    def compress(self, text: str, target_ratio: float = 0.5) -> str:
        """
        Apply lossy transformation to text.

        Drift increases with each call (self._call_count).

        Parameters:
            text: Input text to degrade.
            target_ratio: Approximate fraction of sentences to keep.

        Returns:
            The drifted, possibly shortened text, stripped of leading
            and trailing whitespace.
        """
        self._call_count += 1
        # Seed RNG from text hash for determinism; md5 is used purely
        # for reproducible seeding, not for security.
        seed = int(hashlib.md5(text.encode()).hexdigest()[:8], 16) + self._call_count
        rng = random.Random(seed)
        # Effective drift rate increases with iteration, capped at 1.0.
        effective_rate = min(1.0, self._drift_rate * (1.0 + 0.2 * self._call_count))
        result = text
        # Stage 1: Modal softening
        result = self._soften_modals(result, rng, effective_rate)
        # Stage 2: Quantity erosion (less aggressive than modal drift)
        result = self._erode_quantities(result, rng, effective_rate * 0.7)
        # Stage 3: Sentence dropping (simulate compression)
        result = self._drop_sentences(result, rng, target_ratio)
        # Stage 4: Add filler (simulate LLM expansion)
        if self._add_filler and rng.random() < effective_rate * 0.5:
            result = self._add_conversational_filler(result, rng)
        return result.strip()

    def _soften_modals(self, text: str, rng: random.Random, rate: float) -> str:
        """Replace strong modals with weaker alternatives (one hit per modal)."""
        result = text
        # Sort by length descending to match multi-word modals first
        # ('must not' before 'must').
        for modal in sorted(MODAL_DRIFT.keys(), key=len, reverse=True):
            if rng.random() < rate:
                replacements = MODAL_DRIFT[modal]
                replacement = rng.choice(replacements)
                # Case-insensitive replacement, first occurrence only.
                pattern = re.compile(re.escape(modal), re.I)
                match = pattern.search(result)
                if match:
                    original = match.group()
                    # Preserve capitalization of first char.
                    if original[0].isupper() and replacement:
                        replacement = replacement[0].upper() + replacement[1:]
                    result = result[:match.start()] + replacement + result[match.end():]
        return result

    def _erode_quantities(self, text: str, rng: random.Random, rate: float) -> str:
        """Replace specific quantities with vague references (one hit per pattern)."""
        result = text
        for pattern, replacements in QUANTITY_DRIFT:
            if rng.random() < rate:
                match = pattern.search(result)
                if match:
                    replacement = rng.choice(replacements)
                    result = result[:match.start()] + replacement + result[match.end():]
        return result

    def _drop_sentences(self, text: str, rng: random.Random, target_ratio: float) -> str:
        """Drop sentences to approximate target compression ratio."""
        sentences = re.split(r'(?<=[.!?])\s+', text)
        if len(sentences) <= 1:
            return text
        target_count = max(1, int(len(sentences) * target_ratio))
        if len(sentences) <= target_count:
            return text
        # Score sentences purely at random: without enforcement, modal
        # sentences get NO priority (that's the point — the baseline
        # doesn't know about commitments).
        scored = []
        for i, sent in enumerate(sentences):
            score = rng.random()
            scored.append((score, i, sent))
        scored.sort(key=lambda x: -x[0])
        kept = scored[:target_count]
        kept.sort(key=lambda x: x[1])  # Restore original order
        return ' '.join(sent for _, _, sent in kept)

    def _add_conversational_filler(self, text: str, rng: random.Random) -> str:
        """Add LLM-style filler before (and sometimes padding after) the text."""
        filler = rng.choice(FILLER)
        padding = rng.choice(PADDING) if rng.random() < 0.3 else ''
        return filler + text + padding
class LossyEnforcedBackend(CompressionBackend):
    """
    Lossy backend that PRESERVES modal-bearing sentences during dropping.

    This simulates what happens when a compressor is commitment-aware:
    same drift patterns, but modal sentences get priority during
    selection. The enforcement is in the SELECTION, not post-hoc
    injection.
    """

    def __init__(self, drift_rate: float = 0.4, add_filler: bool = False):
        self._drift_rate = drift_rate
        # Kept for interface parity with LossyBackend; compress() never
        # adds filler under enforcement.
        self._add_filler = add_filler
        self._call_count = 0

    @property
    def name(self) -> str:
        return f'lossy_enforced(drift={self._drift_rate})'

    def reset(self):
        """Reset call counter (for new signal)."""
        self._call_count = 0

    def compress(self, text: str, target_ratio: float = 0.5) -> str:
        """
        Compress text while keeping commitment-bearing sentences.

        NO modal softening and NO quantity erosion are applied — that is
        what enforcement means: the gate preserves modal operators and
        quantities intact. Only priority sentence selection runs, with
        modal-bearing sentences always ranked first.
        """
        self._call_count += 1
        # Same deterministic seeding scheme as LossyBackend.
        seed = int(hashlib.md5(text.encode()).hexdigest()[:8], 16) + self._call_count
        rng = random.Random(seed)
        result = self._priority_drop(text, rng, target_ratio)
        return result.strip()

    def _priority_drop(self, text: str, rng: random.Random, target_ratio: float) -> str:
        """Drop sentences but PRIORITIZE modal-bearing ones."""
        sentences = re.split(r'(?<=[.!?])\s+', text)
        if len(sentences) <= 1:
            return text
        target_count = max(1, int(len(sentences) * target_ratio))
        if len(sentences) <= target_count:
            return text
        scored = []
        for i, sent in enumerate(sentences):
            has_modal = any(m in sent.lower() for m in
                            ['must', 'shall', 'cannot', 'required', 'always', 'never',
                             'should', 'could', 'might', 'expected', 'encouraged'])
            # Modal sentences get HIGH priority under enforcement: their
            # base score (1.0) always beats any non-modal score (< 0.5).
            score = (1.0 if has_modal else 0.0) + rng.random() * 0.5
            scored.append((score, i, sent))
        scored.sort(key=lambda x: -x[0])
        kept = scored[:target_count]
        kept.sort(key=lambda x: x[1])  # Restore original order
        return ' '.join(sent for _, _, sent in kept)