"""
Extracts the statistical signature of human writing vs AI writing.
Uses Kaggle datasets to build:

1. HumanPatternFeatureExtractor - maps raw text to a 17-dim statistical feature vector
2. KaggleHumanPatternDataset    - (feature_vector, label) pairs built from the Kaggle corpora
3. HumanPatternClassifier       - a lightweight FROZEN classifier used at training time
   to score how "human-like" the model's output looks.

The classifier is FROZEN during main model training. It is pre-trained separately
on the Kaggle datasets, then its output score is used as a reward/penalty signal
in the main training loss.

Feature set extracted (17 dimensions):
  - Perplexity under GPT-2 (AI text tends to have lower perplexity)
  - Burstiness score (human writing has more sentence length variance)
  - Sentence starter diversity
  - n-gram novelty scores (bigram, trigram, 4-gram)
  - AI marker density
  - Overused discourse density
  - Punctuation patterns (em-dash, ellipsis, comma, semicolon rates)
  - Distributional features (word count, sentence count, mean/std sent length, TTR)
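
Typical pipeline (a sketch; the file paths are placeholders for wherever the
Kaggle downloads live, not fixed locations):

    extractor = HumanPatternFeatureExtractor()
    dataset = KaggleHumanPatternDataset("AI_Human.csv", "data.parquet", extractor)
    dataset.precompute_features()
    # ...train HumanPatternClassifier on the (features, label) pairs, then freeze it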
"""

import math
import multiprocessing as mp
import re
from concurrent.futures import ProcessPoolExecutor
from typing import List, Optional, Tuple

import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from loguru import logger
from torch.utils.data import Dataset
from transformers import GPT2LMHeadModel, GPT2TokenizerFast


# ── AI-Typical Overused Discourse Markers ───────────────────────────────────
AI_OVERUSED_MARKERS = {
    "furthermore", "moreover", "additionally", "consequently",
    "in conclusion", "to summarize", "it is worth noting",
    "it is important to note", "in today's world", "in today's society",
    "in the modern era", "as previously mentioned", "needless to say",
    "it goes without saying", "at the end of the day",
    "in terms of", "with regard to", "with respect to",
    "delve", "leverage", "utilize", "holistic", "paradigm",
    "transformative", "groundbreaking", "revolutionary", "game-changing",
    "multifaceted", "nuanced", "comprehensive", "robust", "seamless",
    "innovative", "synergy", "cutting-edge", "state-of-the-art",
}

# Words that AI uses far MORE than humans in academic-adjacent writing
AI_FINGERPRINT_WORDS = {
    "delve", "underscore", "tapestry", "intricate", "pivotal",
    "crucial", "vital", "essential", "significant", "notable",
    "commendable", "noteworthy", "straightforward", "straightforwardly",
    "elucidate", "expound", "illuminate", "unravel", "harness",
    "foster", "facilitate", "leverage", "optimize", "streamline",
}


# ── Standalone text-feature functions (picklable for multiprocessing) ───────
def _compute_text_features(text: str) -> np.ndarray:
    """Compute the 16 non-perplexity features from raw text.
    Returns a 16-dim float32 array (features 2-17, perplexity slot excluded).
    This function is designed to be called in a worker process.
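
    Example (illustrative):
        >>> _compute_text_features("Short one. A much longer second sentence follows!").shape
        (16,)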
    """
    if not text or not text.strip():
        return np.zeros(16, dtype=np.float32)

    words = text.split()
    word_count = max(len(words), 1)

    # Cheap sentence splitting (regex-based; avoids needing spaCy in workers)
    raw_sents = re.split(r'(?<=[.!?])\s+', text.strip())
    sentences = [s.strip() for s in raw_sents if s.strip()]
    sent_lengths = [len(s.split()) for s in sentences] if sentences else [0]

    features = []

    # 1. Burstiness: coefficient of variation (std/mean) of sentence lengths;
    # human writing tends to be burstier than AI writing
    if len(sentences) < 2:
        features.append(0.0)
    else:
        mean_len = np.mean(sent_lengths)
        features.append(float(np.std(sent_lengths) / mean_len) if mean_len > 0 else 0.0)

    # 2. Sentence starter diversity
    if not sentences:
        features.append(0.0)
    else:
        starters = []
        for s in sentences:
            w = s.strip().split()
            if w:
                starters.append(w[0].lower())
        features.append(len(set(starters)) / len(starters) if starters else 0.0)

    # 3-5. N-gram novelty (bigram, trigram, 4-gram)
    words_lower = text.lower().split()
    for n in (2, 3, 4):
        if len(words_lower) < n:
            features.append(1.0)
        else:
            ngrams = [tuple(words_lower[i:i + n]) for i in range(len(words_lower) - n + 1)]
            features.append(len(set(ngrams)) / len(ngrams) if ngrams else 1.0)

    # 6. AI marker density: distinct fingerprint words present, per 100 words
    word_set = set(words_lower)
    ai_count = len(word_set & AI_FINGERPRINT_WORDS)
    features.append((ai_count / word_count) * 100)

    # 7. Overused discourse density
    text_lower = text.lower()
    discourse_count = sum(1 for marker in AI_OVERUSED_MARKERS if marker in text_lower)
    features.append((discourse_count / word_count) * 100)

    # 8-11. Punctuation patterns
    features.append((text.count("\u2014") + text.count("\u2013")) / word_count * 100)  # em/en dash
    features.append(text.count("...") / word_count * 100)  # ellipsis
    features.append(text.count(",") / word_count * 100)  # comma
    features.append(text.count(";") / word_count * 100)  # semicolon

    # 12. Word count (log-scaled)
    features.append(np.log1p(word_count))

    # 13. Sentence count (log-scaled)
    features.append(np.log1p(len(sentences)))

    # 14. Mean sentence length
    features.append(np.mean(sent_lengths))

    # 15. Std sentence length
    features.append(np.std(sent_lengths) if len(sent_lengths) > 1 else 0.0)

    # 16. Type-token ratio
    unique_words = set(w.lower() for w in words)
    features.append(len(unique_words) / word_count)

    return np.array(features, dtype=np.float32)


class HumanPatternFeatureExtractor:
    """Extracts 17-dimensional feature vector encoding human vs AI writing patterns.

    Optimised for bulk extraction:
      - GPT-2 perplexity computed in batches on GPU (if available)
      - Text features computed in parallel via multiprocessing
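
    Usage sketch (shapes only; assumes the HF "gpt2" weights are downloadable):

        extractor = HumanPatternFeatureExtractor(device="cpu")
        vec = extractor.extract("Some sample text.")                   # (17,)
        mat = extractor.extract_batch(["First text.", "Second one."])  # (2, 17)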
    """

    def __init__(self, spacy_model: str = "en_core_web_sm", device: Optional[str] = None):
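        # NOTE: spacy_model is accepted but currently unused; sentence splitting
        # in _compute_text_features is regex-based precisely to avoid spaCy.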
        # Determine device
        if device is None:
            self.device = "cuda" if torch.cuda.is_available() else "cpu"
        else:
            self.device = device

        # GPT-2 for perplexity calculation
        logger.info("Loading GPT-2 for perplexity calculation...")
        self.gpt2_model = GPT2LMHeadModel.from_pretrained("gpt2")
        self.gpt2_tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
        self.gpt2_tokenizer.pad_token = self.gpt2_tokenizer.eos_token
        self.gpt2_model.eval()

        # Move to best available device
        self.gpt2_model = self.gpt2_model.to(self.device)

        # Use half precision on GPU for speed
        if self.device.startswith("cuda"):
            self.gpt2_model = self.gpt2_model.half()
            logger.info(f"GPT-2 loaded on {self.device} with fp16")
        else:
            logger.info(f"GPT-2 loaded on {self.device}")

        logger.info("HumanPatternFeatureExtractor initialised")

    def _perplexity(self, text: str, max_len: int = 256) -> float:
        """GPT-2 perplexity for a single text. Lower = more AI-like."""
        try:
            encodings = self.gpt2_tokenizer(
                text, return_tensors="pt", truncation=True, max_length=max_len
            )
            input_ids = encodings["input_ids"].to(self.device)

            if input_ids.size(1) < 2:
                return 100.0  # Default for very short text

            with torch.no_grad():
                outputs = self.gpt2_model(input_ids, labels=input_ids)
                loss = outputs.loss

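            # perplexity = exp(mean token NLL); capping the loss at 10 bounds
            # the result at e^10 (~22026) instead of letting it overflow to inf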
            return math.exp(min(loss.float().item(), 10))  # Cap to avoid inf
        except Exception:
            return 100.0  # Safe default

    def _perplexity_batch(self, texts: List[str], max_len: int = 256, batch_size: int = 8) -> List[float]:
        """Compute GPT-2 perplexity for a batch of texts efficiently on GPU.

        Processes texts in mini-batches with padding for maximum throughput.
        Default batch_size=8 sized for GPUs with ~4GB VRAM (e.g. RTX 3050).
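
        Per-sample perplexity is recovered from the unreduced token losses:
            ppl_i = exp( sum_t(loss[i,t] * mask[i,t]) / sum_t(mask[i,t]) )
        so padded positions never contribute to the mean.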
        """
        results = []

        for i in range(0, len(texts), batch_size):
            batch_texts = texts[i:i + batch_size]

            # Tokenise with padding
            encodings = self.gpt2_tokenizer(
                batch_texts,
                return_tensors="pt",
                truncation=True,
                max_length=max_len,
                padding=True,
            )

            input_ids = encodings["input_ids"].to(self.device)
            attention_mask = encodings["attention_mask"].to(self.device)

            # Autocast only on CUDA; device strings like "cuda:0" map to "cuda"
            autocast_device = "cuda" if self.device.startswith("cuda") else "cpu"
            with torch.no_grad(), torch.amp.autocast(device_type=autocast_device, enabled=(autocast_device == "cuda")):
                # Forward pass for the whole batch
                outputs = self.gpt2_model(
                    input_ids,
                    attention_mask=attention_mask,
                )
                logits = outputs.logits

            # Compute per-sample perplexity from logits
            # Shift logits and labels for causal LM loss
            shift_logits = logits[:, :-1, :].contiguous()
            shift_labels = input_ids[:, 1:].contiguous()
            shift_mask = attention_mask[:, 1:].contiguous()

            # Per-token cross entropy (no reduction)
            loss_fct = nn.CrossEntropyLoss(reduction="none")
            # Reshape for loss computation
            per_token_loss = loss_fct(
                shift_logits.view(-1, shift_logits.size(-1)),
                shift_labels.view(-1),
            ).view(shift_labels.size())

            # Mask out padding tokens and compute mean per sample
            masked_loss = per_token_loss * shift_mask.float()
            token_counts = shift_mask.float().sum(dim=1).clamp(min=1)
            per_sample_loss = masked_loss.sum(dim=1) / token_counts

            # Convert to perplexity
            for loss_val in per_sample_loss:
                ppl = math.exp(min(loss_val.float().item(), 10))
                results.append(ppl)

            # Free GPU memory between batches (critical for low-VRAM GPUs)
            del input_ids, attention_mask, outputs, logits, shift_logits, shift_labels
            del shift_mask, per_token_loss, masked_loss, token_counts, per_sample_loss
            if self.device.startswith("cuda"):
                torch.cuda.empty_cache()

        return results

    def extract(self, text: str) -> np.ndarray:
        """Extract full 17-dimensional feature vector for a single text."""
        if not text or not text.strip():
            return np.zeros(17, dtype=np.float32)

        # Perplexity (feature 1)
        ppl = self._perplexity(text)

        # All other features (features 2-17)
        text_features = _compute_text_features(text)

        # Combine: [perplexity, ...16 text features]
        features = np.empty(17, dtype=np.float32)
        features[0] = ppl
        features[1:] = text_features

        return features

    def extract_batch(
        self,
        texts: List[str],
        batch_size: Optional[int] = None,
        num_workers: int = 0,
        progress_every: int = 1000,
    ) -> np.ndarray:
        """Extract features for many texts efficiently.

        Strategy:
          1. Compute perplexity in batched GPU forward passes
          2. Compute text features in parallel via multiprocessing
          3. Merge into (N, 17) array

        Args:
            texts: List of text strings
            batch_size: Batch size for GPT-2 perplexity (default 8 for ~4GB VRAM GPUs)
            num_workers: Number of processes for text features. 0 = auto-detect.
            progress_every: Log progress every N texts

        Returns:
            np.ndarray of shape (len(texts), 17)
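
        Example (sketch):
            feats = extractor.extract_batch(["one text", "another text"])
            assert feats.shape == (2, 17)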
        """
        n = len(texts)
        if batch_size is None:
            # Auto-size: 8 for 4GB VRAM, 16 for 8GB, 32 for 16GB+
            if self.device.startswith("cuda"):
                vram_gb = torch.cuda.get_device_properties(0).total_memory / (1024**3)
                batch_size = max(8, min(32, round(vram_gb) * 2))
            else:
                batch_size = 4
        logger.info(f"Extracting features for {n} texts (device={self.device}, batch_size={batch_size})")

        # ── Step 1: Batched perplexity on GPU ──────────────────────────────
        logger.info("  Computing batched GPT-2 perplexity...")
        all_ppl = []
        for start in range(0, n, batch_size):
            end = min(start + batch_size, n)
            batch = texts[start:end]
            ppl_batch = self._perplexity_batch(batch, batch_size=len(batch))
            all_ppl.extend(ppl_batch)

            if (start // batch_size) % max(1, (progress_every // batch_size)) == 0 and start > 0:
                logger.info(f"    Perplexity: {start}/{n}")

        logger.info(f"    Perplexity complete: {n}/{n}")

        # ── Step 2: Text features in parallel ──────────────────────────────
        logger.info("  Computing text features (parallel)...")
        if num_workers == 0:
            num_workers = min(mp.cpu_count(), 8)

        # For small datasets or if multiprocessing causes issues, fall back to serial
        if n < 500 or num_workers <= 1:
            text_features_list = []
            for i, text in enumerate(texts):
                text_features_list.append(_compute_text_features(text))
                if i > 0 and i % progress_every == 0:
                    logger.info(f"    Text features: {i}/{n}")
        else:
            # Use ProcessPoolExecutor for CPU-bound text feature extraction
            text_features_list = []
            with ProcessPoolExecutor(max_workers=num_workers) as executor:
                # Submit in chunks for better progress tracking
                chunk_size = 2000
                for chunk_start in range(0, n, chunk_size):
                    chunk_end = min(chunk_start + chunk_size, n)
                    chunk = texts[chunk_start:chunk_end]
                    chunk_results = list(executor.map(_compute_text_features, chunk, chunksize=200))
                    text_features_list.extend(chunk_results)
                    if chunk_start > 0:
                        logger.info(f"    Text features: {chunk_start}/{n}")

        logger.info(f"    Text features complete: {n}/{n}")

        # ── Step 3: Merge ──────────────────────────────────────────────────
        features = np.empty((n, 17), dtype=np.float32)
        features[:, 0] = np.array(all_ppl, dtype=np.float32)
        features[:, 1:] = np.array(text_features_list, dtype=np.float32)

        return features


class KaggleHumanPatternDataset(Dataset):
    """
    Loads both Kaggle datasets and produces (feature_vector, label) pairs.
    label = 1 (human) | 0 (AI)
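
    Usage sketch (after construction; labels collate to int64):

        ds.precompute_features()                       # fast batched extraction
        loader = torch.utils.data.DataLoader(ds, batch_size=64, shuffle=True)
        feats, labels = next(iter(loader))             # (64, 17) float32, (64,)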
    """

    def __init__(
        self,
        shanegerami_path: str,
        starblasters_path: str,
        extractor: HumanPatternFeatureExtractor,
        max_samples_per_source: int = 50000,
    ):
        self.extractor = extractor
        self.texts = []
        self.labels = []

        # Load Shanegerami AI_Human.csv
        logger.info(f"Loading Shanegerami dataset from {shanegerami_path}...")
        try:
            df_shane = pd.read_csv(shanegerami_path, nrows=max_samples_per_source * 2)
            # Auto-detect column names
            text_col = None
            label_col = None
            for col in df_shane.columns:
                col_lower = col.lower()
                if col_lower in ("text", "essay_text", "content", "essay"):
                    text_col = col
                elif col_lower in ("generated", "label", "is_ai", "ai_generated", "class"):
                    label_col = col

            if text_col is None:
                text_col = df_shane.columns[0]
                logger.warning(f"Auto-detected text column: {text_col}")
            if label_col is None:
                label_col = df_shane.columns[-1]
                logger.warning(f"Auto-detected label column: {label_col}")

            # Sample balanced dataset
            human_mask = df_shane[label_col] == 0
            ai_mask = df_shane[label_col] == 1

            human_texts = df_shane.loc[human_mask, text_col].dropna().head(max_samples_per_source).tolist()
            ai_texts = df_shane.loc[ai_mask, text_col].dropna().head(max_samples_per_source).tolist()

            self.texts.extend(human_texts)
            self.labels.extend([1] * len(human_texts))  # 1 = human
            self.texts.extend(ai_texts)
            self.labels.extend([0] * len(ai_texts))  # 0 = AI

            logger.info(f"Shanegerami: {len(human_texts)} human + {len(ai_texts)} AI samples")
        except Exception as e:
            logger.warning(f"Failed to load Shanegerami dataset: {e}")

        # Load Starblasters8 data.parquet
        logger.info(f"Loading Starblasters8 dataset from {starblasters_path}...")
        try:
            df_star = pd.read_parquet(starblasters_path)

            # Auto-detect columns
            text_col = None
            label_col = None
            for col in df_star.columns:
                col_lower = col.lower()
                if col_lower in ("text", "essay_text", "content", "essay"):
                    text_col = col
                elif col_lower in ("generated", "label", "is_ai", "ai_generated", "source"):
                    label_col = col

            if text_col is None:
                text_col = df_star.columns[0]
            if label_col is None:
                label_col = df_star.columns[-1]

            human_mask = df_star[label_col] == 0
            ai_mask = df_star[label_col] == 1

            human_texts = df_star.loc[human_mask, text_col].dropna().head(max_samples_per_source).tolist()
            ai_texts = df_star.loc[ai_mask, text_col].dropna().head(max_samples_per_source).tolist()

            self.texts.extend(human_texts)
            self.labels.extend([1] * len(human_texts))
            self.texts.extend(ai_texts)
            self.labels.extend([0] * len(ai_texts))

            logger.info(f"Starblasters8: {len(human_texts)} human + {len(ai_texts)} AI samples")
        except Exception as e:
            logger.warning(f"Failed to load Starblasters8 dataset: {e}")

        logger.info(f"Total dataset size: {len(self.texts)} samples")

        # Feature cache; populated by precompute_features() for training speed
        self._features = None
        self._precomputed = False

    def precompute_features(self):
        """Pre-compute all features using optimised batched extraction."""
        if self._precomputed:
            return

        logger.info("Pre-computing features for all texts...")

        # Truncate very long texts for speed
        truncated_texts = [str(text)[:2000] for text in self.texts]

        # Use the fast batched extraction path
        features_array = self.extractor.extract_batch(
            truncated_texts,
            batch_size=None,  # Auto-detect based on VRAM
            num_workers=0,  # Auto-detect CPU count
            progress_every=2000,
        )

        # Store as list of arrays for compatibility with __getitem__
        self._features = [features_array[i] for i in range(len(features_array))]
        self._precomputed = True
        logger.info("Feature pre-computation complete")

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, idx: int) -> Tuple[torch.Tensor, int]:
        if self._precomputed and self._features is not None:
            features = self._features[idx]
        else:
            text = str(self.texts[idx])[:2000]
            features = self.extractor.extract(text)

        features_tensor = torch.tensor(features, dtype=torch.float32)

        # Handle NaN/Inf values that can occur from edge cases
        features_tensor = torch.nan_to_num(features_tensor, nan=0.0, posinf=10.0, neginf=-10.0)

        return features_tensor, self.labels[idx]


class HumanPatternClassifier(nn.Module):
    """
    Lightweight MLP trained to distinguish human from AI writing.
    Input: feature vector from HumanPatternFeatureExtractor
    Output: probability that text is human-written (0 to 1)

    PRE-TRAINED on Kaggle datasets, then FROZEN during main training.
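
    Freezing sketch (assumes `clf` has already been pre-trained):

        clf.eval()
        for p in clf.parameters():
            p.requires_grad_(False)
        human_scores = clf(feature_batch)  # in [0, 1]; grads flow to features only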
    """

    def __init__(self, input_dim: int = 17, hidden_dim: int = 128):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.BatchNorm1d(hidden_dim),
            nn.ReLU(),
            nn.Dropout(0.3),
            nn.Linear(hidden_dim, hidden_dim // 2),
            nn.BatchNorm1d(hidden_dim // 2),
            nn.ReLU(),
            nn.Dropout(0.2),
            nn.Linear(hidden_dim // 2, 1),
        )

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        """Returns human-likeness score in [0, 1]. Higher = more human."""
        logits = self.net(features)
        return torch.sigmoid(logits).squeeze(-1)

    def score(self, text: str, extractor: HumanPatternFeatureExtractor) -> float:
        """Convenience: score a single text string."""
        self.eval()
        features = extractor.extract(text)
        features_tensor = torch.tensor(features, dtype=torch.float32).unsqueeze(0)
        features_tensor = torch.nan_to_num(features_tensor, nan=0.0, posinf=10.0, neginf=-10.0)
        # Match the classifier's device (e.g. if it was moved to GPU)
        features_tensor = features_tensor.to(next(self.parameters()).device)
        with torch.no_grad():
            score = self.forward(features_tensor)
        return score.item()
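

if __name__ == "__main__":
    # Smoke test (a sketch, not part of the training pipeline): score two toy
    # strings with an UNTRAINED classifier. The scores themselves are
    # meaningless here; this only verifies that shapes and devices line up.
    extractor = HumanPatternFeatureExtractor(device="cpu")
    clf = HumanPatternClassifier()
    samples = [
        "The quick brown fox jumps over the lazy dog. Then it rested.",
        "Furthermore, it is important to note that leveraging robust, "
        "cutting-edge paradigms can facilitate transformative synergy.",
    ]
    for sample in samples:
        print(f"{clf.score(sample, extractor):.3f}  <-  {sample[:60]}")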