# detection_base/utils/schemas.py
# Author: Zhen Ye
# chore: remove dead code and unused frontend modules (commit d74c718)
from pydantic import BaseModel, Field
from typing import List, Literal
# --- Mission-Driven Abstractions ---
class RelevanceCriteria(BaseModel):
    """Deterministic boolean predicate for filtering detections against a mission.

    This is the ONLY input to evaluate_relevance(). It intentionally excludes
    context_phrases, stripped_modifiers, and all LLM-derived context so that
    relevance filtering remains purely deterministic (INV-13).
    """

    # Allow-list of detector labels. min_length=1 makes pydantic reject an
    # empty list at validation time, so a spec can never pass filtering
    # trivially by matching nothing.
    required_classes: List[str] = Field(
        ..., min_length=1,
        description="Object categories that satisfy the mission. "
        "Detections whose label is not in this list are excluded."
    )
    # Confidence threshold constrained to [0.0, 1.0]; the 0.0 default means
    # "no confidence filtering" unless the caller tightens it.
    min_confidence: float = Field(
        default=0.0, ge=0.0, le=1.0,
        description="Minimum detector confidence to consider a detection relevant."
    )
class MissionSpecification(BaseModel):
    """Structured representation of operator intent.

    Created once from raw mission text at the API boundary (app.py).
    Forwarded to: detector (object_classes), GPT (full spec), chat (full spec),
    relevance gate (relevance_criteria only — INV-13).

    INVARIANT INV-13: context_phrases are forwarded to LLM reasoning layers
    (GPT threat assessment, threat chat) as situational context ONLY.
    They must NEVER be used in evaluate_relevance(), prioritization,
    or any deterministic filtering/sorting logic.
    """

    # --- Extracted by LLM or fast-path ---
    # Required and non-empty (min_length=1): a spec with no detectable
    # classes is invalid by construction.
    object_classes: List[str] = Field(
        ..., min_length=1,
        description="Concrete, visually detectable object categories to detect. "
        "These become detector queries. Must be nouns, not adjectives or verbs."
    )
    # Closed set enforced by Literal — an out-of-vocabulary intent fails
    # validation rather than silently passing through.
    mission_intent: Literal[
        "DETECT", "CLASSIFY", "TRACK", "ASSESS_THREAT", "MONITOR"
    ] = Field(
        ...,
        description="Operator purpose. DETECT=find objects, CLASSIFY=identify type, "
        "TRACK=follow over time, ASSESS_THREAT=evaluate danger, MONITOR=passive watch."
    )
    domain: Literal[
        "NAVAL", "GROUND", "AERIAL", "URBAN", "GENERIC"
    ] = Field(
        ...,
        description="Operational domain. Selects the GPT assessment schema and system prompt."
    )
    # Provenance flag for `domain`; defaults to the LLM-inferred case.
    domain_source: Literal["INFERRED", "OPERATOR_SET"] = Field(
        default="INFERRED",
        description="Whether domain was LLM-inferred or explicitly set by operator."
    )

    # --- Deterministic (derived from object_classes) ---
    # The only part of this spec that evaluate_relevance() may consume (INV-13).
    relevance_criteria: RelevanceCriteria = Field(
        ...,
        description="Boolean predicate for filtering detections. "
        "Built deterministically from object_classes after extraction."
    )

    # --- Context preservation ---
    # default_factory (not a shared mutable default) gives each instance its
    # own empty list.
    context_phrases: List[str] = Field(
        default_factory=list,
        description="Non-class contextual phrases from mission text. "
        "E.g., 'approaching from the east', 'near the harbor'. "
        "Forwarded to GPT as situational context, NOT used for detection."
    )
    stripped_modifiers: List[str] = Field(
        default_factory=list,
        description="Adjectives/modifiers removed during extraction. "
        "E.g., 'hostile', 'suspicious', 'friendly'. Logged for audit."
    )
    # Raw input retained verbatim so extraction decisions remain auditable.
    operator_text: str = Field(
        ...,
        description="Original unmodified mission text from the operator. Preserved for audit."
    )

    # --- Parse mode ---
    parse_mode: Literal["FAST_PATH", "LLM_EXTRACTED"] = Field(
        default="FAST_PATH",
        description="How this spec was created. FAST_PATH = comma-separated labels, "
        "LLM_EXTRACTED = natural language parsed by GPT."
    )

    # --- LLM self-assessment ---
    # Required (no default): every spec must carry an explicit confidence;
    # LOW is the documented rejection trigger.
    parse_confidence: Literal["HIGH", "MEDIUM", "LOW"] = Field(
        ...,
        description="Confidence in the extraction. "
        "LOW = could not reliably extract classes -> triggers rejection."
    )
    parse_warnings: List[str] = Field(
        default_factory=list,
        description="Specific issues encountered during extraction. "
        "E.g., 'term \"threat\" is not a visual class, stripped'."
    )
class AssessmentStatus:
    """Canonical string constants for the detection assessment lifecycle.

    A plain constant namespace rather than an Enum, so each value is an
    ordinary ``str`` that compares and serializes directly. Every value is
    spelled identically to its attribute name.
    """

    # Completed / not-yet-run states.
    ASSESSED: str = "ASSESSED"
    UNASSESSED: str = "UNASSESSED"
    PENDING_GPT: str = "PENDING_GPT"

    # States where no assessment result was produced.
    SKIPPED_POLICY: str = "SKIPPED_POLICY"
    REFUSED: str = "REFUSED"
    ERROR: str = "ERROR"
    NO_RESPONSE: str = "NO_RESPONSE"
    STALE: str = "STALE"