File size: 4,499 Bytes
0c01887
d74c718
a2ca6f9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
624478a
 
 
 
 
 
 
a2ca6f9
 
 
 
 
 
 
 
 
 
 
 
bb6e650
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
from pydantic import BaseModel, Field
from typing import List, Literal

# --- Mission-Driven Abstractions ---


class RelevanceCriteria(BaseModel):
    """Deterministic boolean predicate for filtering detections against a mission.

    This is the ONLY input to evaluate_relevance(). It intentionally excludes
    context_phrases, stripped_modifiers, and all LLM-derived context so that
    relevance filtering remains purely deterministic (INV-13).
    """
    # Allow-list of detection labels; pydantic rejects an empty list at
    # construction time (min_length=1).
    # NOTE(review): `min_length` on a list field is pydantic v2 syntax
    # (v1 used `min_items`) — confirm the project pins pydantic>=2.
    required_classes: List[str] = Field(
        ..., min_length=1,
        description="Object categories that satisfy the mission. "
                    "Detections whose label is not in this list are excluded."
    )
    # Confidence floor constrained to [0.0, 1.0]; the 0.0 default means
    # confidence filtering is effectively disabled unless set explicitly.
    min_confidence: float = Field(
        default=0.0, ge=0.0, le=1.0,
        description="Minimum detector confidence to consider a detection relevant."
    )


class MissionSpecification(BaseModel):
    """Structured representation of operator intent.

    Created once from raw mission text at the API boundary (app.py).
    Forwarded to: detector (object_classes), GPT (full spec), chat (full spec),
    relevance gate (relevance_criteria only — INV-13).

    INVARIANT INV-13: context_phrases are forwarded to LLM reasoning layers
    (GPT threat assessment, threat chat) as situational context ONLY.
    They must NEVER be used in evaluate_relevance(), prioritization,
    or any deterministic filtering/sorting logic.
    """

    # --- Extracted by LLM or fast-path ---
    # Required and non-empty (min_length=1): an extraction that yields no
    # classes cannot produce a valid spec.
    object_classes: List[str] = Field(
        ..., min_length=1,
        description="Concrete, visually detectable object categories to detect. "
                    "These become detector queries. Must be nouns, not adjectives or verbs."
    )
    # Closed set enforced by Literal — an unrecognized intent fails validation
    # rather than silently passing through.
    mission_intent: Literal[
        "DETECT", "CLASSIFY", "TRACK", "ASSESS_THREAT", "MONITOR"
    ] = Field(
        ...,
        description="Operator purpose. DETECT=find objects, CLASSIFY=identify type, "
                    "TRACK=follow over time, ASSESS_THREAT=evaluate danger, MONITOR=passive watch."
    )
    domain: Literal[
        "NAVAL", "GROUND", "AERIAL", "URBAN", "GENERIC"
    ] = Field(
        ...,
        description="Operational domain. Selects the GPT assessment schema and system prompt."
    )
    # Provenance flag: defaults to INFERRED, so an operator override must be
    # recorded explicitly by the caller.
    domain_source: Literal["INFERRED", "OPERATOR_SET"] = Field(
        default="INFERRED",
        description="Whether domain was LLM-inferred or explicitly set by operator."
    )

    # --- Deterministic (derived from object_classes) ---
    # Required nested model; see RelevanceCriteria for why it carries no
    # LLM-derived context (INV-13).
    relevance_criteria: RelevanceCriteria = Field(
        ...,
        description="Boolean predicate for filtering detections. "
                    "Built deterministically from object_classes after extraction."
    )

    # --- Context preservation ---
    # default_factory=list (not a shared mutable default) — each instance gets
    # its own list.
    context_phrases: List[str] = Field(
        default_factory=list,
        description="Non-class contextual phrases from mission text. "
                    "E.g., 'approaching from the east', 'near the harbor'. "
                    "Forwarded to GPT as situational context, NOT used for detection."
    )
    stripped_modifiers: List[str] = Field(
        default_factory=list,
        description="Adjectives/modifiers removed during extraction. "
                    "E.g., 'hostile', 'suspicious', 'friendly'. Logged for audit."
    )
    # Kept verbatim so the original operator wording is always auditable.
    operator_text: str = Field(
        ...,
        description="Original unmodified mission text from the operator. Preserved for audit."
    )

    # --- Parse mode ---
    parse_mode: Literal["FAST_PATH", "LLM_EXTRACTED"] = Field(
        default="FAST_PATH",
        description="How this spec was created. FAST_PATH = comma-separated labels, "
                    "LLM_EXTRACTED = natural language parsed by GPT."
    )

    # --- LLM self-assessment ---
    # Required (no default): the extractor must always state its confidence;
    # per the description, LOW triggers rejection downstream.
    parse_confidence: Literal["HIGH", "MEDIUM", "LOW"] = Field(
        ...,
        description="Confidence in the extraction. "
                    "LOW = could not reliably extract classes -> triggers rejection."
    )
    parse_warnings: List[str] = Field(
        default_factory=list,
        description="Specific issues encountered during extraction. "
                    "E.g., 'term \"threat\" is not a visual class, stripped'."
    )


class AssessmentStatus:
    """Canonical string constants for detection assessment lifecycle.

    Plain class used as a string namespace (not an Enum): values compare and
    serialize as ordinary strings, so they are safe in JSON payloads and
    pydantic Literal fields.

    NOTE(review): per-state semantics below are inferred from the names —
    confirm against the assessment pipeline that consumes them.
    """
    ASSESSED = "ASSESSED"            # presumably: GPT assessment completed
    UNASSESSED = "UNASSESSED"        # presumably: not yet submitted for assessment
    PENDING_GPT = "PENDING_GPT"      # presumably: awaiting a GPT response
    SKIPPED_POLICY = "SKIPPED_POLICY"  # presumably: skipped by policy rule
    REFUSED = "REFUSED"              # presumably: model refused to assess
    ERROR = "ERROR"                  # presumably: assessment attempt failed
    NO_RESPONSE = "NO_RESPONSE"      # presumably: no answer returned in time
    STALE = "STALE"                  # presumably: assessment outdated/expired