Spaces:
Paused
Paused
Zhen Ye Claude Opus 4.6 committed on
Commit ·
69345d4
1
Parent(s): be9d440
feat: add /chat/mission endpoint and rewire chat UI
Browse filesReplace deleted /chat/threat with generic /chat/mission endpoint for
mission analyst Q&A. Add chatAboutMission() to client.js. Update
chat.js to use new endpoint with AI icon label.
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
- app.py +96 -8
- frontend/js/api/client.js +21 -0
- frontend/js/ui/chat.js +4 -5
app.py
CHANGED
|
@@ -174,18 +174,15 @@ async def add_no_cache_header(request: Request, call_next):
|
|
| 174 |
"""Ensure frontend assets are not cached by the browser (important for HF Spaces updates)."""
|
| 175 |
response = await call_next(request)
|
| 176 |
# Apply to all static files and the root page
|
| 177 |
-
if request.url.path.startswith("/
|
| 178 |
response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
|
| 179 |
response.headers["Pragma"] = "no-cache"
|
| 180 |
response.headers["Expires"] = "0"
|
| 181 |
return response
|
| 182 |
|
| 183 |
-
# Optional: serve the LaserPerception frontend from this backend.
|
| 184 |
-
# The frontend files are now located in the 'frontend' directory.
|
| 185 |
_FRONTEND_DIR = Path(__file__).with_name("frontend")
|
| 186 |
if _FRONTEND_DIR.exists():
|
| 187 |
-
|
| 188 |
-
app.mount("/laser", StaticFiles(directory=_FRONTEND_DIR, html=True), name="laser")
|
| 189 |
|
| 190 |
# Valid detection modes
|
| 191 |
VALID_MODES = {"object_detection", "segmentation", "drone_detection"}
|
|
@@ -237,9 +234,8 @@ def _default_queries_for_mode(mode: str) -> list[str]:
|
|
| 237 |
|
| 238 |
@app.get("/", response_class=HTMLResponse)
|
| 239 |
async def demo_page():
|
| 240 |
-
"""Redirect to
|
| 241 |
-
|
| 242 |
-
return RedirectResponse(url="/laser/index.html")
|
| 243 |
|
| 244 |
|
| 245 |
@app.post("/detect")
|
|
@@ -918,6 +914,98 @@ async def reason_track(
|
|
| 918 |
return results
|
| 919 |
|
| 920 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 921 |
@app.post("/benchmark")
|
| 922 |
async def benchmark_endpoint(
|
| 923 |
video: UploadFile = File(...),
|
|
|
|
| 174 |
"""Ensure frontend assets are not cached by the browser (important for HF Spaces updates)."""
|
| 175 |
response = await call_next(request)
|
| 176 |
# Apply to all static files and the root page
|
| 177 |
+
if request.url.path.startswith("/app") or request.url.path == "/":
|
| 178 |
response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
|
| 179 |
response.headers["Pragma"] = "no-cache"
|
| 180 |
response.headers["Expires"] = "0"
|
| 181 |
return response
|
| 182 |
|
|
|
|
|
|
|
| 183 |
_FRONTEND_DIR = Path(__file__).with_name("frontend")
|
| 184 |
if _FRONTEND_DIR.exists():
|
| 185 |
+
app.mount("/app", StaticFiles(directory=_FRONTEND_DIR, html=True), name="frontend")
|
|
|
|
| 186 |
|
| 187 |
# Valid detection modes
|
| 188 |
VALID_MODES = {"object_detection", "segmentation", "drone_detection"}
|
|
|
|
| 234 |
|
| 235 |
@app.get("/", response_class=RedirectResponse)
async def demo_page():
    """Redirect the site root to the Mission Console front-end.

    Returns:
        RedirectResponse to ``/app/index.html`` (307 by default).

    Note: the declared ``response_class`` now matches the ``RedirectResponse``
    actually returned; the previous ``HTMLResponse`` annotation was misleading
    (it only affected the generated OpenAPI schema, since a returned Response
    instance bypasses ``response_class`` at runtime).
    """
    return RedirectResponse(url="/app/index.html")
|
|
|
|
| 239 |
|
| 240 |
|
| 241 |
@app.post("/detect")
|
|
|
|
| 914 |
return results
|
| 915 |
|
| 916 |
|
| 917 |
+
def _summarize_mission_detections(detection_list: list) -> str:
    """Render one human-readable summary line per detection for the GPT prompt."""
    lines = []
    for det in detection_list:
        entry = f"[{det.get('id', '?')}] {det.get('label', 'object')}"
        satisfies = det.get("satisfies")
        if satisfies is True:
            entry += " — satisfies condition"
        elif satisfies is False:
            entry += " — does NOT satisfy condition"
        if det.get("reason"):
            entry += f" ({det['reason']})"
        # `is not None` (not truthiness) so a legitimate 0.0 confidence still shows.
        if det.get("score") is not None:
            entry += f" (conf: {det['score']:.0%})"
        lines.append(entry)
    return "\n".join(lines) if lines else "No detections."


def _format_mission_context(mission_context: str) -> str:
    """Build the optional MISSION CONTEXT prompt block; '' when absent or unusable."""
    import json as json_module

    if not mission_context.strip():
        return ""
    try:
        spec = json_module.loads(mission_context)
    except json_module.JSONDecodeError:
        return ""  # best-effort: a malformed context is simply omitted
    if not isinstance(spec, dict):
        return ""  # defensive: valid JSON but not an object (would AttributeError below)
    block = "\nMISSION CONTEXT:\n"
    if spec.get("operator_text"):
        block += f"- Query: {spec['operator_text']}\n"
    if spec.get("object_classes"):
        block += f"- Target Classes: {', '.join(spec['object_classes'])}\n"
    if spec.get("satisfaction_condition"):
        block += f"- Condition: {spec['satisfaction_condition']}\n"
    return block


@app.post("/chat/mission")
async def chat_mission_endpoint(
    question: str = Form(...),
    detections: str = Form(...),
    mission_context: str = Form(""),
):
    """Chat about detected objects using GPT.

    Answers operator questions about the current detection results,
    satisfaction reasoning, and mission context.

    Form fields:
        question: free-text operator question (must be non-empty).
        detections: JSON-encoded list of detection dicts
            (keys used: id, label, satisfies, reason, score).
        mission_context: optional JSON mission spec
            (keys used: operator_text, object_classes, satisfaction_condition).

    Returns:
        ``{"response": <assistant text>}``.

    Raises:
        HTTPException: 400 for an empty question or malformed detections,
            503 when no OpenAI API key is configured, 502 on upstream API
            errors, 500 on unexpected failures.
    """
    import json as json_module
    from utils.openai_client import chat_completion, extract_content, get_api_key, OpenAIAPIError

    if not question.strip():
        raise HTTPException(status_code=400, detail="Question cannot be empty.")

    if not get_api_key():
        raise HTTPException(status_code=503, detail="OpenAI API key not configured.")

    try:
        detection_list = json_module.loads(detections)
    except json_module.JSONDecodeError:
        # `from None`: the decode traceback adds nothing to a 400 response.
        raise HTTPException(status_code=400, detail="Invalid detections JSON.") from None

    if not isinstance(detection_list, list):
        raise HTTPException(status_code=400, detail="Detections must be a list.")

    det_context = _summarize_mission_detections(detection_list)
    mission_block = _format_mission_context(mission_context)

    system_prompt = (
        "You are a mission analyst assistant for a video surveillance system. "
        "You have access to the current detection and analysis results. "
        "Answer questions concisely based on the detection data provided.\n\n"
        f"{mission_block}\n"
        f"CURRENT DETECTIONS:\n{det_context}\n\n"
        "Respond to the operator's question based on this data."
    )

    payload = {
        "model": "gpt-4o",
        "messages": [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": question},
        ],
        "max_tokens": 500,
        "temperature": 0.3,
    }

    try:
        # _GPT_SEMAPHORE bounds concurrent upstream calls; the blocking client
        # runs in a worker thread so the event loop stays responsive.
        async with _GPT_SEMAPHORE:
            resp_data = await asyncio.to_thread(chat_completion, payload)
        content, _refusal = extract_content(resp_data)
        return {"response": content.strip() if content else "No response generated."}
    except OpenAIAPIError as e:
        logging.error("Chat API error: %s", e)
        raise HTTPException(status_code=502, detail=f"API Error: {e}") from e
    except Exception as e:
        logging.exception("Mission chat failed")
        raise HTTPException(status_code=500, detail=str(e)) from e
|
| 1007 |
+
|
| 1008 |
+
|
| 1009 |
@app.post("/benchmark")
|
| 1010 |
async def benchmark_endpoint(
|
| 1011 |
video: UploadFile = File(...),
|
frontend/js/api/client.js
CHANGED
|
@@ -241,6 +241,27 @@ APP.api.client.pollAsyncJob = async function () {
|
|
| 241 |
});
|
| 242 |
};
|
| 243 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 244 |
// External detection hook (can be replaced by user)
|
| 245 |
APP.api.client.externalDetect = async function (input) {
|
| 246 |
console.log("externalDetect called", input);
|
|
|
|
| 241 |
});
|
| 242 |
};
|
| 243 |
|
| 244 |
+
// Ask the backend mission-analyst endpoint about the current detections.
// Resolves to the parsed JSON reply, or to { error: <detail> } on a non-2xx
// response (network-level failures still reject the promise, as before).
APP.api.client.chatAboutMission = async function (question, detections) {
  const { state } = APP.core;

  const body = new FormData();
  body.append("question", question);
  body.append("detections", JSON.stringify(detections));
  body.append("mission_context", state.missionText || "");

  const resp = await fetch(`${state.hf.baseUrl}/chat/mission`, {
    method: "POST",
    body,
  });

  if (resp.ok) {
    return await resp.json();
  }

  // Error path: prefer the server's JSON detail, fall back to the status text.
  const err = await resp.json().catch(() => ({ detail: resp.statusText }));
  return { error: err.detail || "Chat request failed" };
};
|
| 264 |
+
|
| 265 |
// External detection hook (can be replaced by user)
|
| 266 |
APP.api.client.externalDetect = async function (input) {
|
| 267 |
console.log("externalDetect called", input);
|
frontend/js/ui/chat.js
CHANGED
|
@@ -1,4 +1,4 @@
|
|
| 1 |
-
// Chat UI Module -
|
| 2 |
(function () {
|
| 3 |
const { state } = APP.core;
|
| 4 |
const { $ } = APP.core.utils;
|
|
@@ -40,7 +40,7 @@
|
|
| 40 |
}
|
| 41 |
|
| 42 |
/**
|
| 43 |
-
* Send a chat message about current
|
| 44 |
*/
|
| 45 |
async function sendMessage() {
|
| 46 |
const chatInput = $("#chatInput");
|
|
@@ -69,8 +69,7 @@
|
|
| 69 |
const loadingId = appendMessage("assistant", "Analyzing scene...", true);
|
| 70 |
|
| 71 |
try {
|
| 72 |
-
|
| 73 |
-
const response = { response: "Chat endpoint is not available." };
|
| 74 |
|
| 75 |
// Remove loading message
|
| 76 |
removeMessage(loadingId);
|
|
@@ -111,7 +110,7 @@
|
|
| 111 |
// Format content with line breaks
|
| 112 |
const formatted = content.replace(/\n/g, "<br>");
|
| 113 |
|
| 114 |
-
const icon = role === "user" ? "YOU" : role === "assistant" ? "
|
| 115 |
msgDiv.innerHTML = `<span class="chat-icon">${icon}</span><span class="chat-content">${formatted}</span>`;
|
| 116 |
|
| 117 |
chatMessages.appendChild(msgDiv);
|
|
|
|
| 1 |
+
// Chat UI Module - Mission analyst chat with GPT
|
| 2 |
(function () {
|
| 3 |
const { state } = APP.core;
|
| 4 |
const { $ } = APP.core.utils;
|
|
|
|
| 40 |
}
|
| 41 |
|
| 42 |
/**
|
| 43 |
+
* Send a chat message about current detections.
|
| 44 |
*/
|
| 45 |
async function sendMessage() {
|
| 46 |
const chatInput = $("#chatInput");
|
|
|
|
| 69 |
const loadingId = appendMessage("assistant", "Analyzing scene...", true);
|
| 70 |
|
| 71 |
try {
|
| 72 |
+
const response = await APP.api.client.chatAboutMission(question, state.detections);
|
|
|
|
| 73 |
|
| 74 |
// Remove loading message
|
| 75 |
removeMessage(loadingId);
|
|
|
|
| 110 |
// Format content with line breaks
|
| 111 |
const formatted = content.replace(/\n/g, "<br>");
|
| 112 |
|
| 113 |
+
const icon = role === "user" ? "YOU" : role === "assistant" ? "AI" : "SYS";
|
| 114 |
msgDiv.innerHTML = `<span class="chat-icon">${icon}</span><span class="chat-content">${formatted}</span>`;
|
| 115 |
|
| 116 |
chatMessages.appendChild(msgDiv);
|