MikelWL committed on
Commit
6bce3a4
·
1 Parent(s): 28613fa

Feat: surveyor core knobs (compiled prompt + UI)

Browse files
backend/api/conversation_routes.py CHANGED
@@ -2,7 +2,7 @@
2
 
3
  from __future__ import annotations
4
 
5
- from typing import Dict, Optional
6
 
7
  from fastapi import APIRouter, HTTPException
8
  from pydantic import BaseModel, Field
@@ -26,6 +26,10 @@ class StartConversationRequest(BaseModel):
26
  default=None,
27
  description="Extra instructions appended to the patient system prompt for this run",
28
  )
 
 
 
 
29
 
30
 
31
  class ConversationStatusResponse(BaseModel):
@@ -49,6 +53,7 @@ async def start_conversation(request: StartConversationRequest) -> Dict[str, str
49
  model=request.model,
50
  surveyor_prompt_addition=request.surveyor_prompt_addition,
51
  patient_prompt_addition=request.patient_prompt_addition,
 
52
  )
53
  if success:
54
  return {"message": "Conversation started successfully", "conversation_id": request.conversation_id}
@@ -94,4 +99,3 @@ async def list_conversations() -> Dict[str, Dict]:
94
  return await service.list_active_conversations()
95
  except Exception as e:
96
  raise HTTPException(status_code=500, detail=f"Internal error listing conversations: {str(e)}")
97
-
 
2
 
3
  from __future__ import annotations
4
 
5
+ from typing import Dict, Optional, Any
6
 
7
  from fastapi import APIRouter, HTTPException
8
  from pydantic import BaseModel, Field
 
26
  default=None,
27
  description="Extra instructions appended to the patient system prompt for this run",
28
  )
29
+ surveyor_knobs: Optional[Dict[str, Any]] = Field(
30
+ default=None,
31
+ description="Structured surveyor configuration knobs (compiled into the surveyor system prompt)",
32
+ )
33
 
34
 
35
  class ConversationStatusResponse(BaseModel):
 
53
  model=request.model,
54
  surveyor_prompt_addition=request.surveyor_prompt_addition,
55
  patient_prompt_addition=request.patient_prompt_addition,
56
+ surveyor_knobs=request.surveyor_knobs,
57
  )
58
  if success:
59
  return {"message": "Conversation started successfully", "conversation_id": request.conversation_id}
 
99
  return await service.list_active_conversations()
100
  except Exception as e:
101
  raise HTTPException(status_code=500, detail=f"Internal error listing conversations: {str(e)}")
 
backend/api/conversation_service.py CHANGED
@@ -42,6 +42,7 @@ from backend.core.persona_system import PersonaSystem # noqa: E402
42
  from .conversation_ws import ConnectionManager # noqa: E402
43
  from .storage_service import get_run_store # noqa: E402
44
  from backend.storage import RunRecord # noqa: E402
 
45
 
46
  # Setup logging
47
  logger = logging.getLogger(__name__)
@@ -271,6 +272,7 @@ class ConversationInfo:
271
  stop_requested: bool = False
272
  surveyor_prompt_addition: Optional[str] = None
273
  patient_prompt_addition: Optional[str] = None
 
274
 
275
 
276
  @dataclass
@@ -288,6 +290,7 @@ class HumanChatInfo:
288
  stop_requested: bool = False
289
  surveyor_prompt_addition: Optional[str] = None
290
  patient_prompt_addition: Optional[str] = None
 
291
  lock: asyncio.Lock = field(default_factory=asyncio.Lock)
292
  client: Any = None
293
 
@@ -328,6 +331,7 @@ class ConversationService:
328
  model: Optional[str] = None,
329
  surveyor_prompt_addition: Optional[str] = None,
330
  patient_prompt_addition: Optional[str] = None,
 
331
  ) -> bool:
332
  """Start a new human-to-surveyor chat session."""
333
  if conversation_id in self.active_conversations or conversation_id in self.active_human_chats:
@@ -353,6 +357,7 @@ class ConversationService:
353
  llm_backend=resolved_backend,
354
  surveyor_prompt_addition=surveyor_prompt_addition,
355
  patient_prompt_addition=patient_prompt_addition,
 
356
  status=ConversationStatus.STARTING,
357
  created_at=datetime.now(),
358
  )
@@ -501,6 +506,10 @@ class ConversationService:
501
  user_prompt=user_prompt,
502
  )
503
 
 
 
 
 
504
  patient_persona = self.persona_system.get_persona(chat_info.patient_persona_id) or {}
505
  try:
506
  patient_context = self.persona_system.prompt_builder.build_system_prompt(patient_persona)
@@ -532,7 +541,8 @@ class ConversationService:
532
  host: Optional[str] = None,
533
  model: Optional[str] = None,
534
  surveyor_prompt_addition: Optional[str] = None,
535
- patient_prompt_addition: Optional[str] = None) -> bool:
 
536
  """Start a new AI-to-AI conversation.
537
 
538
  Args:
@@ -576,6 +586,7 @@ class ConversationService:
576
  llm_backend=resolved_backend,
577
  surveyor_prompt_addition=surveyor_prompt_addition,
578
  patient_prompt_addition=patient_prompt_addition,
 
579
  status=ConversationStatus.STARTING,
580
  created_at=datetime.now()
581
  )
@@ -598,6 +609,7 @@ class ConversationService:
598
  llm_parameters=llm_parameters,
599
  surveyor_prompt_addition=surveyor_prompt_addition,
600
  patient_prompt_addition=patient_prompt_addition,
 
601
  )
602
 
603
  # Start conversation streaming task
@@ -863,6 +875,7 @@ class ConversationService:
863
  "patient_persona_id": conv_info.patient_persona_id,
864
  "surveyor_prompt_addition": getattr(conv_info, "surveyor_prompt_addition", None),
865
  "patient_prompt_addition": getattr(conv_info, "patient_prompt_addition", None),
 
866
  },
867
  }
868
 
 
42
  from .conversation_ws import ConnectionManager # noqa: E402
43
  from .storage_service import get_run_store # noqa: E402
44
  from backend.storage import RunRecord # noqa: E402
45
+ from backend.core.surveyor_knobs import compile_surveyor_overlay # noqa: E402
46
 
47
  # Setup logging
48
  logger = logging.getLogger(__name__)
 
272
  stop_requested: bool = False
273
  surveyor_prompt_addition: Optional[str] = None
274
  patient_prompt_addition: Optional[str] = None
275
+ surveyor_knobs: Optional[Dict[str, Any]] = None
276
 
277
 
278
  @dataclass
 
290
  stop_requested: bool = False
291
  surveyor_prompt_addition: Optional[str] = None
292
  patient_prompt_addition: Optional[str] = None
293
+ surveyor_knobs: Optional[Dict[str, Any]] = None
294
  lock: asyncio.Lock = field(default_factory=asyncio.Lock)
295
  client: Any = None
296
 
 
331
  model: Optional[str] = None,
332
  surveyor_prompt_addition: Optional[str] = None,
333
  patient_prompt_addition: Optional[str] = None,
334
+ surveyor_knobs: Optional[Dict[str, Any]] = None,
335
  ) -> bool:
336
  """Start a new human-to-surveyor chat session."""
337
  if conversation_id in self.active_conversations or conversation_id in self.active_human_chats:
 
357
  llm_backend=resolved_backend,
358
  surveyor_prompt_addition=surveyor_prompt_addition,
359
  patient_prompt_addition=patient_prompt_addition,
360
+ surveyor_knobs=surveyor_knobs if isinstance(surveyor_knobs, dict) else None,
361
  status=ConversationStatus.STARTING,
362
  created_at=datetime.now(),
363
  )
 
506
  user_prompt=user_prompt,
507
  )
508
 
509
+ overlay = compile_surveyor_overlay(chat_info.surveyor_knobs)
510
+ if overlay:
511
+ system_prompt = (system_prompt + "\n\n" + overlay).strip()
512
+
513
  patient_persona = self.persona_system.get_persona(chat_info.patient_persona_id) or {}
514
  try:
515
  patient_context = self.persona_system.prompt_builder.build_system_prompt(patient_persona)
 
541
  host: Optional[str] = None,
542
  model: Optional[str] = None,
543
  surveyor_prompt_addition: Optional[str] = None,
544
+ patient_prompt_addition: Optional[str] = None,
545
+ surveyor_knobs: Optional[Dict[str, Any]] = None) -> bool:
546
  """Start a new AI-to-AI conversation.
547
 
548
  Args:
 
586
  llm_backend=resolved_backend,
587
  surveyor_prompt_addition=surveyor_prompt_addition,
588
  patient_prompt_addition=patient_prompt_addition,
589
+ surveyor_knobs=surveyor_knobs if isinstance(surveyor_knobs, dict) else None,
590
  status=ConversationStatus.STARTING,
591
  created_at=datetime.now()
592
  )
 
609
  llm_parameters=llm_parameters,
610
  surveyor_prompt_addition=surveyor_prompt_addition,
611
  patient_prompt_addition=patient_prompt_addition,
612
+ surveyor_knobs=surveyor_knobs if isinstance(surveyor_knobs, dict) else None,
613
  )
614
 
615
  # Start conversation streaming task
 
875
  "patient_persona_id": conv_info.patient_persona_id,
876
  "surveyor_prompt_addition": getattr(conv_info, "surveyor_prompt_addition", None),
877
  "patient_prompt_addition": getattr(conv_info, "patient_prompt_addition", None),
878
+ "surveyor_knobs": getattr(conv_info, "surveyor_knobs", None),
879
  },
880
  }
881
 
backend/api/conversation_ws.py CHANGED
@@ -335,6 +335,7 @@ async def handle_start_conversation(data: dict, conversation_id: str):
335
  model = data.get("model")
336
  surveyor_prompt_addition = data.get("surveyor_prompt_addition")
337
  patient_prompt_addition = data.get("patient_prompt_addition")
 
338
 
339
  if not surveyor_persona_id or not patient_persona_id:
340
  await manager.send_to_conversation(conversation_id, {
@@ -353,6 +354,7 @@ async def handle_start_conversation(data: dict, conversation_id: str):
353
  model=model,
354
  surveyor_prompt_addition=surveyor_prompt_addition,
355
  patient_prompt_addition=patient_prompt_addition,
 
356
  )
357
 
358
  if success:
@@ -386,6 +388,7 @@ async def handle_start_human_chat(data: dict, conversation_id: str):
386
  model = data.get("model")
387
  surveyor_prompt_addition = data.get("surveyor_prompt_addition")
388
  patient_prompt_addition = data.get("patient_prompt_addition")
 
389
 
390
  if not surveyor_persona_id or not patient_persona_id:
391
  await manager.send_to_conversation(conversation_id, {
@@ -403,6 +406,7 @@ async def handle_start_human_chat(data: dict, conversation_id: str):
403
  model=model,
404
  surveyor_prompt_addition=surveyor_prompt_addition,
405
  patient_prompt_addition=patient_prompt_addition,
 
406
  )
407
 
408
  if success:
 
335
  model = data.get("model")
336
  surveyor_prompt_addition = data.get("surveyor_prompt_addition")
337
  patient_prompt_addition = data.get("patient_prompt_addition")
338
+ surveyor_knobs = data.get("surveyor_knobs")
339
 
340
  if not surveyor_persona_id or not patient_persona_id:
341
  await manager.send_to_conversation(conversation_id, {
 
354
  model=model,
355
  surveyor_prompt_addition=surveyor_prompt_addition,
356
  patient_prompt_addition=patient_prompt_addition,
357
+ surveyor_knobs=surveyor_knobs,
358
  )
359
 
360
  if success:
 
388
  model = data.get("model")
389
  surveyor_prompt_addition = data.get("surveyor_prompt_addition")
390
  patient_prompt_addition = data.get("patient_prompt_addition")
391
+ surveyor_knobs = data.get("surveyor_knobs")
392
 
393
  if not surveyor_persona_id or not patient_persona_id:
394
  await manager.send_to_conversation(conversation_id, {
 
406
  model=model,
407
  surveyor_prompt_addition=surveyor_prompt_addition,
408
  patient_prompt_addition=patient_prompt_addition,
409
+ surveyor_knobs=surveyor_knobs,
410
  )
411
 
412
  if success:
backend/core/conversation_manager.py CHANGED
@@ -31,6 +31,7 @@ sys.path.insert(0, str(Path(__file__).parent.parent))
31
 
32
  from backend.core.llm_client import create_llm_client
33
  from backend.core.persona_system import PersonaSystem
 
34
 
35
  SURVEYOR_MAX_TOKENS = 140
36
  PATIENT_MAX_TOKENS = 240
@@ -81,6 +82,7 @@ class ConversationManager:
81
  patient_persona: dict = None,
82
  surveyor_prompt_addition: Optional[str] = None,
83
  patient_prompt_addition: Optional[str] = None,
 
84
  host: str = "http://localhost:11434",
85
  model: str = "llama3.2:latest",
86
  llm_backend: str = "ollama",
@@ -129,6 +131,7 @@ class ConversationManager:
129
  self.turn_count = 0
130
  self.surveyor_prompt_addition = (surveyor_prompt_addition or "").strip()
131
  self.patient_prompt_addition = (patient_prompt_addition or "").strip()
 
132
 
133
  def _generate_conversation_id(self) -> str:
134
  """Generate unique conversation identifier."""
@@ -214,6 +217,9 @@ class ConversationManager:
214
  conversation_history=conversation_history,
215
  user_prompt=user_prompt
216
  )
 
 
 
217
  if self.surveyor_prompt_addition:
218
  system_prompt = f"{system_prompt}\n\nAdditional instructions:\n{self.surveyor_prompt_addition}"
219
 
 
31
 
32
  from backend.core.llm_client import create_llm_client
33
  from backend.core.persona_system import PersonaSystem
34
+ from backend.core.surveyor_knobs import compile_surveyor_overlay
35
 
36
  SURVEYOR_MAX_TOKENS = 140
37
  PATIENT_MAX_TOKENS = 240
 
82
  patient_persona: dict = None,
83
  surveyor_prompt_addition: Optional[str] = None,
84
  patient_prompt_addition: Optional[str] = None,
85
+ surveyor_knobs: Optional[Dict[str, Any]] = None,
86
  host: str = "http://localhost:11434",
87
  model: str = "llama3.2:latest",
88
  llm_backend: str = "ollama",
 
131
  self.turn_count = 0
132
  self.surveyor_prompt_addition = (surveyor_prompt_addition or "").strip()
133
  self.patient_prompt_addition = (patient_prompt_addition or "").strip()
134
+ self.surveyor_knobs = surveyor_knobs if isinstance(surveyor_knobs, dict) else None
135
 
136
  def _generate_conversation_id(self) -> str:
137
  """Generate unique conversation identifier."""
 
217
  conversation_history=conversation_history,
218
  user_prompt=user_prompt
219
  )
220
+ overlay = compile_surveyor_overlay(self.surveyor_knobs)
221
+ if overlay:
222
+ system_prompt = f"{system_prompt}\n\n{overlay}"
223
  if self.surveyor_prompt_addition:
224
  system_prompt = f"{system_prompt}\n\nAdditional instructions:\n{self.surveyor_prompt_addition}"
225
 
backend/core/surveyor_knobs.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from typing import Any, Dict, List, Optional
4
+
5
+
6
+ def _as_str_list(value: Any) -> List[str]:
7
+ if not value:
8
+ return []
9
+ if isinstance(value, list):
10
+ out: List[str] = []
11
+ for item in value:
12
+ if isinstance(item, str) and item.strip():
13
+ out.append(item.strip())
14
+ return out
15
+ return []
16
+
17
+
18
def compile_surveyor_overlay(knobs: Optional[Dict[str, Any]]) -> str:
    """Compile structured surveyor knobs into a deterministic prompt overlay.

    Recognized scalar knobs are ``stance``, ``question_strategy``,
    ``empathy_style`` and ``off_track_handling``; recognized list knobs are
    ``probing_policy`` and ``sensitivity_handling``. Unknown keys and
    malformed values are ignored. Knobs are emitted in a fixed order so the
    same input always compiles to the same overlay text.

    Args:
        knobs: Mapping of knob names to values, typically deserialized from
            a client request. May be None or empty.

    Returns:
        A newline-joined bullet list under a fixed header, or an empty
        string when *knobs* is missing/empty or contributes no content
        (previously a header-only overlay was emitted in that case, which
        injected an empty configuration section into the system prompt).
    """
    if not isinstance(knobs, dict) or not knobs:
        return ""

    def _clean_items(value: Any) -> List[str]:
        # Accept only a list of non-blank strings; anything else degrades to [].
        if not isinstance(value, list):
            return []
        return [v.strip() for v in value if isinstance(v, str) and v.strip()]

    lines: List[str] = ["Surveyor configuration (structured knobs):"]

    # Scalar knobs: one "- label: value" bullet each, in fixed order.
    for label in ("stance", "question_strategy", "empathy_style", "off_track_handling"):
        value = knobs.get(label)
        if isinstance(value, str) and value.strip():
            lines.append(f"- {label}: {value.strip()}")

    # List knobs: a "- label:" bullet followed by one sub-bullet per item.
    for label in ("probing_policy", "sensitivity_handling"):
        items = _clean_items(knobs.get(label))
        if items:
            lines.append(f"- {label}:")
            lines.extend(f" - {item}" for item in items)

    # Fix: a dict with only unrecognized keys used to yield a header-only
    # overlay; suppress it so callers never append empty configuration text.
    if len(lines) == 1:
        return ""
    return "\n".join(lines)
54
+
docs/development.md CHANGED
@@ -74,6 +74,7 @@ The UI also includes a **Configuration** view that lets you select personas and
74
  - No automated test suite yet. Add lightweight `pytest` modules under `tests/` as you extend functionality.
75
  - Manually verify through the primary web UI (`frontend/react_gradio_hybrid.py`).
76
  - For persistence + history: complete a run, confirm it appears in **History**, restart the container, and confirm it still appears and exports download.
 
77
  - If you need to debug the conversation loop, instrument `backend/core/conversation_manager.py` or launch a shell and run it directly.
78
 
79
  ## Notes on Persistence (HF)
 
74
  - No automated test suite yet. Add lightweight `pytest` modules under `tests/` as you extend functionality.
75
  - Manually verify through the primary web UI (`frontend/react_gradio_hybrid.py`).
76
  - For persistence + history: complete a run, confirm it appears in **History**, restart the container, and confirm it still appears and exports download.
77
+ - For config knobs (surveyor v1): change surveyor settings in **Configuration**, run an AI↔AI session, and confirm behavior differs and the completed run’s `config.personas.surveyor_knobs` is present when fetched from `/api/runs/{run_id}`.
78
  - If you need to debug the conversation loop, instrument `backend/core/conversation_manager.py` or launch a shell and run it directly.
79
 
80
  ## Notes on Persistence (HF)
frontend/pages/config_view.py CHANGED
@@ -8,6 +8,14 @@ def get_config_view_js() -> str:
8
  const [selectedPatientId, setSelectedPatientId] = React.useState(existing?.patient_persona_id || 'cooperative_senior_001');
9
  const [surveyorPromptAddition, setSurveyorPromptAddition] = React.useState(existing?.surveyor_prompt_addition || '');
10
  const [patientPromptAddition, setPatientPromptAddition] = React.useState(existing?.patient_prompt_addition || '');
 
 
 
 
 
 
 
 
11
  const [savedAt, setSavedAt] = React.useState(existing?.saved_at || null);
12
 
13
  React.useEffect(() => {
@@ -28,6 +36,7 @@ def get_config_view_js() -> str:
28
  patient_persona_id: selectedPatientId,
29
  surveyor_prompt_addition: surveyorPromptAddition,
30
  patient_prompt_addition: patientPromptAddition,
 
31
  saved_at: new Date().toISOString()
32
  };
33
  localStorage.setItem(STORAGE_KEY, JSON.stringify(cfg));
@@ -58,6 +67,105 @@ def get_config_view_js() -> str:
58
  ))}
59
  </select>
60
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
61
  <label className="block text-sm font-semibold text-slate-700 mt-4 mb-2">Surveyor prompt addition (optional)</label>
62
  <textarea
63
  className="w-full border border-slate-300 rounded-lg px-3 py-2 text-sm bg-white h-28"
@@ -86,6 +194,36 @@ def get_config_view_js() -> str:
86
  value={patientPromptAddition}
87
  onChange={(e) => setPatientPromptAddition(e.target.value)}
88
  />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
89
  </div>
90
  </div>
91
 
 
8
  const [selectedPatientId, setSelectedPatientId] = React.useState(existing?.patient_persona_id || 'cooperative_senior_001');
9
  const [surveyorPromptAddition, setSurveyorPromptAddition] = React.useState(existing?.surveyor_prompt_addition || '');
10
  const [patientPromptAddition, setPatientPromptAddition] = React.useState(existing?.patient_prompt_addition || '');
11
+ const [surveyorKnobs, setSurveyorKnobs] = React.useState(existing?.surveyor_knobs || {
12
+ stance: 'empathetic_researcher',
13
+ question_strategy: 'conversational_cover_all',
14
+ probing_policy: ['reflect_emotion', 'summarize_and_confirm'],
15
+ empathy_style: 'validating',
16
+ off_track_handling: 'gentle_redirect',
17
+ sensitivity_handling: ['acknowledge', 'normalize', 'avoid_advice']
18
+ });
19
  const [savedAt, setSavedAt] = React.useState(existing?.saved_at || null);
20
 
21
  React.useEffect(() => {
 
36
  patient_persona_id: selectedPatientId,
37
  surveyor_prompt_addition: surveyorPromptAddition,
38
  patient_prompt_addition: patientPromptAddition,
39
+ surveyor_knobs: surveyorKnobs,
40
  saved_at: new Date().toISOString()
41
  };
42
  localStorage.setItem(STORAGE_KEY, JSON.stringify(cfg));
 
67
  ))}
68
  </select>
69
 
70
+ <div className="mt-4">
71
+ <label className="block text-sm font-semibold text-slate-700 mb-2">Surveyor stance</label>
72
+ <select
73
+ className="w-full border border-slate-300 rounded-lg px-3 py-2 text-sm bg-white"
74
+ value={surveyorKnobs.stance || 'empathetic_researcher'}
75
+ onChange={(e) => setSurveyorKnobs(prev => Object.assign({}, prev, { stance: e.target.value }))}
76
+ >
77
+ <option value="empathetic_researcher">Empathetic researcher</option>
78
+ <option value="neutral_clinical">Neutral clinical</option>
79
+ <option value="time_efficient">Time efficient</option>
80
+ </select>
81
+ </div>
82
+
83
+ <div className="mt-4">
84
+ <label className="block text-sm font-semibold text-slate-700 mb-2">Question strategy</label>
85
+ <select
86
+ className="w-full border border-slate-300 rounded-lg px-3 py-2 text-sm bg-white"
87
+ value={surveyorKnobs.question_strategy || 'conversational_cover_all'}
88
+ onChange={(e) => setSurveyorKnobs(prev => Object.assign({}, prev, { question_strategy: e.target.value }))}
89
+ >
90
+ <option value="sequential">Sequential</option>
91
+ <option value="adaptive_followups">Adaptive with follow-ups</option>
92
+ <option value="conversational_cover_all">Conversational but cover all</option>
93
+ </select>
94
+ </div>
95
+
96
+ <div className="mt-4">
97
+ <label className="block text-sm font-semibold text-slate-700 mb-2">Empathy style</label>
98
+ <select
99
+ className="w-full border border-slate-300 rounded-lg px-3 py-2 text-sm bg-white"
100
+ value={surveyorKnobs.empathy_style || 'validating'}
101
+ onChange={(e) => setSurveyorKnobs(prev => Object.assign({}, prev, { empathy_style: e.target.value }))}
102
+ >
103
+ <option value="validating">Validating</option>
104
+ <option value="neutral">Neutral</option>
105
+ <option value="supportive_brief">Supportive, brief</option>
106
+ </select>
107
+ </div>
108
+
109
+ <div className="mt-4">
110
+ <label className="block text-sm font-semibold text-slate-700 mb-2">Off-track handling</label>
111
+ <select
112
+ className="w-full border border-slate-300 rounded-lg px-3 py-2 text-sm bg-white"
113
+ value={surveyorKnobs.off_track_handling || 'gentle_redirect'}
114
+ onChange={(e) => setSurveyorKnobs(prev => Object.assign({}, prev, { off_track_handling: e.target.value }))}
115
+ >
116
+ <option value="gentle_redirect">Gently redirect</option>
117
+ <option value="one_vent_then_redirect">Let them vent once, then redirect</option>
118
+ <option value="hard_redirect">Hard redirect</option>
119
+ </select>
120
+ </div>
121
+
122
+ <div className="mt-4">
123
+ <label className="block text-sm font-semibold text-slate-700 mb-2">Probing policy</label>
124
+ {[
125
+ { key: 'clarify_timeline', label: 'Clarify timeline' },
126
+ { key: 'ask_examples', label: 'Ask examples' },
127
+ { key: 'quantify_frequency_severity', label: 'Quantify frequency/severity' },
128
+ { key: 'reflect_emotion', label: 'Reflect emotion' },
129
+ { key: 'summarize_and_confirm', label: 'Summarize and confirm' }
130
+ ].map((item) => (
131
+ <label key={item.key} className="flex items-center gap-2 text-sm text-slate-700 mt-1">
132
+ <input
133
+ type="checkbox"
134
+ checked={(surveyorKnobs.probing_policy || []).includes(item.key)}
135
+ onChange={(e) => {
136
+ const next = new Set(surveyorKnobs.probing_policy || []);
137
+ if (e.target.checked) next.add(item.key); else next.delete(item.key);
138
+ setSurveyorKnobs(prev => Object.assign({}, prev, { probing_policy: Array.from(next) }));
139
+ }}
140
+ />
141
+ {item.label}
142
+ </label>
143
+ ))}
144
+ </div>
145
+
146
+ <div className="mt-4">
147
+ <label className="block text-sm font-semibold text-slate-700 mb-2">Sensitivity handling</label>
148
+ {[
149
+ { key: 'acknowledge', label: 'Acknowledge' },
150
+ { key: 'offer_skip', label: 'Offer to skip' },
151
+ { key: 'normalize', label: 'Normalize' },
152
+ { key: 'avoid_advice', label: 'Avoid advice' }
153
+ ].map((item) => (
154
+ <label key={item.key} className="flex items-center gap-2 text-sm text-slate-700 mt-1">
155
+ <input
156
+ type="checkbox"
157
+ checked={(surveyorKnobs.sensitivity_handling || []).includes(item.key)}
158
+ onChange={(e) => {
159
+ const next = new Set(surveyorKnobs.sensitivity_handling || []);
160
+ if (e.target.checked) next.add(item.key); else next.delete(item.key);
161
+ setSurveyorKnobs(prev => Object.assign({}, prev, { sensitivity_handling: Array.from(next) }));
162
+ }}
163
+ />
164
+ {item.label}
165
+ </label>
166
+ ))}
167
+ </div>
168
+
169
  <label className="block text-sm font-semibold text-slate-700 mt-4 mb-2">Surveyor prompt addition (optional)</label>
170
  <textarea
171
  className="w-full border border-slate-300 rounded-lg px-3 py-2 text-sm bg-white h-28"
 
194
  value={patientPromptAddition}
195
  onChange={(e) => setPatientPromptAddition(e.target.value)}
196
  />
197
+
198
+ <div className="mt-6">
199
+ <label className="block text-sm font-semibold text-slate-700 mb-2">Surveyor prompt preview</label>
200
+ <div className="text-xs text-slate-500 mb-2">
201
+ Best-effort preview; the backend compiles knobs into the surveyor system prompt at runtime.
202
+ </div>
203
+ <pre className="w-full whitespace-pre-wrap text-xs border border-slate-200 rounded-lg bg-slate-50 p-3 max-h-64 overflow-auto">
204
+ {(() => {
205
+ const p = (personas.surveyors || []).find(x => x.id === selectedSurveyorId);
206
+ const base = (p && p.system_prompt) ? p.system_prompt : '';
207
+ const lines = [];
208
+ lines.push('Surveyor configuration (structured knobs):');
209
+ if (surveyorKnobs?.stance) lines.push(`- stance: ${surveyorKnobs.stance}`);
210
+ if (surveyorKnobs?.question_strategy) lines.push(`- question_strategy: ${surveyorKnobs.question_strategy}`);
211
+ if (surveyorKnobs?.empathy_style) lines.push(`- empathy_style: ${surveyorKnobs.empathy_style}`);
212
+ if (surveyorKnobs?.off_track_handling) lines.push(`- off_track_handling: ${surveyorKnobs.off_track_handling}`);
213
+ if ((surveyorKnobs?.probing_policy || []).length) {
214
+ lines.push('- probing_policy:');
215
+ (surveyorKnobs.probing_policy || []).forEach((k) => lines.push(` - ${k}`));
216
+ }
217
+ if ((surveyorKnobs?.sensitivity_handling || []).length) {
218
+ lines.push('- sensitivity_handling:');
219
+ (surveyorKnobs.sensitivity_handling || []).forEach((k) => lines.push(` - ${k}`));
220
+ }
221
+ const overlay = lines.join('\n');
222
+ const addition = (surveyorPromptAddition || '').trim();
223
+ return [base.trim(), overlay.trim(), addition ? `Additional instructions:\n${addition}` : ''].filter(Boolean).join('\n\n');
224
+ })()}
225
+ </pre>
226
+ </div>
227
  </div>
228
  </div>
229
 
frontend/pages/main_page.py CHANGED
@@ -537,7 +537,8 @@ def get_main_page_html(auth_enabled: bool = False) -> str:
537
  surveyor_persona_id: surveyorId,
538
  patient_persona_id: patientId,
539
  surveyor_prompt_addition: cfg.surveyor_prompt_addition || undefined,
540
- patient_prompt_addition: cfg.patient_prompt_addition || undefined
 
541
  }));
542
  }
543
  }, 500);
@@ -566,7 +567,8 @@ def get_main_page_html(auth_enabled: bool = False) -> str:
566
  surveyor_persona_id: surveyorId,
567
  patient_persona_id: patientId,
568
  surveyor_prompt_addition: cfg.surveyor_prompt_addition || undefined,
569
- patient_prompt_addition: cfg.patient_prompt_addition || undefined
 
570
  }));
571
  }
572
  }, 500);
 
537
  surveyor_persona_id: surveyorId,
538
  patient_persona_id: patientId,
539
  surveyor_prompt_addition: cfg.surveyor_prompt_addition || undefined,
540
+ patient_prompt_addition: cfg.patient_prompt_addition || undefined,
541
+ surveyor_knobs: cfg.surveyor_knobs || undefined
542
  }));
543
  }
544
  }, 500);
 
567
  surveyor_persona_id: surveyorId,
568
  patient_persona_id: patientId,
569
  surveyor_prompt_addition: cfg.surveyor_prompt_addition || undefined,
570
+ patient_prompt_addition: cfg.patient_prompt_addition || undefined,
571
+ surveyor_knobs: cfg.surveyor_knobs || undefined
572
  }));
573
  }
574
  }, 500);