MikelWL committed on
Commit
83cfdf5
·
1 Parent(s): 9b1f914

Feat: patient attributes v1 (local-first)

Browse files
backend/api/conversation_routes.py CHANGED
@@ -20,7 +20,11 @@ class StartConversationRequest(BaseModel):
20
  model: Optional[str] = Field(default=None, description="Override LLM model to use")
21
  patient_prompt_addition: Optional[str] = Field(
22
  default=None,
23
- description="Extra instructions appended to the patient system prompt for this run",
 
 
 
 
24
  )
25
  surveyor_attributes: Optional[List[str]] = Field(
26
  default=None,
@@ -51,7 +55,7 @@ async def start_conversation(request: StartConversationRequest) -> Dict[str, str
51
  patient_persona_id=request.patient_persona_id,
52
  host=request.host,
53
  model=request.model,
54
- patient_prompt_addition=request.patient_prompt_addition,
55
  surveyor_attributes=request.surveyor_attributes,
56
  surveyor_question_bank=request.surveyor_question_bank,
57
  )
 
20
  model: Optional[str] = Field(default=None, description="Override LLM model to use")
21
  patient_prompt_addition: Optional[str] = Field(
22
  default=None,
23
+ description="(Deprecated) Extra instructions appended to the patient system prompt for this run",
24
+ )
25
+ patient_attributes: Optional[List[str]] = Field(
26
+ default=None,
27
+ description="Plain-language patient attributes (bullet lines) compiled into the patient system prompt",
28
  )
29
  surveyor_attributes: Optional[List[str]] = Field(
30
  default=None,
 
55
  patient_persona_id=request.patient_persona_id,
56
  host=request.host,
57
  model=request.model,
58
+ patient_attributes=request.patient_attributes,
59
  surveyor_attributes=request.surveyor_attributes,
60
  surveyor_question_bank=request.surveyor_question_bank,
61
  )
backend/api/conversation_service.py CHANGED
@@ -43,6 +43,7 @@ from .conversation_ws import ConnectionManager # noqa: E402
43
  from .storage_service import get_run_store # noqa: E402
44
  from backend.storage import RunRecord # noqa: E402
45
  from backend.core.surveyor_knobs import compile_surveyor_attributes_overlay, compile_question_bank_overlay # noqa: E402
 
46
 
47
  # Setup logging
48
  logger = logging.getLogger(__name__)
@@ -278,7 +279,7 @@ class ConversationInfo:
278
  message_count: int = 0
279
  task: Optional[asyncio.Task] = None
280
  stop_requested: bool = False
281
- patient_prompt_addition: Optional[str] = None
282
  surveyor_attributes: List[str] = field(default_factory=list)
283
  surveyor_question_bank: Optional[str] = None
284
 
@@ -296,7 +297,7 @@ class HumanChatInfo:
296
  status: ConversationStatus
297
  created_at: datetime
298
  stop_requested: bool = False
299
- patient_prompt_addition: Optional[str] = None
300
  surveyor_attributes: List[str] = field(default_factory=list)
301
  surveyor_question_bank: Optional[str] = None
302
  ai_role: str = "surveyor" # "surveyor" or "patient"
@@ -339,7 +340,7 @@ class ConversationService:
339
  patient_persona_id: str,
340
  host: Optional[str] = None,
341
  model: Optional[str] = None,
342
- patient_prompt_addition: Optional[str] = None,
343
  surveyor_attributes: Optional[List[str]] = None,
344
  surveyor_question_bank: Optional[str] = None,
345
  ai_role: Optional[str] = None,
@@ -368,7 +369,7 @@ class ConversationService:
368
  host=resolved_host,
369
  model=resolved_model,
370
  llm_backend=resolved_backend,
371
- patient_prompt_addition=patient_prompt_addition,
372
  surveyor_attributes=[s.strip() for s in (surveyor_attributes or []) if isinstance(s, str) and s.strip()],
373
  surveyor_question_bank=surveyor_question_bank if isinstance(surveyor_question_bank, str) and surveyor_question_bank.strip() else None,
374
  ai_role=resolved_ai_role,
@@ -591,8 +592,10 @@ class ConversationService:
591
  patient_context = patient_persona.get("system_prompt", "") or ""
592
 
593
  patient_context = (patient_context or "").strip()
594
- if chat_info.patient_prompt_addition:
595
- patient_context = (patient_context + "\n\nAdditional patient context:\n" + chat_info.patient_prompt_addition).strip()
 
 
596
  if patient_context:
597
  system_prompt = (system_prompt + "\n\nPatient background (for context only):\n" + patient_context).strip()
598
 
@@ -651,8 +654,9 @@ class ConversationService:
651
  )
652
 
653
  system_prompt = (system_prompt or "").strip()
654
- if chat_info.patient_prompt_addition:
655
- system_prompt = (system_prompt + "\n\nAdditional instructions:\n" + chat_info.patient_prompt_addition).strip()
 
656
 
657
  response = await chat_info.client.generate(
658
  prompt=prompt_with_history,
@@ -668,7 +672,7 @@ class ConversationService:
668
  patient_persona_id: str,
669
  host: Optional[str] = None,
670
  model: Optional[str] = None,
671
- patient_prompt_addition: Optional[str] = None,
672
  surveyor_attributes: Optional[List[str]] = None,
673
  surveyor_question_bank: Optional[str] = None) -> bool:
674
  """Start a new AI-to-AI conversation.
@@ -712,7 +716,7 @@ class ConversationService:
712
  host=resolved_host,
713
  model=resolved_model,
714
  llm_backend=resolved_backend,
715
- patient_prompt_addition=patient_prompt_addition,
716
  surveyor_attributes=[s.strip() for s in (surveyor_attributes or []) if isinstance(s, str) and s.strip()],
717
  surveyor_question_bank=surveyor_question_bank if isinstance(surveyor_question_bank, str) and surveyor_question_bank.strip() else None,
718
  status=ConversationStatus.STARTING,
@@ -735,7 +739,7 @@ class ConversationService:
735
  model=resolved_model,
736
  llm_backend=self.settings.llm.backend,
737
  llm_parameters=llm_parameters,
738
- patient_prompt_addition=patient_prompt_addition,
739
  surveyor_attributes=[s.strip() for s in (surveyor_attributes or []) if isinstance(s, str) and s.strip()],
740
  surveyor_question_bank=surveyor_question_bank if isinstance(surveyor_question_bank, str) else None,
741
  )
@@ -1006,7 +1010,7 @@ class ConversationService:
1006
  "personas": {
1007
  "surveyor_persona_id": conv_info.surveyor_persona_id,
1008
  "patient_persona_id": conv_info.patient_persona_id,
1009
- "patient_prompt_addition": getattr(conv_info, "patient_prompt_addition", None),
1010
  "surveyor_attributes": getattr(conv_info, "surveyor_attributes", None),
1011
  "surveyor_question_bank": getattr(conv_info, "surveyor_question_bank", None),
1012
  "asked_question_ids": asked_question_ids,
 
43
  from .storage_service import get_run_store # noqa: E402
44
  from backend.storage import RunRecord # noqa: E402
45
  from backend.core.surveyor_knobs import compile_surveyor_attributes_overlay, compile_question_bank_overlay # noqa: E402
46
+ from backend.core.patient_knobs import compile_patient_attributes_overlay # noqa: E402
47
 
48
  # Setup logging
49
  logger = logging.getLogger(__name__)
 
279
  message_count: int = 0
280
  task: Optional[asyncio.Task] = None
281
  stop_requested: bool = False
282
+ patient_attributes: List[str] = field(default_factory=list)
283
  surveyor_attributes: List[str] = field(default_factory=list)
284
  surveyor_question_bank: Optional[str] = None
285
 
 
297
  status: ConversationStatus
298
  created_at: datetime
299
  stop_requested: bool = False
300
+ patient_attributes: List[str] = field(default_factory=list)
301
  surveyor_attributes: List[str] = field(default_factory=list)
302
  surveyor_question_bank: Optional[str] = None
303
  ai_role: str = "surveyor" # "surveyor" or "patient"
 
340
  patient_persona_id: str,
341
  host: Optional[str] = None,
342
  model: Optional[str] = None,
343
+ patient_attributes: Optional[List[str]] = None,
344
  surveyor_attributes: Optional[List[str]] = None,
345
  surveyor_question_bank: Optional[str] = None,
346
  ai_role: Optional[str] = None,
 
369
  host=resolved_host,
370
  model=resolved_model,
371
  llm_backend=resolved_backend,
372
+ patient_attributes=[s.strip() for s in (patient_attributes or []) if isinstance(s, str) and s.strip()],
373
  surveyor_attributes=[s.strip() for s in (surveyor_attributes or []) if isinstance(s, str) and s.strip()],
374
  surveyor_question_bank=surveyor_question_bank if isinstance(surveyor_question_bank, str) and surveyor_question_bank.strip() else None,
375
  ai_role=resolved_ai_role,
 
592
  patient_context = patient_persona.get("system_prompt", "") or ""
593
 
594
  patient_context = (patient_context or "").strip()
595
+ pat_lines = [s.strip() for s in (chat_info.patient_attributes or []) if isinstance(s, str) and s.strip()]
596
+ if pat_lines:
597
+ bullets = "\n".join(f"- {line}" for line in pat_lines)
598
+ patient_context = (patient_context + "\n\nPatient attributes (for context only):\n" + bullets).strip()
599
  if patient_context:
600
  system_prompt = (system_prompt + "\n\nPatient background (for context only):\n" + patient_context).strip()
601
 
 
654
  )
655
 
656
  system_prompt = (system_prompt or "").strip()
657
+ pat_attrs = compile_patient_attributes_overlay(chat_info.patient_attributes)
658
+ if pat_attrs:
659
+ system_prompt = (system_prompt + "\n\n" + pat_attrs).strip()
660
 
661
  response = await chat_info.client.generate(
662
  prompt=prompt_with_history,
 
672
  patient_persona_id: str,
673
  host: Optional[str] = None,
674
  model: Optional[str] = None,
675
+ patient_attributes: Optional[List[str]] = None,
676
  surveyor_attributes: Optional[List[str]] = None,
677
  surveyor_question_bank: Optional[str] = None) -> bool:
678
  """Start a new AI-to-AI conversation.
 
716
  host=resolved_host,
717
  model=resolved_model,
718
  llm_backend=resolved_backend,
719
+ patient_attributes=[s.strip() for s in (patient_attributes or []) if isinstance(s, str) and s.strip()],
720
  surveyor_attributes=[s.strip() for s in (surveyor_attributes or []) if isinstance(s, str) and s.strip()],
721
  surveyor_question_bank=surveyor_question_bank if isinstance(surveyor_question_bank, str) and surveyor_question_bank.strip() else None,
722
  status=ConversationStatus.STARTING,
 
739
  model=resolved_model,
740
  llm_backend=self.settings.llm.backend,
741
  llm_parameters=llm_parameters,
742
+ patient_attributes=[s.strip() for s in (patient_attributes or []) if isinstance(s, str) and s.strip()],
743
  surveyor_attributes=[s.strip() for s in (surveyor_attributes or []) if isinstance(s, str) and s.strip()],
744
  surveyor_question_bank=surveyor_question_bank if isinstance(surveyor_question_bank, str) else None,
745
  )
 
1010
  "personas": {
1011
  "surveyor_persona_id": conv_info.surveyor_persona_id,
1012
  "patient_persona_id": conv_info.patient_persona_id,
1013
+ "patient_attributes": getattr(conv_info, "patient_attributes", None),
1014
  "surveyor_attributes": getattr(conv_info, "surveyor_attributes", None),
1015
  "surveyor_question_bank": getattr(conv_info, "surveyor_question_bank", None),
1016
  "asked_question_ids": asked_question_ids,
backend/api/conversation_ws.py CHANGED
@@ -333,7 +333,7 @@ async def handle_start_conversation(data: dict, conversation_id: str):
333
  patient_persona_id = data.get("patient_persona_id")
334
  host = data.get("host")
335
  model = data.get("model")
336
- patient_prompt_addition = data.get("patient_prompt_addition")
337
  surveyor_attributes = data.get("surveyor_attributes")
338
  surveyor_question_bank = data.get("surveyor_question_bank")
339
 
@@ -352,7 +352,7 @@ async def handle_start_conversation(data: dict, conversation_id: str):
352
  patient_persona_id=patient_persona_id,
353
  host=host,
354
  model=model,
355
- patient_prompt_addition=patient_prompt_addition,
356
  surveyor_attributes=surveyor_attributes,
357
  surveyor_question_bank=surveyor_question_bank,
358
  )
@@ -387,7 +387,7 @@ async def handle_start_human_chat(data: dict, conversation_id: str):
387
  host = data.get("host")
388
  model = data.get("model")
389
  ai_role = data.get("ai_role")
390
- patient_prompt_addition = data.get("patient_prompt_addition")
391
  surveyor_attributes = data.get("surveyor_attributes")
392
  surveyor_question_bank = data.get("surveyor_question_bank")
393
 
@@ -405,7 +405,7 @@ async def handle_start_human_chat(data: dict, conversation_id: str):
405
  patient_persona_id=patient_persona_id,
406
  host=host,
407
  model=model,
408
- patient_prompt_addition=patient_prompt_addition,
409
  surveyor_attributes=surveyor_attributes,
410
  surveyor_question_bank=surveyor_question_bank,
411
  ai_role=ai_role,
 
333
  patient_persona_id = data.get("patient_persona_id")
334
  host = data.get("host")
335
  model = data.get("model")
336
+ patient_attributes = data.get("patient_attributes")
337
  surveyor_attributes = data.get("surveyor_attributes")
338
  surveyor_question_bank = data.get("surveyor_question_bank")
339
 
 
352
  patient_persona_id=patient_persona_id,
353
  host=host,
354
  model=model,
355
+ patient_attributes=patient_attributes,
356
  surveyor_attributes=surveyor_attributes,
357
  surveyor_question_bank=surveyor_question_bank,
358
  )
 
387
  host = data.get("host")
388
  model = data.get("model")
389
  ai_role = data.get("ai_role")
390
+ patient_attributes = data.get("patient_attributes")
391
  surveyor_attributes = data.get("surveyor_attributes")
392
  surveyor_question_bank = data.get("surveyor_question_bank")
393
 
 
405
  patient_persona_id=patient_persona_id,
406
  host=host,
407
  model=model,
408
+ patient_attributes=patient_attributes,
409
  surveyor_attributes=surveyor_attributes,
410
  surveyor_question_bank=surveyor_question_bank,
411
  ai_role=ai_role,
backend/core/conversation_manager.py CHANGED
@@ -32,6 +32,7 @@ sys.path.insert(0, str(Path(__file__).parent.parent))
32
  from backend.core.llm_client import create_llm_client
33
  from backend.core.persona_system import PersonaSystem
34
  from backend.core.surveyor_knobs import compile_surveyor_attributes_overlay, compile_question_bank_overlay
 
35
 
36
  SURVEYOR_MAX_TOKENS = 140
37
  PATIENT_MAX_TOKENS = 240
@@ -80,7 +81,7 @@ class ConversationManager:
80
  patient_persona_id: str = None,
81
  surveyor_persona: dict = None,
82
  patient_persona: dict = None,
83
- patient_prompt_addition: Optional[str] = None,
84
  surveyor_attributes: Optional[List[str]] = None,
85
  surveyor_question_bank: Optional[str] = None,
86
  host: str = "http://localhost:11434",
@@ -129,7 +130,7 @@ class ConversationManager:
129
  self.state = ConversationState.INITIALIZED
130
  self.history: List[Dict] = []
131
  self.turn_count = 0
132
- self.patient_prompt_addition = (patient_prompt_addition or "").strip()
133
  self.surveyor_attributes = [s.strip() for s in (surveyor_attributes or []) if isinstance(s, str) and s.strip()]
134
  self.surveyor_question_bank = (surveyor_question_bank or "").strip() or None
135
  self.asked_question_ids: List[str] = []
@@ -329,8 +330,9 @@ class ConversationManager:
329
  conversation_history=conversation_history,
330
  user_prompt=user_prompt
331
  )
332
- if self.patient_prompt_addition:
333
- system_prompt = f"{system_prompt}\n\nAdditional instructions:\n{self.patient_prompt_addition}"
 
334
 
335
  # Generate response
336
  response = await self.client.generate(
 
32
  from backend.core.llm_client import create_llm_client
33
  from backend.core.persona_system import PersonaSystem
34
  from backend.core.surveyor_knobs import compile_surveyor_attributes_overlay, compile_question_bank_overlay
35
+ from backend.core.patient_knobs import compile_patient_attributes_overlay
36
 
37
  SURVEYOR_MAX_TOKENS = 140
38
  PATIENT_MAX_TOKENS = 240
 
81
  patient_persona_id: str = None,
82
  surveyor_persona: dict = None,
83
  patient_persona: dict = None,
84
+ patient_attributes: Optional[List[str]] = None,
85
  surveyor_attributes: Optional[List[str]] = None,
86
  surveyor_question_bank: Optional[str] = None,
87
  host: str = "http://localhost:11434",
 
130
  self.state = ConversationState.INITIALIZED
131
  self.history: List[Dict] = []
132
  self.turn_count = 0
133
+ self.patient_attributes = [s.strip() for s in (patient_attributes or []) if isinstance(s, str) and s.strip()]
134
  self.surveyor_attributes = [s.strip() for s in (surveyor_attributes or []) if isinstance(s, str) and s.strip()]
135
  self.surveyor_question_bank = (surveyor_question_bank or "").strip() or None
136
  self.asked_question_ids: List[str] = []
 
330
  conversation_history=conversation_history,
331
  user_prompt=user_prompt
332
  )
333
+ attrs = compile_patient_attributes_overlay(self.patient_attributes)
334
+ if attrs:
335
+ system_prompt = f"{system_prompt}\n\n{attrs}"
336
 
337
  # Generate response
338
  response = await self.client.generate(
backend/core/patient_knobs.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ from typing import Any
4
+
5
+
6
+ def compile_patient_attributes_overlay(attributes: Any) -> str:
7
+ if not isinstance(attributes, list):
8
+ return ""
9
+ cleaned = [s.strip() for s in attributes if isinstance(s, str) and s.strip()]
10
+ if not cleaned:
11
+ return ""
12
+ bullets = "\n".join(f"- {line}" for line in cleaned)
13
+ return "Continue the conversation following these patient attributes:\n" + bullets
14
+
docs/persona-knobs.md CHANGED
@@ -56,9 +56,6 @@ A list of survey questions the surveyor must work through.
56
 
57
  When a question bank is present, the surveyor outputs strict JSON including the chosen question id (e.g., `q01`) so asked questions can be tracked deterministically.
58
 
59
- ### Surveyor prompt addition (optional, per-run)
60
- Additional free-form instructions appended last (lowest priority).
61
-
62
  ---
63
 
64
  ## Patient controls (Core v1, minimal)
@@ -69,8 +66,8 @@ Patient structured controls are intentionally minimal for now; the synthetic pat
69
  - `name` (required): display name.
70
  - `description` (optional): short one-liner shown in selectors.
71
 
72
- ### Patient prompt addition (optional, per-run)
73
- Patient prompt addition is supported as a run-level override.
74
 
75
  ---
76
 
 
56
 
57
  When a question bank is present, the surveyor outputs strict JSON including the chosen question id (e.g., `q01`) so asked questions can be tracked deterministically.
58
 
 
 
 
59
  ---
60
 
61
  ## Patient controls (Core v1, minimal)
 
66
  - `name` (required): display name.
67
  - `description` (optional): short one-liner shown in selectors.
68
 
69
+ ### Attributes (plain bullet lines)
70
+ A list of short rules the patient should follow during the session, compiled into the patient prompt so the control always matters.
71
 
72
  ---
73
 
docs/roadmap.md CHANGED
@@ -63,7 +63,7 @@ _Last updated: 2026-01-22_
63
  - Follow-ups:
64
  - Improve Human-to-AI turn-taking clarity (make it obvious who should speak next, especially when AI role = Patient).
65
  - Make persona ids backend-oriented and avoid showing raw ids in user-facing dropdowns (prefer friendly display names only).
66
- - **9.2 Patient controls v1**: patient prompt addition + (optional) patient attributes list (local-first).
67
  - **9.3 Analysis Agent controls v1**: show effective template + allow selecting template (read-only if templates aren’t CRUD yet).
68
  - **9.4 System controls v1**: show effective runtime config (LLM backend/model, DB path, auth status) as read-only “Effective settings”.
69
  - **9.5 Validation + guardrails**: schema versioning, reset-to-defaults, and explicit “applies next run” UX.
 
63
  - Follow-ups:
64
  - Improve Human-to-AI turn-taking clarity (make it obvious who should speak next, especially when AI role = Patient).
65
  - Make persona ids backend-oriented and avoid showing raw ids in user-facing dropdowns (prefer friendly display names only).
66
+ - **9.2 Patient controls v1 (Attributes list)**: patient persona attributes (local-first, compiled into the patient prompt).
67
  - **9.3 Analysis Agent controls v1**: show effective template + allow selecting template (read-only if templates aren’t CRUD yet).
68
  - **9.4 System controls v1**: show effective runtime config (LLM backend/model, DB path, auth status) as read-only “Effective settings”.
69
  - **9.5 Validation + guardrails**: schema versioning, reset-to-defaults, and explicit “applies next run” UX.
frontend/pages/config_view.py CHANGED
@@ -32,9 +32,15 @@ def get_config_view_js() -> str:
32
  }
33
  return [];
34
  });
35
- const [patientPromptAddition, setPatientPromptAddition] = React.useState(() => {
36
  const data = getPatientPersonaData(existing || {}, initialEditorPatientId) || {};
37
- return (typeof data.prompt_addition === 'string') ? data.prompt_addition : '';
 
 
 
 
 
 
38
  });
39
  const [savedAt, setSavedAt] = React.useState(existing?.saved_at || null);
40
  const [saveFlash, setSaveFlash] = React.useState(false);
@@ -71,6 +77,18 @@ def get_config_view_js() -> str:
71
  setSurveyorAttributeItems((prev) => (prev || []).filter((_, i) => i !== idx));
72
  };
73
 
 
 
 
 
 
 
 
 
 
 
 
 
74
  React.useEffect(() => {
75
  authedFetch('/api/personas')
76
  .then(r => r.json())
@@ -104,7 +122,8 @@ def get_config_view_js() -> str:
104
  React.useEffect(() => {
105
  const cfg = loadConfig() || {};
106
  const data = getPatientPersonaData(cfg, selectedPatientId) || {};
107
- setPatientPromptAddition((typeof data.prompt_addition === 'string') ? data.prompt_addition : '');
 
108
  }, [selectedPatientId]);
109
 
110
  const onSave = () => {
@@ -123,7 +142,9 @@ def get_config_view_js() -> str:
123
  };
124
 
125
  patientStore[selectedPatientId] = {
126
- prompt_addition: (patientPromptAddition || '')
 
 
127
  };
128
 
129
  const cfg = Object.assign({}, prev, {
@@ -267,8 +288,8 @@ def get_config_view_js() -> str:
267
  </div>
268
  </div>
269
  ) : activePane === 'patients' ? (
270
- <div className="mt-6 grid grid-cols-2 gap-6">
271
- <div>
272
  <label className="block text-sm font-semibold text-slate-700 mb-2">Edit patient persona</label>
273
  <select
274
  className="w-full border border-slate-300 rounded-lg px-3 py-2 text-sm bg-white"
@@ -279,18 +300,51 @@ def get_config_view_js() -> str:
279
  <option key={p.id} value={p.id}>{p.name}</option>
280
  ))}
281
  </select>
 
282
 
283
- <label className="block text-sm font-semibold text-slate-700 mt-4 mb-2">Patient prompt addition (optional)</label>
284
- <textarea
285
- className="w-full border border-slate-300 rounded-lg px-3 py-2 text-sm bg-white h-28"
286
- placeholder="Extra context for this patient persona (used when the patient persona is selected in a run)."
287
- value={patientPromptAddition}
288
- onChange={(e) => setPatientPromptAddition(e.target.value)}
289
- />
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
290
  </div>
291
- <div className="text-sm text-slate-600">
292
- Patient structured controls are not implemented yet; this field is a per-patient freeform override used for context.
293
- </div>
294
  </div>
295
  ) : activePane === 'analysis' ? (
296
  <div className="mt-6 text-sm text-slate-600">
 
32
  }
33
  return [];
34
  });
35
+ const [patientAttributeItems, setPatientAttributeItems] = React.useState(() => {
36
  const data = getPatientPersonaData(existing || {}, initialEditorPatientId) || {};
37
+ const attrs = Array.isArray(data.attributes) ? data.attributes : null;
38
+ if (attrs && attrs.length) {
39
+ return attrs
40
+ .map((x) => (typeof x === 'string' ? x.trim() : ''))
41
+ .filter((x) => x.length > 0);
42
+ }
43
+ return [];
44
  });
45
  const [savedAt, setSavedAt] = React.useState(existing?.saved_at || null);
46
  const [saveFlash, setSaveFlash] = React.useState(false);
 
77
  setSurveyorAttributeItems((prev) => (prev || []).filter((_, i) => i !== idx));
78
  };
79
 
80
+ const addPatientAttribute = () => {
81
+ setPatientAttributeItems((prev) => ([...(prev || []), '' ]));
82
+ };
83
+
84
+ const updatePatientAttribute = (idx, text) => {
85
+ setPatientAttributeItems((prev) => (prev || []).map((v, i) => (i === idx ? text : v)));
86
+ };
87
+
88
+ const removePatientAttribute = (idx) => {
89
+ setPatientAttributeItems((prev) => (prev || []).filter((_, i) => i !== idx));
90
+ };
91
+
92
  React.useEffect(() => {
93
  authedFetch('/api/personas')
94
  .then(r => r.json())
 
122
  React.useEffect(() => {
123
  const cfg = loadConfig() || {};
124
  const data = getPatientPersonaData(cfg, selectedPatientId) || {};
125
+ const attrs = Array.isArray(data.attributes) ? data.attributes : [];
126
+ setPatientAttributeItems(attrs.map((x) => (typeof x === 'string' ? x.trim() : '')).filter((x) => x.length > 0));
127
  }, [selectedPatientId]);
128
 
129
  const onSave = () => {
 
142
  };
143
 
144
  patientStore[selectedPatientId] = {
145
+ attributes: (patientAttributeItems || [])
146
+ .map((x) => (typeof x === 'string' ? x.trim() : ''))
147
+ .filter((x) => x.length > 0),
148
  };
149
 
150
  const cfg = Object.assign({}, prev, {
 
288
  </div>
289
  </div>
290
  ) : activePane === 'patients' ? (
291
+ <div className="mt-6">
292
+ <div className="mb-4">
293
  <label className="block text-sm font-semibold text-slate-700 mb-2">Edit patient persona</label>
294
  <select
295
  className="w-full border border-slate-300 rounded-lg px-3 py-2 text-sm bg-white"
 
300
  <option key={p.id} value={p.id}>{p.name}</option>
301
  ))}
302
  </select>
303
+ </div>
304
 
305
+ <div className="grid grid-cols-2 gap-6">
306
+ <div>
307
+ <label className="block text-sm font-semibold text-slate-700 mb-2">Attributes</label>
308
+ <div className="text-xs text-slate-500 mb-2">
309
+ Plain-language rules the patient should follow during the session.
310
+ </div>
311
+ <div className="space-y-2">
312
+ {(patientAttributeItems || []).length === 0 ? (
313
+ <div className="text-sm text-slate-500">No attributes yet.</div>
314
+ ) : (
315
+ (patientAttributeItems || []).map((attr, idx) => (
316
+ <div key={idx} className="flex items-start gap-2">
317
+ <div className="mt-2 text-xs font-mono text-slate-500 w-10 shrink-0">{String(idx + 1).padStart(2, '0')}</div>
318
+ <input
319
+ className="flex-1 border border-slate-300 rounded-lg px-3 py-2 text-sm bg-white"
320
+ value={attr || ''}
321
+ onChange={(e) => updatePatientAttribute(idx, e.target.value)}
322
+ placeholder="Type an attribute..."
323
+ />
324
+ <button
325
+ type="button"
326
+ onClick={() => removePatientAttribute(idx)}
327
+ className="text-slate-600 hover:text-red-600 px-2 py-2 text-sm"
328
+ title="Remove attribute"
329
+ >
330
+
331
+ </button>
332
+ </div>
333
+ ))
334
+ )}
335
+ <button
336
+ type="button"
337
+ onClick={addPatientAttribute}
338
+ className="bg-slate-100 hover:bg-slate-200 text-slate-800 px-3 py-2 rounded-lg text-sm font-semibold transition-all border border-slate-300"
339
+ >
340
+ + Add attribute
341
+ </button>
342
+ </div>
343
+ </div>
344
+ <div className="text-sm text-slate-600">
345
+ These attributes are compiled into the patient prompt for the next run when this patient persona is selected.
346
+ </div>
347
  </div>
 
 
 
348
  </div>
349
  ) : activePane === 'analysis' ? (
350
  <div className="mt-6 text-sm text-slate-600">
frontend/pages/main_page.py CHANGED
@@ -76,12 +76,6 @@ def get_main_page_html(auth_enabled: bool = False) -> str:
76
  const store = getPatientPersonaStore(cfg);
77
  const data = store[patientId];
78
  if (data && typeof data === 'object') return data;
79
-
80
- // Backward compatibility: legacy global field tied to `cfg.patient_persona_id`.
81
- const legacyId = cfg && cfg.patient_persona_id;
82
- if (legacyId === patientId && typeof cfg.patient_prompt_addition === 'string') {
83
- return { prompt_addition: cfg.patient_prompt_addition };
84
- }
85
  return null;
86
  }
87
 
@@ -104,10 +98,12 @@ def get_main_page_html(auth_enabled: bool = False) -> str:
104
  .filter((x) => x.length > 0);
105
  }
106
 
107
- function getPatientPromptAdditionFor(cfg, patientId) {
108
  const data = getPatientPersonaData(cfg, patientId) || {};
109
- const raw = (typeof data.prompt_addition === 'string') ? data.prompt_addition : '';
110
- return raw || '';
 
 
111
  }
112
 
113
  function getRunPersonaSelection(cfg, kind) {
@@ -678,7 +674,7 @@ def get_main_page_html(auth_enabled: bool = False) -> str:
678
  const cfg = loadConfig() || {};
679
  const surveyorQuestionBank = getSurveyorQuestionBankFor(cfg, surveyorId);
680
  const surveyorAttributes = getSurveyorAttributesFor(cfg, surveyorId);
681
- const patientPromptAddition = getPatientPromptAdditionFor(cfg, patientId);
682
 
683
  setTimeout(() => {
684
  if (wsRef.current && wsRef.current.readyState === WebSocket.OPEN) {
@@ -688,7 +684,7 @@ def get_main_page_html(auth_enabled: bool = False) -> str:
688
  patient_persona_id: patientId,
689
  surveyor_question_bank: surveyorQuestionBank || undefined,
690
  surveyor_attributes: surveyorAttributes.length ? surveyorAttributes : undefined,
691
- patient_prompt_addition: patientPromptAddition || undefined,
692
  }));
693
  }
694
  }, 500);
@@ -710,7 +706,7 @@ def get_main_page_html(auth_enabled: bool = False) -> str:
710
  const cfg = loadConfig() || {};
711
  const surveyorQuestionBank = getSurveyorQuestionBankFor(cfg, surveyorId);
712
  const surveyorAttributes = getSurveyorAttributesFor(cfg, surveyorId);
713
- const patientPromptAddition = getPatientPromptAdditionFor(cfg, patientId);
714
 
715
  setTimeout(() => {
716
  if (wsRef.current && wsRef.current.readyState === WebSocket.OPEN) {
@@ -721,7 +717,7 @@ def get_main_page_html(auth_enabled: bool = False) -> str:
721
  patient_persona_id: patientId,
722
  surveyor_question_bank: surveyorQuestionBank || undefined,
723
  surveyor_attributes: surveyorAttributes.length ? surveyorAttributes : undefined,
724
- patient_prompt_addition: patientPromptAddition || undefined,
725
  }));
726
  }
727
  }, 500);
 
76
  const store = getPatientPersonaStore(cfg);
77
  const data = store[patientId];
78
  if (data && typeof data === 'object') return data;
 
 
 
 
 
 
79
  return null;
80
  }
81
 
 
98
  .filter((x) => x.length > 0);
99
  }
100
 
101
+ function getPatientAttributesFor(cfg, patientId) {
102
  const data = getPatientPersonaData(cfg, patientId) || {};
103
+ const attrs = Array.isArray(data.attributes) ? data.attributes : [];
104
+ return attrs
105
+ .map((x) => (typeof x === 'string') ? x.trim() : '')
106
+ .filter((x) => x.length > 0);
107
  }
108
 
109
  function getRunPersonaSelection(cfg, kind) {
 
674
  const cfg = loadConfig() || {};
675
  const surveyorQuestionBank = getSurveyorQuestionBankFor(cfg, surveyorId);
676
  const surveyorAttributes = getSurveyorAttributesFor(cfg, surveyorId);
677
+ const patientAttributes = getPatientAttributesFor(cfg, patientId);
678
 
679
  setTimeout(() => {
680
  if (wsRef.current && wsRef.current.readyState === WebSocket.OPEN) {
 
684
  patient_persona_id: patientId,
685
  surveyor_question_bank: surveyorQuestionBank || undefined,
686
  surveyor_attributes: surveyorAttributes.length ? surveyorAttributes : undefined,
687
+ patient_attributes: patientAttributes.length ? patientAttributes : undefined,
688
  }));
689
  }
690
  }, 500);
 
706
  const cfg = loadConfig() || {};
707
  const surveyorQuestionBank = getSurveyorQuestionBankFor(cfg, surveyorId);
708
  const surveyorAttributes = getSurveyorAttributesFor(cfg, surveyorId);
709
+ const patientAttributes = getPatientAttributesFor(cfg, patientId);
710
 
711
  setTimeout(() => {
712
  if (wsRef.current && wsRef.current.readyState === WebSocket.OPEN) {
 
717
  patient_persona_id: patientId,
718
  surveyor_question_bank: surveyorQuestionBank || undefined,
719
  surveyor_attributes: surveyorAttributes.length ? surveyorAttributes : undefined,
720
+ patient_attributes: patientAttributes.length ? patientAttributes : undefined,
721
  }));
722
  }
723
  }, 500);
frontend/react_gradio_hybrid.py CHANGED
@@ -166,7 +166,7 @@ async def frontend_websocket(websocket: WebSocket, conversation_id: str):
166
  "patient_persona_id": data.get("patient_persona_id", "cooperative_senior_001"),
167
  "surveyor_question_bank": data.get("surveyor_question_bank"),
168
  "surveyor_attributes": data.get("surveyor_attributes"),
169
- "patient_prompt_addition": data.get("patient_prompt_addition"),
170
  "host": settings.llm.host,
171
  "model": settings.llm.model,
172
  })
@@ -190,7 +190,7 @@ async def frontend_websocket(websocket: WebSocket, conversation_id: str):
190
  "patient_persona_id": data.get("patient_persona_id", "cooperative_senior_001"),
191
  "surveyor_question_bank": data.get("surveyor_question_bank"),
192
  "surveyor_attributes": data.get("surveyor_attributes"),
193
- "patient_prompt_addition": data.get("patient_prompt_addition"),
194
  "host": settings.llm.host,
195
  "model": settings.llm.model,
196
  })
 
166
  "patient_persona_id": data.get("patient_persona_id", "cooperative_senior_001"),
167
  "surveyor_question_bank": data.get("surveyor_question_bank"),
168
  "surveyor_attributes": data.get("surveyor_attributes"),
169
+ "patient_attributes": data.get("patient_attributes"),
170
  "host": settings.llm.host,
171
  "model": settings.llm.model,
172
  })
 
190
  "patient_persona_id": data.get("patient_persona_id", "cooperative_senior_001"),
191
  "surveyor_question_bank": data.get("surveyor_question_bank"),
192
  "surveyor_attributes": data.get("surveyor_attributes"),
193
+ "patient_attributes": data.get("patient_attributes"),
194
  "host": settings.llm.host,
195
  "model": settings.llm.model,
196
  })