Spaces:
Paused
Paused
File size: 13,553 Bytes
78caafb |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 |
"""Chat handler for Q&A and document modifications.
Uses the vision model (Qwen3-VL-30B-A3B-Thinking-FP8) in text-only mode
for chat interactions. The model handles both Q&A about assessment results
and document modification requests.
"""
import json
import logging
import re
from typing import Optional
from config.settings import settings
logger = logging.getLogger(__name__)
# System prompt for the chat model (vision model run in text-only mode).
# It defines two behaviors: free-form Q&A about assessment results, and a
# machine-readable <document_edit> JSON command that ChatHandler extracts
# from the model's response to drive document modifications. The section
# names listed below must match the "## <Section>" headings used in the
# generated markdown document.
CHAT_SYSTEM_PROMPT = """You are an expert industrial hygienist assistant helping with fire damage assessment. You have access to the assessment results and can answer questions or modify the generated document.
## Your Capabilities
1. **Answer Questions**: Explain zone classifications, material detections, sampling recommendations, and methodology.
2. **Document Modifications**: Add notes, update sections, or make changes to the generated Scope of Work document.
## When Modifying Documents
If the user requests a document change, include a modification command in your response using this format:
<document_edit>
{
"action": "add_note" | "add_section" | "append_to_section",
"section": "Section Name",
"content": "Content to add..."
}
</document_edit>
Available sections:
- Additional Notes (for general notes)
- Room Information
- Scope Summary
- AI Vision Analysis Summary
- Field Observations
- Material Dispositions
- Cleaning Specifications
- Air Filtration Requirements
- Sampling Plan
- Regulatory References
- Threshold Documentation
- Disclaimer
## Guidelines
- Be concise and professional
- Reference specific assessment data when answering questions
- For modifications, only change what the user requests
- Always explain changes you're making"""
class ChatHandler:
"""Handles chat interactions for assessment Q&A and document modifications."""
def __init__(self, model_stack=None):
"""Initialize chat handler.
Args:
model_stack: Model stack (RealModelStack or MockModelStack).
If None, will be loaded from get_models().
"""
self.model_stack = model_stack
def process_message(
self,
user_message: str,
session_state,
chat_history: list[dict],
) -> tuple[str, Optional[dict], list[dict]]:
"""Process a chat message and return response.
Args:
user_message: The user's message
session_state: Current SessionState with assessment data
chat_history: Previous chat messages in Gradio messages format
Returns:
Tuple of (response_text, document_edit_or_none, updated_chat_history)
"""
# Lazy load model stack if not provided
if self.model_stack is None:
from models.loader import get_models
self.model_stack = get_models()
# Build context from session
context = self._build_context(session_state)
# Generate response
if settings.mock_models:
response = self._generate_mock_response(user_message, context)
else:
response = self._generate_real_response(
user_message, context, chat_history
)
# Parse for document edits
document_edit = self._parse_document_edit(response)
# Clean response (remove document_edit tags for display)
display_response = re.sub(
r'<document_edit>.*?</document_edit>',
'',
response,
flags=re.DOTALL
).strip()
# Update chat history
updated_history = chat_history.copy()
updated_history.append({"role": "user", "content": user_message})
updated_history.append({"role": "assistant", "content": display_response})
return display_response, document_edit, updated_history
def _build_context(self, session_state) -> str:
"""Build context string from session state for chat."""
context_parts = []
# Room info
room = session_state.room
context_parts.append(f"Room: {room.name}")
context_parts.append(f"Dimensions: {room.length_ft}' x {room.width_ft}' x {room.ceiling_height_ft}'")
context_parts.append(f"Facility: {room.facility_classification}")
context_parts.append(f"Era: {room.construction_era}")
# Images summary
context_parts.append(f"Images analyzed: {len(session_state.images)}")
# Pipeline results if available
if session_state.pipeline_result_json:
try:
result = json.loads(session_state.pipeline_result_json)
if result.get("vision_results"):
context_parts.append("\nVision Analysis Summary:")
for img_id, analysis in result["vision_results"].items():
zone = analysis.get("zone", {}).get("classification", "unknown")
condition = analysis.get("condition", {}).get("level", "unknown")
context_parts.append(f" - {img_id}: zone={zone}, condition={condition}")
if result.get("dispositions"):
context_parts.append(f"\nDispositions: {len(result['dispositions'])} materials analyzed")
except json.JSONDecodeError:
pass
return "\n".join(context_parts)
def _generate_real_response(
self,
user_message: str,
context: str,
chat_history: list[dict],
) -> str:
"""Generate response using real vision model in text-only mode."""
try:
# Get vision model components
vision = self.model_stack.vision
model = vision.model
processor = vision.processor
sampling_params = vision.sampling_params
# Build messages (text-only, no image)
messages = [
{"role": "system", "content": CHAT_SYSTEM_PROMPT},
]
# Add context as first user message
messages.append({
"role": "user",
"content": f"Assessment Context:\n{context}"
})
messages.append({
"role": "assistant",
"content": "I understand the assessment context. How can I help you?"
})
# Add chat history
for msg in chat_history:
messages.append(msg)
# Add current user message
messages.append({"role": "user", "content": user_message})
# Apply chat template
prompt = processor.apply_chat_template(
messages,
tokenize=False,
add_generation_prompt=True,
)
# Generate (text-only: no multi_modal_data)
outputs = model.generate(
prompts=[prompt], # Just prompt string, no image
sampling_params=sampling_params,
)
return outputs[0].outputs[0].text
except Exception as e:
logger.error(f"Chat generation failed: {e}")
return f"I apologize, but I encountered an error processing your request: {str(e)}"
def _generate_mock_response(self, user_message: str, context: str) -> str:
"""Generate mock response for local development."""
user_lower = user_message.lower()
# Pattern matching for common questions
if "zone" in user_lower or "classification" in user_lower:
return """Zone classifications per FDAM §4.1 (IICRC/RIA/CIRI Technical Guide):
- **Burn Zone**: Direct fire involvement with structural char and complete combustion
- **Near-Field**: Adjacent to burn zone, heavy smoke/heat exposure, visible contamination
- **Far-Field**: Smoke migration only, light deposits, no structural damage
The AI analyzed each image to determine which zone best describes the visible conditions based on these criteria."""
if "material" in user_lower:
return """Materials are categorized by porosity, which affects cleaning requirements:
- **Non-porous**: Steel, concrete, glass, CMU - can typically be cleaned
- **Semi-porous**: Painted drywall, sealed wood - may require evaluation
- **Porous**: Carpet, insulation, acoustic tile - often require removal
The assessment identified materials visible in each image and assigned dispositions based on contamination level."""
if "sampling" in user_lower or "sample" in user_lower:
return """The sampling plan follows FDAM §2.3 requirements:
- **Tape lifts**: For particle identification via PLM (polarized light microscopy)
- **Surface wipes**: For metals quantification per NIOSH Method 9100 / BNL SOP IH75190
**Sample Density per FDAM §2.3:**
- <5,000 SF: 3-5 samples per surface type
- 5,000-25,000 SF: 5-10 samples per surface type
- 25,000-100,000 SF: 10-20 samples per surface type
- >100,000 SF: 20+ samples per surface type
**Ceiling Deck Enhancement (FDAM §4.5):** 1 sample per 2,500 SF due to 82.4% pass rate vs 95%+ for other surfaces."""
if "add" in user_lower and "note" in user_lower:
# Extract what they want to add
return """I'll add that note to the document.
<document_edit>
{
"action": "add_note",
"section": "Additional Notes",
"content": "Note added per assessor request during review."
}
</document_edit>
The note has been added to the Additional Notes section."""
if "explain" in user_lower or "why" in user_lower:
return """Based on the visual analysis and FDAM methodology:
The zone and condition classifications are determined by analyzing:
1. Distance from fire origin indicators
2. Visible contamination patterns
3. Structural damage presence
4. Material surface conditions
Each factor contributes to the overall assessment with confidence scores reflecting certainty."""
# Default response
return """I can help you understand the assessment results or make changes to the document.
**Questions I can answer:**
- Zone classification explanations
- Material detection details
- Sampling plan rationale
- Methodology references
**Changes I can make:**
- Add notes to any section
- Update specific content
- Add clarifications
What would you like to know or change?"""
def _parse_document_edit(self, response: str) -> Optional[dict]:
"""Parse document edit command from response."""
match = re.search(
r'<document_edit>\s*(\{.*?\})\s*</document_edit>',
response,
re.DOTALL
)
if match:
try:
return json.loads(match.group(1))
except json.JSONDecodeError:
logger.warning("Failed to parse document edit JSON")
return None
return None
def apply_document_edit(
self,
document: str,
edit: dict,
) -> str:
"""Apply a document edit to the current document.
Args:
document: Current markdown document
edit: Edit command with action, section, content
Returns:
Modified document
"""
action = edit.get("action", "")
section = edit.get("section", "")
content = edit.get("content", "")
if not content:
return document
if action == "add_note":
# Add note to Additional Notes section, or create it
if "## Additional Notes" in document:
# Append to existing section
document = document.replace(
"## Additional Notes",
f"## Additional Notes\n\n{content}"
)
else:
# Add before Disclaimer
if "## Disclaimer" in document:
document = document.replace(
"## Disclaimer",
f"## Additional Notes\n\n{content}\n\n---\n\n## Disclaimer"
)
else:
# Add at end
document += f"\n\n---\n\n## Additional Notes\n\n{content}"
elif action == "add_section":
# Add new section before Disclaimer
new_section = f"## {section}\n\n{content}"
if "## Disclaimer" in document:
document = document.replace(
"## Disclaimer",
f"{new_section}\n\n---\n\n## Disclaimer"
)
else:
document += f"\n\n---\n\n{new_section}"
elif action == "append_to_section":
# Find section and append content
section_pattern = rf"(## {re.escape(section)}.*?)(\n---|\Z)"
match = re.search(section_pattern, document, re.DOTALL)
if match:
section_content = match.group(1)
document = document.replace(
section_content,
f"{section_content}\n\n{content}"
)
return document
# Canned prompts wired to the UI's quick-action buttons; keys are the
# action identifiers the UI passes to get_quick_action_message().
QUICK_ACTIONS = {
    "explain_zones": "Explain how the zone classifications were determined for this assessment.",
    "explain_materials": "What materials were detected and why were they categorized this way?",
    "explain_sampling": "Explain the sampling plan recommendations.",
    "add_note": "I need to add a note to the document. Please tell me what note you'd like to add.",
}


def get_quick_action_message(action_key: str) -> str:
    """Return the canned chat message for a quick-action key, or '' if unknown."""
    try:
        return QUICK_ACTIONS[action_key]
    except KeyError:
        return ""
|