Commit 88bdcff

Initial commit: FDAM AI Pipeline v4.0.1
- Gradio-based fire damage assessment application
- Qwen3-VL vision model integration (mock + real)
- RAG-based knowledge retrieval with ChromaDB
- FDAM-compliant calculations (ACH, sample density)
- PDF generation with WeasyPrint
- Session persistence via localStorage
- 151 passing tests
Ready for HuggingFace Spaces deployment on 4xL4 (96GB VRAM)
Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This view is limited to 50 files because it contains too many changes.
- .env.example +14 -0
- .gitignore +43 -0
- CLAUDE.md +174 -0
- FDAM_AI_Pipeline_Technical_Spec.md +0 -0
- RAG-KB/FDAM_v4_METHODOLOGY.md +994 -0
- RAG-KB/Fire Remediation Processes and Methodologies_ A Review of Industry-Endorsed Standards.md +86 -0
- RAG-KB/Industrial Hygiene Lab Services Guide.md +369 -0
- RAG-KB/Metals clearance criteria-QVC.md +622 -0
- RAG-KB/Technical Guide for Wildfire Restoration - Key Information.md +79 -0
- RAG-KB/air-o-cell-method-guide-atlas.md +0 -0
- RAG-KB/wildfire_soot_particulate_removal_full_text_extraction.md +134 -0
- README.md +70 -0
- app.py +428 -0
- config/__init__.py +0 -0
- config/inference.py +34 -0
- config/settings.py +45 -0
- models/__init__.py +0 -0
- models/loader.py +37 -0
- models/mock.py +157 -0
- models/real.py +439 -0
- pipeline/__init__.py +23 -0
- pipeline/calculations.py +325 -0
- pipeline/dispositions.py +364 -0
- pipeline/generator.py +466 -0
- pipeline/main.py +334 -0
- pipeline/pdf_generator.py +315 -0
- rag/__init__.py +16 -0
- rag/chunker.py +432 -0
- rag/index_builder.py +187 -0
- rag/retriever.py +380 -0
- rag/vectorstore.py +287 -0
- requirements.txt +31 -0
- schemas/__init__.py +109 -0
- schemas/input.py +255 -0
- schemas/output.py +238 -0
- tests/__init__.py +0 -0
- tests/test_pdf_generator.py +246 -0
- tests/test_pipeline.py +525 -0
- tests/test_rag.py +536 -0
- tests/test_schemas.py +459 -0
- tests/test_tabs.py +381 -0
- tests/test_ui_state.py +360 -0
- ui/__init__.py +86 -0
- ui/components.py +272 -0
- ui/state.py +273 -0
- ui/storage.py +205 -0
- ui/tabs/__init__.py +15 -0
- ui/tabs/images.py +328 -0
- ui/tabs/observations.py +281 -0
- ui/tabs/project.py +251 -0
.env.example
ADDED
@@ -0,0 +1,14 @@
# FDAM AI Pipeline Environment Configuration

# Set to true for local development with mock models (RTX 4090)
# Set to false for production with real models (HuggingFace 4xL4)
MOCK_MODELS=true

# Server configuration (0.0.0.0 required for WSL)
SERVER_HOST=0.0.0.0
SERVER_PORT=7860

# Optional: Override model paths
# VISION_MODEL=Qwen/Qwen3-VL-30B-A3B-Instruct
# EMBEDDING_MODEL=Qwen/Qwen3-VL-Embedding-8B
# RERANKER_MODEL=Qwen/Qwen3-VL-Reranker-8B
.gitignore
ADDED
@@ -0,0 +1,43 @@
# Python
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
.venv/
venv/
ENV/

# Environment
.env

# IDE
.vscode/
.idea/
*.swp
*.swo

# Testing
.pytest_cache/
.coverage
htmlcov/
.mypy_cache/

# Generated
chroma_db/
outputs/
*.pdf
*.log

# OS
.DS_Store
Thumbs.db

# HuggingFace
*.safetensors
*.bin
*.pt
*.ckpt

# Claude Code
.claude/
CLAUDE.md
ADDED
@@ -0,0 +1,174 @@
# CLAUDE.md

This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.

## Project Overview

**FDAM AI Pipeline** - Fire Damage Assessment Methodology v4.0.1 implementation. An AI-powered system that generates professional Cleaning Specifications / Scope of Work documents for fire damage restoration.

- **Deployment**: HuggingFace Spaces with Nvidia 4xL4 (96GB VRAM total, 24GB per GPU)
- **Local Dev**: RTX 4090 (24GB) - insufficient for full model stack; use mock models locally
- **Spec Document**: `FDAM_AI_Pipeline_Technical_Spec.md` is the authoritative technical reference

## Critical Constraints

1. **No External API Calls** - 100% locally-owned models only (no Claude/OpenAI APIs)
2. **Memory Budget** - 4xL4 96GB total: ~58GB vision (30B BF16) + ~16GB embedding + ~16GB reranker (~90GB used, ~6GB headroom)
3. **Processing Time** - 60-90 seconds per assessment is acceptable
4. **MVP Scope** - Phase 1 (PRE) and Phase 2 (PRA) only; no lab results processing yet
5. **Static RAG** - Knowledge base is pre-indexed; no user document uploads

## Tech Stack

| Component | Technology |
|-----------|------------|
| UI Framework | Gradio 4.x |
| Vision/Generation | Qwen3-VL-30B-A3B-Instruct |
| Embeddings | Qwen3-VL-Embedding-8B |
| Reranker | Qwen3-VL-Reranker-8B |
| Vector Store | ChromaDB 0.4.x |
| Validation | Pydantic 2.x |
| PDF Generation | Pandoc 3.x |
| Package Manager | pip + requirements.txt |

## Development Commands

```sh
# Install dependencies
pip install -r requirements.txt

# Run locally with mock models
MOCK_MODELS=true python app.py

# Run with real models (full 4xL4 VRAM required; HuggingFace Spaces only)
python app.py

# Recommended tooling (install as dev dependencies)
ruff check .      # Linting
ruff format .     # Formatting
pytest tests/ -v  # Testing
mypy .            # Type checking
```

## Architecture

### 6-Stage Processing Pipeline
1. **Input Validation** - Pydantic schema validation (schemas/input.py)
2. **Vision Analysis** - Per-image zone/material/condition detection (pipeline/vision.py)
3. **RAG Retrieval** - Disposition lookup, thresholds, methods (rag/retriever.py)
4. **FDAM Logic** - Disposition matrix application (pipeline/main.py)
5. **Calculations** - Surface areas, ACH, labor estimates (pipeline/calculations.py)
6. **Document Generation** - SOW, sampling plan, confidence report (pipeline/generator.py)
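A minimal, self-contained sketch of how these six stages chain together. The function names and simplified data shapes below are illustrative assumptions for this guide, not the repository's actual module interfaces.

```python
# Illustrative 6-stage flow. All names and the toy data shapes are assumptions;
# the real stages live in schemas/, models/, rag/, and pipeline/.
from dataclasses import dataclass


@dataclass
class Surface:
    zone: str        # "burn" | "near_field" | "far_field"
    condition: str   # "background" | "light" | "moderate" | "heavy"
    area_sf: float


def validate_input(payload: dict) -> list[Surface]:
    # Stage 1: stands in for Pydantic validation (schemas/input.py)
    return [Surface(**s) for s in payload["surfaces"]]


def analyze_images(surfaces: list[Surface]) -> list[Surface]:
    # Stage 2: placeholder for per-image vision analysis (mock or Qwen3-VL)
    return surfaces


def retrieve_context(surfaces: list[Surface]) -> dict:
    # Stage 3: placeholder for ChromaDB retrieval of FDAM guidance
    return {"ach_minimum": 4}


def apply_dispositions(surfaces: list[Surface]) -> dict[str, str]:
    # Stage 4: drastically simplified disposition matrix
    return {
        f"surface_{i}": "no_action" if s.condition == "background" else "clean"
        for i, s in enumerate(surfaces)
    }


def compute_totals(surfaces: list[Surface]) -> dict:
    # Stage 5: area aggregation only (ACH and labor omitted here)
    return {"total_sf": sum(s.area_sf for s in surfaces)}


def generate_sow(dispositions: dict, totals: dict, context: dict) -> str:
    # Stage 6: document generation placeholder
    return (f"SOW: {totals['total_sf']:.0f} SF, {len(dispositions)} surfaces, "
            f"ACH >= {context['ach_minimum']}")


def run_pipeline(payload: dict) -> str:
    surfaces = analyze_images(validate_input(payload))
    context = retrieve_context(surfaces)
    dispositions = apply_dispositions(surfaces)
    totals = compute_totals(surfaces)
    return generate_sow(dispositions, totals, context)


print(run_pipeline({"surfaces": [
    {"zone": "far_field", "condition": "light", "area_sf": 1200.0},
    {"zone": "near_field", "condition": "background", "area_sf": 800.0},
]}))
```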

### Target Project Structure
```
├── app.py              # Gradio entry point
├── config/             # Inference and app settings
├── models/             # Model loading (mock vs real)
├── rag/                # Chunking, vectorstore, retrieval
├── schemas/            # Pydantic input/output models
├── pipeline/           # Main processing logic
├── ui/                 # Gradio UI components
├── RAG-KB/             # Knowledge base source files
├── chroma_db/          # ChromaDB persistence (generated)
└── tests/
```

## Domain Knowledge

### Zone Classifications
- **Burn Zone**: Direct fire involvement, structural char, exposed/damaged elements
- **Near-Field**: Adjacent to burn zone, heavy smoke/heat exposure, visible contamination
- **Far-Field**: Smoke migration only, light deposits, no structural damage

### Condition Levels
- **Background**: No visible contamination
- **Light**: Faint discoloration, minimal deposits
- **Moderate**: Visible film/deposits, surface color altered
- **Heavy**: Thick deposits, surface texture obscured
- **Structural Damage**: Physical damage requiring repair before cleaning

### Dispositions (FDAM §4.3)
- **No Action**: Document only
- **Clean**: Standard cleaning protocol
- **Evaluate**: Requires professional judgment
- **Remove**: Material must be removed
- **Remove/Repair**: Remove and repair/replace

### Facility Classifications (affects thresholds)
- **Operational**: Active workplace (higher thresholds: 500 µg/100cm² lead)
- **Non-Operational**: Unoccupied (lower thresholds: 22 µg/100cm² lead)
- **Public/Childcare**: Most stringent (EPA/HUD Oct 2024: 0.54 µg/100cm² floors)

### Key Calculations
- **ACH Formula**: `Units = (Volume × 4) / (CFM × 60)` per NADCA ACR 2021
- **Sample Density**: Varies by area size per FDAM §2.3
- **Ceiling Deck**: Enhanced sampling (1 per 2,500 SF per FDAM §4.5)

## RAG Knowledge Base

Source documents in `/RAG-KB/`:
- FDAM v4.0.1 methodology (primary reference)
- BNL SOP IH75190 (metals clearance thresholds)
- IICRC/RIA/CIRI Technical Guide (wildfire restoration)
- Lab method guides (PLM, ICP-MS)

**Chunking rules:**
- Keep tables intact (never split markdown tables)
- Preserve headers with content
- Include metadata (source, category, section)
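A rough illustration of how these rules could be applied: splitting on markdown headings keeps each table with its section, and every chunk carries source/category/section metadata. This is a simplified sketch, not the repository's `rag/chunker.py` (which may also enforce a maximum chunk size while keeping tables whole).

```python
# Illustrative chunker for the rules above. Simplified sketch only.
def chunk_markdown(text: str, source: str, category: str) -> list[dict]:
    chunks: list[dict] = []
    section = "preamble"
    buffer: list[str] = []

    def flush() -> None:
        body = "\n".join(buffer).strip()
        if body:
            chunks.append({
                "text": body,
                "metadata": {"source": source, "category": category, "section": section},
            })

    for line in text.splitlines():
        if line.startswith("#"):          # new section: close the previous chunk
            flush()
            buffer = []
            section = line.lstrip("#").strip()
        buffer.append(line)               # table rows never cross a heading,
    flush()                               # so a table stays whole within its chunk
    return chunks


doc = "## Metals\n| Metal | Limit |\n|---|---|\n| Pb | 22 |\n\n## Notes\nSee Part 1."
for c in chunk_markdown(doc, source="FDAM_v4_METHODOLOGY.md", category="thresholds"):
    print(c["metadata"]["section"], "|", c["text"].count("\n") + 1, "lines")
```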

## Confidence Framework

| Score | Level | Action |
|-------|-------|--------|
| ≥90% | Very High | Accept without review |
| 70-89% | High | Accept, note in report |
| 50-69% | Moderate | Flag for human review |
| <50% | Low | Require human verification |
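A small helper showing how the table above might map to code; the band boundaries follow the table, while the function name and return shape are illustrative assumptions.

```python
# Maps a confidence score (0-100) to the review action in the table above.
def confidence_action(score: float) -> tuple[str, str]:
    if score >= 90:
        return "very_high", "accept"
    if score >= 70:
        return "high", "accept_with_note"
    if score >= 50:
        return "moderate", "flag_for_review"
    return "low", "require_human_verification"


assert confidence_action(92.0) == ("very_high", "accept")
assert confidence_action(64.5) == ("moderate", "flag_for_review")
```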

## Multi-GPU Model Loading

The 4xL4 setup requires models to be distributed across GPUs. Use `device_map="auto"` in transformers:

```python
import torch
from transformers import AutoModel

model = AutoModel.from_pretrained(
    "Qwen/Qwen3-VL-30B-A3B-Instruct",
    torch_dtype=torch.bfloat16,
    device_map="auto",  # Automatically distributes across available GPUs
    trust_remote_code=True
)
```

Expected distribution (BF16, ~90GB total):
- Vision model (30B): ~58GB spread across GPUs via device_map="auto"
- Embedding model (8B): ~16GB
- Reranker model (8B): ~16GB
- Headroom: ~6GB for KV cache

**Fallback**: If VRAM issues arise, use `Qwen/Qwen3-VL-8B-Instruct` (~16GB) instead of the 30B model.

## Local Development Strategy

The RTX 4090 (24GB VRAM) cannot run the full model stack (~90GB required). Use this workflow:

1. Set `MOCK_MODELS=true` environment variable
2. Mock responses return realistic JSON matching vision output schema
3. Test pipeline logic, UI, calculations without real inference
4. Deploy to HuggingFace Spaces for real model testing
5. Request build logs after deployment to confirm success
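The `MOCK_MODELS` toggle might be wired up roughly like this. The `models/mock.py` and `models/real.py` modules do exist in this commit, but the class and function names below are assumptions for illustration, not the actual interfaces.

```python
# Sketch of a MOCK_MODELS-driven loader. Names are illustrative assumptions.
import os


class MockVisionModel:
    """Returns canned, schema-shaped output so the pipeline runs on a 24GB GPU."""

    def analyze(self, image_path: str) -> dict:
        return {"zone": "far_field", "condition": "light", "materials": ["drywall"]}


def load_vision_model():
    if os.getenv("MOCK_MODELS", "false").lower() == "true":
        return MockVisionModel()
    # Real path: load Qwen3-VL with device_map="auto" as shown above.
    import torch
    from transformers import AutoModel
    return AutoModel.from_pretrained(
        "Qwen/Qwen3-VL-30B-A3B-Instruct",
        torch_dtype=torch.bfloat16,
        device_map="auto",
        trust_remote_code=True,
    )
```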

## Code Style

- Use `Literal["a", "b", "c"]` unions instead of Enum for simple string choices
- Pydantic models for all input/output validation
- Explicit return types on public functions
- Result types or explicit error returns over thrown exceptions
- Group imports: stdlib → third-party → local
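A compact example of these conventions in one place; the field names and the simple `(value, error)` result tuple are illustrative choices, not mandated by the repo.

```python
# Illustrates the style rules above: Literal instead of Enum, Pydantic validation,
# explicit return types, and an explicit error return instead of raising.
from typing import Literal, Optional

from pydantic import BaseModel, ValidationError

Zone = Literal["burn", "near_field", "far_field"]
Condition = Literal["background", "light", "moderate", "heavy", "structural_damage"]


class SurfaceInput(BaseModel):
    zone: Zone
    condition: Condition
    area_sf: float


def parse_surface(payload: dict) -> tuple[Optional[SurfaceInput], Optional[str]]:
    try:
        return SurfaceInput(**payload), None
    except ValidationError as exc:
        return None, str(exc)


surface, err = parse_surface({"zone": "far_field", "condition": "light", "area_sf": 1200})
assert err is None and surface is not None
```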

## WSL Note

Dev servers must be exposed for WSL access. Bind Gradio to `0.0.0.0`:
```python
app.launch(server_name="0.0.0.0", server_port=7860)
```
FDAM_AI_Pipeline_Technical_Spec.md
ADDED
The diff for this file is too large to render.
RAG-KB/FDAM_v4_METHODOLOGY.md
ADDED
@@ -0,0 +1,994 @@
# FDAM: Fire Damage Assessment Methodology

## A Systematic Framework for Fire Restoration Industrial Hygiene Documentation

**Version 4.0.1 | January 2026**

**Developed in partnership:** IHC and GVO
**Empirical Validation Analysis:** January 2026 (QVC Distribution Center March 2023, Our Lady of Victory February 2025)

---

## Document Control

| Version | Date | Changes |
|---------|------|---------|
| 3.0 | January 2026 | Standards verification; ACH revised to 4 minimum per NADCA ACR 2021; metals aligned with BNL SOP IH75190; Public/Childcare lead updated to EPA/HUD October 2024 |
| 4.0 | January 2026 | Empirical validation integration; dual lab format support; regulatory justification blocks; ceiling deck protocols; reclean/retest procedures; deliverable consolidation; appendix restructure |
| 4.0.1 | January 2026 | EAA Method Guide integration: combustion particle definitions (soot/char/ash); qualitative observation checklist; unit conversion reference (cts/mm² to cts/cm²); EAA classification cross-reference |

---

## Executive Summary

FDAM is a systematic framework for assessing fire-damaged properties and generating scientifically defensible restoration documentation. The methodology synthesizes regulatory standards, industry guidance, and empirical field data from IHC fire restoration projects.

**FDAM produces three deliverables:**

1. **Cleaning Specification / Scope of Work** — Scope, methods, labor, equipment, and acceptance criteria
2. **Results Interpretation** — Threshold justification, regulatory basis, and pass/fail determination
3. **Executive Summary Report** — Completion verification and compliance documentation

**Standards Basis:**
- Metals clearance: BNL SOP IH75190 (Rev23, 06/23/17)
- Non-Operational alternative: Army/Air Force National Guard Indoor Firing Range Guidelines (200 µg/ft²)
- Air filtration: NADCA ACR 2021 (4 ACH minimum)
- Zone framework: IICRC/RIA/CIRI Technical Guide (December 2025)
- Particulate clearance: IHC professional judgment with empirical validation

---

## Part 1: Methodology Foundation

### 1.1 Scientific Basis

FDAM synthesizes:

- **Regulatory frameworks:** OSHA Technical Manual, NIOSH sampling methods, EPA clearance standards
- **Industry standards:** IICRC S700/S760, IICRC/RIA/CIRI Technical Guide, NADCA ACR, RIA Fire & Smoke Damage Repair
- **Published guidance:** BNL SOP IH75190, AIHA Technical Guide for Wildfire Impact Assessments
- **Empirical validation:** IHC field data from commercial fire restoration projects (see Appendix B)

### 1.2 Regulatory Framework

| Source | Application | Status |
|--------|-------------|--------|
| BNL SOP IH75190 (Rev23) | Surface wipe clearance for metals | **Primary - verified** |
| Army/Air Force National Guard Guidelines | Non-Operational lead alternative (200 µg/ft²) | **Primary - verified** |
| EPA/HUD Lead Dust Hazard Standards (October 2024) | Public/Childcare lead clearance | **Primary - verified** |
| OSHA Technical Manual, Section II Ch. 2 | Surface contaminant methodology, facility classification | Referenced |
| NIOSH Method 9100 | Surface wipe sampling procedures | Referenced |
| 29 CFR 1910.1025 | Lead housekeeping requirements | Referenced |
| 29 CFR 1910.1018 | Arsenic housekeeping requirements | Referenced |
| 29 CFR 1910.1027 | Cadmium housekeeping requirements | Referenced |
| NADCA ACR 2021 | Air filtration requirements | **Primary - verified** |
| IICRC/RIA/CIRI Technical Guide (Dec 2025) | Zone-based assessment | **Primary - verified** |
| IICRC S520 | Mold remediation (cross-reference for fungal co-occurrence) | Referenced |

### 1.3 Threshold Classification

**Standards-Based Thresholds:** Values from published, peer-reviewed, or regulatory sources with explicit citations.

**Professional Judgment Thresholds:** Values developed through field experience where no published standards exist. Explicitly labeled with empirical validation data where available.

### 1.4 Metals Clearance Thresholds

**Source:** BNL SOP IH75190, Attachment 9.3 (Rev23, 06/23/17)

| Metal | Non-Operational | Operational | Unit | Regulatory Basis |
|-------|-----------------|-------------|------|------------------|
| Lead (Pb) | 22 | 500 | µg/100cm² | 29 CFR 1910.1025 |
| Cadmium (Cd) | 3.3 | 50 | µg/100cm² | 29 CFR 1910.1027 |
| Arsenic (As) | 6.7 | 100 | µg/100cm² | 29 CFR 1910.1018 |

**Unit Conversions:**
- µg/100cm² × 9.29 = µg/ft²
- Lead Non-Op: 22 µg/100cm² ≈ 204 µg/ft²

**Alternative Non-Operational Reference:**
Army and Air Force National Guard "Guidelines and Procedures for Rehabilitation and Conversion of Indoor Firing Ranges" establishes 200 µg/ft² as acceptable surface contamination for spaces converted to general use. This is consistent with the BNL Non-Operational threshold (22 µg/100cm² ≈ 204 µg/ft²).

**Public/Childcare Thresholds (EPA/HUD October 2024):**

| Surface | Threshold | Unit |
|---------|-----------|------|
| Floors | 0.54 | µg/100cm² |
| Window Sills | 4.3 | µg/100cm² |
| Window Troughs | 4.3 | µg/100cm² |

### 1.5 Combustion Particle Definitions

Fire/combustion residue particles are classified into three categories based on combustion process:

| Category | Definition | Morphology |
|----------|------------|------------|
| **Soot** | Residues from combustion of organic resins and compounds | Aciniform (grape-like clusters); fine spherical particles; optically opaque |
| **Char** | Incomplete combustion of cellulose/vegetation material | Irregular angular fragments; carbonized plant structure visible; variable size |
| **Ash** | Residual mineral elements remaining after complete combustion (Ca, Na, Mg, K salts) | Irregular crystalline; often white/gray; variable opacity |

Source: Environmental Analysis Associates, Air-O-Cell Method Guide & Particle Atlas (2018)

**Laboratory Reporting Note:** Some laboratories report "Ash and Char" as a combined category. When combined reporting is used, interpret results against the Ash/Char threshold. When separated, sum the values for threshold comparison unless the laboratory provides specific guidance.

### 1.6 Particulate Clearance Thresholds

**Classification:** Professional Judgment with Empirical Validation

| Analyte | Clearance Threshold | Unit | Validation Status |
|---------|---------------------|------|-------------------|
| Ash and Char (combined) | < 150 | particles/cm² | Validated (97.8% pass rate, n=45) |
| Aciniform Soot | < 500 | particles/cm² | Validated (91.1% pass rate, n=45) |
| Cellulose/Synthetic Fibers | < 500 | particles/cm² | Professional judgment |
| Silicates | < 1,500 | particles/cm² | Professional judgment |

**Laboratory Reference Comparison:**

| Particle Type | Lab "Normal" Range | FDAM Clearance | Position |
|---------------|-------------------|----------------|----------|
| Ash/Char | 0-300/cm² | < 150/cm² | 50% of upper normal |
| Aciniform Soot | 0-800/cm² | < 500/cm² | 62.5% of upper normal |

Source: Hayes Microbial Consulting, Estimated Normal Ranges based on ASTM D6602

FDAM clearance thresholds are set below laboratory "normal" ranges to ensure post-restoration surfaces are demonstrably cleaner than typical unaffected environments.

**Empirical Validation Summary:**
- Dataset: 45 post-restoration samples (QVC Distribution Center, March 2023)
- Pass rate at current thresholds: 93.3%
- Typical achievable post-cleaning levels: 5-15/cm² (both particle types)
- See Appendix B for complete analysis

**Application:**
- Evaluate in conjunction with visual inspection and odor assessment
- Compare to control/background samples from unaffected areas
- Results interpreted by qualified industrial hygienist

---

## Part 2: Assessment Workflow

### 2.1 Project Phases

```
PHASE 1: PRE (Pre-Restoration Evaluation)
├── Site inspection and documentation
├── Contamination mapping
├── Material inventory
├── Zone classification (Burn/Near-Field/Far-Field)
└── Output: Preliminary findings, PRA recommendation

PHASE 2: PRA (Pre-Restoration Assessment)
├── Sampling plan development
├── Tape lift and surface wipe collection
├── Laboratory analysis
├── Results interpretation
└── Output: CLEANING SPECIFICATION / SCOPE OF WORK

PHASE 3: RESTORATION (Contractor Execution)
├── Work performed per specification
└── Output: Completion notification

PHASE 4: PRV (Post-Restoration Verification)
├── Verification sampling
├── Laboratory analysis
├── Pass/fail determination
├── Reclean/retest if required
└── Output: EXECUTIVE SUMMARY REPORT
```

### 2.2 Phase 1: Pre-Restoration Evaluation (PRE)

**Field Activities:**

| Activity | Method | Data Captured |
|----------|--------|---------------|
| Site walk-through | Visual inspection | Affected areas, impact severity by zone |
| Odor assessment | Sensory | Presence/intensity/location of smoke odor |
| White wipe test | Clean cloth on surfaces | Preliminary contamination indicator |
| Photo documentation | Camera/device | Conditions, damage, access constraints |
| Material inventory | Visual identification | Surface types, quantities, restorability |
| Dimensional survey | Manual measurement | Room dimensions, surface areas |
| Zone classification | Distance from fire origin | Burn Zone / Near-Field / Far-Field |

**PRE Decision Logic:**

```
IF visible contamination is widespread
   OR odor is significant
   OR white wipe test shows deposits
   OR materials of concern present
   OR property is in Burn Zone or Near-Field Zone
THEN → Recommend PRA (laboratory assessment)

IF contamination is superficial
   AND limited to small area
   AND no materials of concern
   AND Far-Field Zone only
THEN → May proceed directly to cleaning specification
```
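The PRE branch above translates directly into a small decision function. A minimal sketch follows; the field names are illustrative and the function simply mirrors the two rules, it is not part of the published methodology.

```python
# Mirrors the PRE decision logic above. Field names are illustrative only.
def pre_recommendation(
    widespread_contamination: bool,
    significant_odor: bool,
    white_wipe_deposits: bool,
    materials_of_concern: bool,
    zone: str,  # "burn", "near_field", or "far_field"
) -> str:
    if (
        widespread_contamination
        or significant_odor
        or white_wipe_deposits
        or materials_of_concern
        or zone in ("burn", "near_field")
    ):
        return "recommend_pra"          # laboratory assessment warranted
    return "proceed_to_cleaning_spec"   # superficial, limited, far-field only


assert pre_recommendation(False, False, True, False, "far_field") == "recommend_pra"
```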

### 2.3 Phase 2: Pre-Restoration Assessment (PRA)

**Sampling Protocol:**

*Tape Lift Samples (Particulate Identification):*
- Minimum 1 per distinct surface type per zone
- Additional samples at contamination gradients
- Control samples from unaffected areas (recommended)
- Analysis: Polarized light microscopy (PLM)

*Surface Wipe Samples (Metals Quantification):*
- Per NIOSH Method 9100 / BNL SOP IH75190
- 100 cm² sample area (10cm × 10cm template)
- Ghost Wipes or equivalent pre-moistened media
- Analysis: ICP-MS or ICP-OES at AIHA-accredited laboratory

**Sample Density Guidelines:**

| Area Size | Tape Lifts | Surface Wipes |
|-----------|------------|---------------|
| < 5,000 SF | 3-5 per surface type | 3-5 per surface type |
| 5,000 - 25,000 SF | 5-10 per surface type | 5-10 per surface type |
| 25,000 - 100,000 SF | 10-20 per surface type | 10-15 per surface type |
| > 100,000 SF | 20+ per surface type | 15-25 per surface type |

**Ceiling Deck Sample Density (Enhanced):**
Empirical data indicates ceiling deck surfaces exhibit higher post-cleaning contamination rates (82.4% pass rate vs 95%+ for other structural surfaces). For ceiling decks:
- Increase sample density by 50% above standard guidelines
- Minimum 1 sample per 2,500 SF (vs standard 1 per 5,000 SF)
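One way to encode the density table and the ceiling deck rule above is sketched below. The band boundaries follow the table; returning the lower bound of each range and combining it with the 1-per-2,500 SF floor for ceiling decks are illustrative simplifications, not FDAM requirements.

```python
# Encodes the sample density guidance above. Returns the minimum recommended
# tape lift count per surface type; the lower-bound choice is an assumption.
import math


def min_tape_lifts(area_sf: float, is_ceiling_deck: bool = False) -> int:
    if area_sf < 5_000:
        base = 3
    elif area_sf < 25_000:
        base = 5
    elif area_sf < 100_000:
        base = 10
    else:
        base = 20

    if is_ceiling_deck:
        # 50% above standard guidance, and at least 1 sample per 2,500 SF
        return max(math.ceil(base * 1.5), math.ceil(area_sf / 2_500))
    return base


assert min_tape_lifts(12_000) == 5
assert min_tape_lifts(12_000, is_ceiling_deck=True) == 8   # max(ceil(7.5), ceil(4.8))
```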

**Qualitative Observation Checklist:**

Document the following at each sample location:

| Observation | Response | Notes |
|-------------|----------|-------|
| Smoke/fire odor present? | Yes / No | Intensity if present |
| Visible soot deposits? | Yes / No | Describe pattern |
| Large char particles observed? | Yes / No | Estimated density |
| Ash-like residue present? | Yes / No | Color, texture |
| Surface discoloration? | Yes / No | Describe |
| Dust loading or interference? | Yes / No | May affect lab accuracy |
| Burned soil/pollen/vegetation indicators? | Yes / No | Wildfire indicator |

This checklist supports visual-to-lab correlation and identifies potential analytical interferences.

### 2.4 Phase 4: Post-Restoration Verification (PRV)

**Verification Protocol:**
1. Visual inspection for dust-free surfaces
2. Odor assessment (no detectable fire/smoke odor)
3. Verification sampling (same methods as PRA)
4. Laboratory analysis
5. Results comparison to clearance criteria
6. Pass/fail determination by area

**PRV Decision Logic:**

```
IF all samples pass clearance thresholds
   AND visual inspection confirms dust-free
   AND no detectable odor
THEN → Issue clearance, generate Executive Summary

IF any samples exceed thresholds
THEN → Execute Reclean/Retest Protocol (Section 5.4)
```

---

## Part 3: Facility Classification

### 3.1 Classification Categories

| Classification | Definition | Lead Threshold | Applicable Standards |
|----------------|------------|----------------|---------------------|
| Operational | OSHA regulated substance used; workers trained; hygiene controls in place | 500 µg/100cm² | BNL SOP IH75190 Operational |
| Non-Operational | No regulated substance use; workers not trained; eating/drinking permitted | 22 µg/100cm² | BNL SOP IH75190 Non-Operational |
| Public-Childcare | Schools, daycare, child-occupied facilities | 0.54 µg/100cm² (floors) | EPA/HUD October 2024 |

### 3.2 Classification Determination

Facility classification is a professional judgment decision documented in the Results Interpretation deliverable. The determination considers:

- Facility use and occupancy type
- Presence of OSHA regulated substances
- Worker training status
- Personal hygiene controls (eating/drinking restrictions, handwashing requirements)
- Occupant populations (children, general public, trained workers)

### 3.3 Regulatory Justification Blocks

**Non-Operational Commercial/Industrial:**

> The indoor environment within [FACILITY] is comparable to the definition of a "Non-Operational Area" per OSHA Technical Manual Section II Chapter 2: an area where an OSHA Regulated Substance is not used and where workers are not trained in hazards and controls. Personal hygiene control practices are not in place (hand washing is not expected on exiting the area) and eating & drinking are permitted.
>
> The applicable standard for measuring cleaning performance is derived from BNL SOP IH75190 "Surface Wipe Sampling for Metals" (Rev23, 06/23/17), which establishes 22 µg/100cm² (≈204 µg/ft²) for Non-Operational areas. This threshold is consistent with the Army and Air Force National Guard "Guidelines and Procedures for Rehabilitation and Conversion of Indoor Firing Ranges" which establishes 200 µg/ft² as acceptable for spaces converted to general use.
>
> OSHA housekeeping provisions (29 CFR 1910.1025, 1910.1018, 1910.1027) require surfaces be maintained "as free as practicable" of accumulations of regulated metals.

**Operational Industrial:**

> [FACILITY] meets the definition of an "Operational Area" per OSHA Technical Manual Section II Chapter 2: an area where workers are routinely in the presence of an OSHA Regulated Substance as part of their work activity. Workers who handle the substance have been trained in hazards and controls. Substances are routinely used, handled or stored and personal hygiene control practices are in place.
>
> The applicable standard is the BNL SOP IH75190 Operational threshold of 500 µg/100cm² for lead.

**Public-Childcare:**

> [FACILITY] is classified as a child-occupied facility subject to EPA/HUD Lead Dust Hazard Standards (October 2024). These standards establish protective thresholds for environments where children may be present.
>
> Applicable thresholds: 0.54 µg/100cm² (floors), 4.3 µg/100cm² (window sills and troughs).

---

## Part 4: Surface Assessment

### 4.1 Zone Classification

**Source:** IICRC/RIA/CIRI Technical Guide for Wildfire Restoration (December 2025)

| Zone | Definition | Typical Characteristics |
|------|------------|------------------------|
| Burn Zone | Direct fire involvement | Structural damage, char, complete combustion |
| Near-Field | Adjacent to burn zone, heavy smoke/heat exposure | Heavy soot deposits, heat damage, strong odor |
| Far-Field | Smoke migration without direct heat exposure | Light to moderate deposits, odor, no structural damage |

### 4.2 Condition Scale

| Condition | Visual Indicators |
|-----------|-------------------|
| Background | No visible contamination; equivalent to unaffected areas |
| Light | Faint discoloration; minimal deposits visible on white wipe |
| Moderate | Visible film or deposits; clear contamination on white wipe |
| Heavy | Thick deposits; surface texture obscured; strong odor |
| Structural Damage | Physical damage requiring repair before cleaning |

### 4.3 Disposition Matrix

**Non-Porous Surfaces (Steel, Concrete, Glass, Metal):**

| Zone | Condition | Disposition | Protocol |
|------|-----------|-------------|----------|
| Any | Background | No action | Document only |
| Far-Field | Light | Clean | Standard protocol |
| Far-Field | Moderate | Clean | Full protocol |
| Near-Field | Light | Clean | Full protocol |
| Near-Field | Moderate | Clean | Aggressive protocol, multiple passes |
| Near-Field | Heavy | Clean | Aggressive protocol with verification sampling |
| Burn Zone | Any restorable | Clean | Post-structural repair; aggressive protocol |
| Any | Structural Damage | Remove/Repair | Beyond cleaning scope |

**Porous/Semi-Porous Surfaces (Drywall, Carpet, Insulation, Acoustic Tile):**

| Zone | Condition | Disposition | Rationale |
|------|-----------|-------------|-----------|
| Far-Field | Background | Evaluate | May clean if truly superficial |
| Far-Field | Light | Evaluate/Clean | Assessment determines restorability |
| Far-Field | Moderate+ | Remove | Porous materials absorb contaminants |
| Near-Field | Light+ | Remove | Porous materials absorb contaminants and VOCs |
| Burn Zone | Any | Remove | Cannot effectively decontaminate |

### 4.4 Material Disposition Categories

**Tier 1: Generally Replace When Fire/Smoke Affected**

| Material | Rationale |
|----------|-----------|
| Fiberglass insulation | Absorbs particulates and VOCs into fiber matrix |
| Flexible ductwork | Interior lining absorbs contaminants; cannot effectively clean |
| HVAC duct interior insulation | Porous material in air pathway; recontamination risk |
| Mattresses and bedding | Multi-layer foam construction; deep penetration |

**Tier 2: Assess Based on Condition**

| Material | Clean When | Remove When |
|----------|------------|-------------|
| Carpet and pad | Far-Field, Light | Near-Field, Moderate+ |
| Drop ceiling tile | Far-Field, Light, smooth | Near-Field, or textured/acoustic |
| Drywall (painted) | Far-Field, Light | Near-Field Moderate+, or unpainted |
| Upholstered furniture | Far-Field, Light, high value | Near-Field, or low value |

**Tier 3: Generally Cleanable**

| Material | Standard Protocol |
|----------|-------------------|
| Structural steel | HEPA vac → wet wipe → rinse |
| Concrete (sealed) | Scrubber or power wash |
| Metal doors/frames | Wet wipe → rinse |
| Glass/windows | Wet wipe → squeegee |
| Smooth rigid ductwork | Per NADCA ACR |

### 4.5 Ceiling Deck Protocol

Empirical data indicates ceiling deck surfaces require enhanced attention:

**Finding:** 82.4% pass rate for ceiling decks vs 95%+ for other structural surfaces (n=45, QVC dataset)

**Requirements:**
- Increase PRV sample density by 50%
- Consider additional cleaning pass before PRV
- Document access method and cleaning thoroughness
- Priority surface for reclean if failures occur

### 4.6 Secondary Contamination

If fungal/mold growth is identified during fire damage assessment:
- Document presence, type, and extent
- Cross-reference IICRC S520 for remediation protocols
- Address fire damage and biological contamination as separate scopes
- Sequential remediation may be required (mold first if active growth)

---

## Part 5: Cleaning Protocol Framework

### 5.1 Standard Cleaning Sequence

```
Step 1: HEPA Vacuum
└── Remove loose particulate from all surfaces

Step 2: Dry Sponge (if needed)
└── Chemical sponge for char/soot on non-porous surfaces

Step 3: Wet Wipe - Alkaline Detergent
└── pH 10-12 solution for chemical residue removal

Step 4: Rinse Wipe
└── Clean water to remove detergent residue

Step 5: Degreaser (if needed)
└── For stubborn residues not removed by standard protocol
```

**Sequencing Rule:** Clean top-down (roof deck → structure → walls → floor) to prevent recontamination.

### 5.2 Surface-Specific Methods

| Surface Type | Standard Method |
|--------------|-----------------|
| Steel roof deck | HEPA vac → Wet wipe → Rinse |
| Steel joists/beams | HEPA vac → Wet wipe → Rinse |
| Steel columns | HEPA vac → Wet wipe → Rinse |
| Concrete floor | Scrubber machine + alkaline |
| CMU walls | HEPA vac → Wet wipe OR power wash |
| Metal doors | Wet wipe → Rinse |
| Rigid ductwork | Per NADCA ACR |

### 5.3 Air Filtration Requirements

**Source:** NADCA ACR 2021 Edition, Section 3.6

**Minimum Requirement:** 4 air changes per hour (ACH)

**Calculation:**
```
Units Required = (Volume CF × 4 ACH) / (Unit CFM × 60)

Where:
Volume CF = Area SF × Ceiling Height FT
Unit CFM = Rated capacity of air scrubber
```

**Example:**
```
Work Area: 50,000 SF × 30 FT = 1,500,000 CF
Units = (1,500,000 × 4) / (2,000 CFM × 60) = 50 units
```
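The calculation above translates directly into code. A short sketch follows; rounding up to whole scrubber units with `math.ceil` is an assumption for practical deployment, not part of the NADCA text.

```python
# Air scrubber count per NADCA ACR 2021 (4 ACH minimum), following the formula above.
import math


def air_scrubbers_required(area_sf: float, ceiling_height_ft: float,
                           unit_cfm: float, ach: float = 4.0) -> int:
    volume_cf = area_sf * ceiling_height_ft
    return math.ceil((volume_cf * ach) / (unit_cfm * 60))


# Matches the worked example: 50,000 SF x 30 FT at 2,000 CFM per unit -> 50 units
assert air_scrubbers_required(50_000, 30, 2_000) == 50
```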

### 5.4 Reclean/Retest Protocol

When PRV samples exceed clearance thresholds:

**Step 1: Identify Deficient Areas**
- Map failed sample locations
- Determine surface types affected
- Assess pattern (localized vs widespread)

**Step 2: Reclean Specification**
```
Failed surfaces at [SAMPLE LOCATIONS] require additional cleaning:
- [SURFACE TYPE]: Execute [PROTOCOL] with additional pass
- Extend cleaning 10 feet beyond failed sample locations
- Document cleaning date, method, and personnel
```

**Step 3: Retest Protocol**
- Resample at original failed locations
- Add samples at adjacent locations if pattern suggests broader issue
- Same laboratory and analytical methods as original PRV

**Step 4: Documentation**
- Reference original sample numbers and results
- Document reclean activities
- Report retest results with comparison to original

**Iteration:** Repeat until all samples pass clearance thresholds.

---

## Part 6: Documentation Outputs

### 6.1 Deliverable 1: Cleaning Specification / Scope of Work

**Purpose:** Define scope, methods, labor, equipment, and acceptance criteria for contractor execution.

**Required Sections:**

| Section | Content |
|---------|---------|
| Project Identification | Facility, address, contact, dates |
| Scope Summary | Affected areas, zone classifications, total SF by disposition |
| Surface Inventory | Itemized surfaces by type, area, condition, disposition |
| Work Area Preparation | Containment, air filtration calculations (4 ACH minimum) |
| Surface-Specific Procedures | Cleaning methods by surface type |
| Removal Scope | Materials requiring removal with quantities |
| Labor Estimate | Hours by task, production rates applied |
| Equipment Requirements | Air scrubbers, lifts, supplies with quantities |
| Quality Assurance Criteria | Pass/fail thresholds for PRV |
| Worker Protection | PPE, safety protocols |

**Ceiling Deck Emphasis:** When ceiling decks are in scope, include:
- Note regarding enhanced sample density at PRV
- Recommendation for additional cleaning pass
- Access method requirements

### 6.2 Deliverable 2: Results Interpretation

**Purpose:** Establish applicable thresholds with regulatory justification and determine pass/fail status.

**Required Sections:**

| Section | Content |
|---------|---------|
| Purpose Statement | Why interpretation needed, specific questions addressed |
| Facility Classification | Operational / Non-Operational / Public-Childcare determination |
| Regulatory Framework | Applicable standards with citations |
| Regulatory Justification | Justification block per Section 3.3 |
| Recommended Thresholds | Specific values with source citations |
| Results Comparison | Actual data vs thresholds |
| Pass/Fail Determination | By sample, by area, overall |
| Reclean Requirements | If applicable, per Section 5.4 |
| Response to Inquiries | Address specific stakeholder questions if applicable |

**Standards Basis Statement (Required):**
> Metals thresholds are standards-based per BNL SOP IH75190. Particulate thresholds represent professional judgment with empirical validation (see FDAM Appendix B).

### 6.3 Deliverable 3: Executive Summary Report

**Purpose:** Document completion and compliance for closeout.

**Required Sections:**

| Section | Content |
|---------|---------|
| Project Summary | Identification, scope performed, conclusions |
| Clearance Confirmation | Statement that all areas passed clearance criteria |
| Discussion of Results | Testing summary, any reclean/retest activities |
| Threshold Reference | Thresholds applied with regulatory basis |
| Chronology | Timeline of assessment, cleaning, verification |
| Appendices | Lab reports, photos, field documentation |
| Standard of Care | Professional limitations |
| Standards Basis Statement | Per Section 6.2 |

---

## Part 7: Validation Requirements

### 7.1 Threshold Validation Status

| Category | Status | Source | Validation |
|----------|--------|--------|------------|
| Metals (Pb, Cd, As) | **Verified** | BNL SOP IH75190 | Standards-based |
| Particulates | **Validated** | IHC + empirical data | 93.3% pass rate (n=45) |
| ACH requirements | **Verified** | NADCA ACR 2021 | Standards-based |
| Sample density | Professional Judgment | Internal guidance | Ongoing refinement |

### 7.2 Validation Criteria

Thresholds are validated when:
- >90% first-pass clearance rate with proper cleaning
- <5% false negatives
- Correlation with absence of occupant complaints post-restoration

### 7.3 Ongoing Data Collection

For threshold refinement, collect:
- Condition assessment + lab result + clearance outcome (paired)
- Surface type performance data
- Reclean frequency by surface type
- Control/background sample baselines

---

## Part 8: System Architecture

### 8.1 SmokeScan Implementation

```
FIELD DEVICE
├── Project/building/zone/room hierarchy
├── Zone classification with distance documentation
├── Surface inventory (type, material, condition, area)
├── Photo capture with metadata
├── Sample location documentation
└── Offline capability with sync

CLOUD PLATFORM
├── Project data management
├── Lab result entry and threshold comparison
├── SOW calculations (quantities, labor, equipment)
├── Document generation
├── Pass/fail determination with threshold source flagging
└── Report export
```

### 8.2 Calculation Engine

**Surface Area Aggregation:**
```
Total by Type = Σ(Surface.area) WHERE Surface.type = [type]
Total by Disposition = Σ(Surface.area) WHERE Surface.disposition = [action]
```

**Equipment Sizing:**
```
Air Scrubbers = (Total Volume × 4 ACH) / (Unit CFM × 60)
```

**Pass/Fail Determination:**
```
FOR each Result:
    Threshold = Lookup(Analyte, Classification)
    ThresholdSource = Lookup(Analyte, Source)
    IF Result < Threshold THEN Pass ELSE Fail
    FLAG if ThresholdSource = "Professional Judgment"
```
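A Python rendering of the pass/fail loop above. The threshold values come from Sections 1.4 and 1.6; the record layout, dictionary structure, and function name are illustrative only.

```python
# Pass/fail determination mirroring the pseudocode above.
THRESHOLDS = {
    # analyte: {facility classification: (threshold, unit, source)}
    "lead": {
        "operational": (500.0, "ug/100cm2", "BNL SOP IH75190"),
        "non_operational": (22.0, "ug/100cm2", "BNL SOP IH75190"),
        "public_childcare_floor": (0.54, "ug/100cm2", "EPA/HUD Oct 2024"),
    },
    "ash_char": {"any": (150.0, "particles/cm2", "Professional Judgment")},
    "aciniform_soot": {"any": (500.0, "particles/cm2", "Professional Judgment")},
}


def evaluate_sample(analyte: str, value: float, classification: str) -> dict:
    table = THRESHOLDS[analyte]
    threshold, unit, source = table.get(classification, table.get("any"))
    return {
        "analyte": analyte,
        "result": value,
        "threshold": threshold,
        "unit": unit,
        "pass": value < threshold,
        "professional_judgment": source == "Professional Judgment",  # flagged per spec
        "source": source,
    }


print(evaluate_sample("ash_char", 12.0, "non_operational"))   # passes, flagged PJ
print(evaluate_sample("lead", 35.0, "non_operational"))       # fails vs 22 ug/100cm2
```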
| 647 |
+
|
| 648 |
+
---
|
| 649 |
+
|
| 650 |
+
## Part 9: Future Research
|
| 651 |
+
|
| 652 |
+
### 9.1 Field Screening Methods
|
| 653 |
+
|
| 654 |
+
**Optical Density Approach:**
|
| 655 |
+
Develop calibrated visual assessment correlating reflectance measurements to contamination levels.
|
| 656 |
+
|
| 657 |
+
**Research Questions:**
|
| 658 |
+
- Can OD measurements correlate with tape lift particle counts?
|
| 659 |
+
- What calibration protocol provides reliable results?
|
| 660 |
+
|
| 661 |
+
### 9.2 Control Sample Protocol
|
| 662 |
+
|
| 663 |
+
**Decision Required:** Determine whether control/background samples should be mandatory for relative comparison, or if absolute thresholds are sufficient.
|
| 664 |
+
|
| 665 |
+
**Options:**
|
| 666 |
+
- A: Mandatory control sample with relative pass/fail logic
|
| 667 |
+
- B: Control samples recommended but absolute thresholds authoritative
|
| 668 |
+
- C: Control samples required only for disputed results
|
| 669 |
+
|
| 670 |
+
### 9.3 Surface-Specific Threshold Refinement
|
| 671 |
+
|
| 672 |
+
With additional data collection, evaluate whether surface-specific thresholds are warranted (e.g., tighter thresholds for ceiling decks given higher failure rates).
|
| 673 |
+
|
| 674 |
+
---
|
| 675 |
+
|
| 676 |
+
## Appendix A: Lab Result Interpretation Framework
|
| 677 |
+
|
| 678 |
+
### A.1 Supported Laboratory Formats
|
| 679 |
+
|
| 680 |
+
FDAM supports two primary laboratory reporting formats:
|
| 681 |
+
|
| 682 |
+
**Format 1: Quantitative (particles/cm²)**
|
| 683 |
+
- Labs: Hayes Microbial, EMSL, others
|
| 684 |
+
- Direct comparison to FDAM thresholds
|
| 685 |
+
- Preferred format for pass/fail determination
|
| 686 |
+
|
| 687 |
+
**Format 2: Semi-Quantitative (% particles per field at 400x)**
|
| 688 |
+
- Labs: N.G. Carlson Analytical, EAA Baxter methodology
|
| 689 |
+
- Requires interpretation guidance
|
| 690 |
+
- Methodological differences from Format 1
|
| 691 |
+
|
| 692 |
+
### A.2 Format 1: Quantitative Interpretation
|
| 693 |
+
|
| 694 |
+
Direct threshold comparison:
|
| 695 |
+
|
| 696 |
+
| Analyte | Result | Threshold | Determination |
|
| 697 |
+
|---------|--------|-----------|---------------|
|
| 698 |
+
| Ash/Char | [value]/cm² | < 150/cm² | PASS if < 150 |
|
| 699 |
+
| Aciniform Soot | [value]/cm² | < 500/cm² | PASS if < 500 |
|
| 700 |
+
|
| 701 |
+
### A.3 Format 2: Semi-Quantitative Interpretation
|
| 702 |
+
|
| 703 |
+
**Source:** EAA Air-O-Cell Method Guide & Particle Atlas (2018); EMSL Fire & Smoke Damage Guide 2021
|
| 704 |
+
|
| 705 |
+
| % per Field (400x) | Lab Interpretation | FDAM Guidance |
|
| 706 |
+
|--------------------|-------------------|---------------|
|
| 707 |
+
| < 1% | Typical low | Presumed PASS - consistent with clearance |
|
| 708 |
+
| < 3% | Upper background | Presumed PASS - within acceptable range |
|
| 709 |
+
| 3-10% | Moderate impact | Professional judgment required |
|
| 710 |
+
| > 10% | Significant impact | Presumed FAIL - additional cleaning likely required |
|
| 711 |
+
|
| 712 |
+
**Methodological Caveat:**
|
| 713 |
+
Percentage-per-field and particles/cm² are fundamentally different analytical approaches. The guidance above represents professional correlation, not mathematical conversion. When results fall in the 3-10% range, consider:
|
| 714 |
+
- Visual condition at sample location
|
| 715 |
+
- Comparison to control samples
|
| 716 |
+
- Overall project context
|
| 717 |
+
- Retesting with quantitative methodology if determination is critical
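
The guidance bands above can be expressed as a small helper; this is a sketch only, the function name and return strings are illustrative, and results in the 3-10% band still require the professional review described above.

```
# Sketch: map a semi-quantitative result (% particles per field at 400x)
# to the FDAM guidance bands in the table above.
def semi_quantitative_guidance(percent_per_field: float) -> str:
    if percent_per_field < 1.0:
        return "Presumed PASS - typical low, consistent with clearance"
    if percent_per_field < 3.0:
        return "Presumed PASS - upper background, within acceptable range"
    if percent_per_field <= 10.0:
        return "Professional judgment required - moderate impact (3-10%)"
    return "Presumed FAIL - significant impact, additional cleaning likely required"
```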
|
| 718 |
+
|
| 719 |
+
### A.4 Decision Logic
|
| 720 |
+
|
| 721 |
+
```
|
| 722 |
+
INPUT: Lab Result + Format + Facility Classification
|
| 723 |
+
|
| 724 |
+
STEP 1: Identify Format
|
| 725 |
+
IF particles/cm² → Use A.2 direct comparison
|
| 726 |
+
IF % per field → Use A.3 interpretation guidance
|
| 727 |
+
|
| 728 |
+
STEP 2: Determine Threshold
|
| 729 |
+
Metals → Per Facility Classification (Section 3.1)
|
| 730 |
+
Particulates → Standard thresholds (Section 1.6)
|
| 731 |
+
|
| 732 |
+
STEP 3: Compare and Determine
|
| 733 |
+
IF Result < Threshold → PASS
|
| 734 |
+
IF Result > Threshold → FAIL
|
| 735 |
+
IF Semi-quantitative in judgment range → Flag for professional review
|
| 736 |
+
|
| 737 |
+
STEP 4: Document
|
| 738 |
+
Record result, threshold, source, determination
|
| 739 |
+
Flag professional judgment thresholds
|
| 740 |
+
```
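
As a sketch of how this flow might be wired together, the routine below assumes the `evaluate` and `semi_quantitative_guidance` helpers sketched earlier in this appendix; the format strings and returned fields are illustrative assumptions rather than a defined FDAM interface.

```
# Sketch: route a lab result by reporting format, apply the appropriate
# threshold comparison, and document the outcome (Steps 1-4 above).
def determine(result_value: float, result_format: str, analyte: str, classification: str) -> dict:
    if result_format == "particles_per_cm2":            # Step 1: Format 1 -> A.2 direct comparison
        record = evaluate(analyte, classification, result_value)
    elif result_format == "percent_per_field":           # Step 1: Format 2 -> A.3 guidance
        record = {
            "determination": semi_quantitative_guidance(result_value),
            "flag": 3.0 <= result_value <= 10.0,          # Step 3: judgment range flagged for review
        }
    else:
        raise ValueError(f"Unsupported lab format: {result_format}")
    # Step 4: document result, analyte, and classification alongside the determination
    record.update({"result": result_value, "analyte": analyte, "classification": classification})
    return record
```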
|
| 741 |
+
|
| 742 |
+
### A.5 Laboratory Selection Guidance
|
| 743 |
+
|
| 744 |
+
When selecting laboratories:
|
| 745 |
+
- Confirm reporting format before submission
|
| 746 |
+
- Request particles/cm² format when available
|
| 747 |
+
- Ensure consistent methodology across PRA and PRV sampling
|
| 748 |
+
- Request differentiation notes if atypical particles observed
|
| 749 |
+
|
| 750 |
+
### A.6 Unit Conversion Reference
|
| 751 |
+
|
| 752 |
+
Laboratories may report surface particle concentrations in different units. Use the following conversions:
|
| 753 |
+
|
| 754 |
+
**Area Conversions:**
|
| 755 |
+
```
|
| 756 |
+
1 cm² = 100 mm²
|
| 757 |
+
cts/mm² × 100 = cts/cm²
|
| 758 |
+
cts/cm² ÷ 100 = cts/mm²
|
| 759 |
+
```
|
| 760 |
+
|
| 761 |
+
**Common Laboratory Unit Formats:**
|
| 762 |
+
|
| 763 |
+
| Lab Format | Unit | Conversion to FDAM (cts/cm²) |
|
| 764 |
+
|------------|------|------------------------------|
|
| 765 |
+
| Hayes Microbial | cts/cm² | Direct comparison |
|
| 766 |
+
| EAA | cts/mm² | Multiply by 100 |
|
| 767 |
+
| N.G. Carlson | % per field | Use Appendix A.3 guidance |
|
| 768 |
+
|
| 769 |
+
**Example Conversion:**
|
| 770 |
+
- EAA reports: 5.0 cts/mm² fire residue
|
| 771 |
+
- FDAM equivalent: 5.0 × 100 = 500 cts/cm²
|
| 772 |
+
- Threshold comparison: 500 cts/cm² vs <150 (Ash/Char) = FAIL
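
A short Python sketch of this conversion and comparison is below; the function name and threshold dictionary are illustrative, and the values reproduce the worked example above.

```
# Sketch: convert an EAA-style surface loading (cts/mm2) to FDAM units (cts/cm2)
# and compare against the Section 1.6 particulate thresholds.
PARTICULATE_THRESHOLDS_CM2 = {"ash_char": 150.0, "aciniform_soot": 500.0}

def mm2_to_cm2(counts_per_mm2: float) -> float:
    return counts_per_mm2 * 100.0  # 1 cm2 = 100 mm2

result_cm2 = mm2_to_cm2(5.0)  # EAA reports 5.0 cts/mm2 fire residue
status = "FAIL" if result_cm2 >= PARTICULATE_THRESHOLDS_CM2["ash_char"] else "PASS"
print(result_cm2, status)  # 500.0 FAIL
```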
|
| 773 |
+
|
| 774 |
+
**EAA Classification to FDAM Threshold Comparison:**
|
| 775 |
+
|
| 776 |
+
| EAA Classification | EAA (cts/mm²) | Converted (cts/cm²) | FDAM Status |
|
| 777 |
+
|--------------------|---------------|---------------------|-------------|
|
| 778 |
+
| Low | <1.0 | <100 | PASS |
|
| 779 |
+
| Typical-low | 1.0-5.0 | 100-500 | Evaluate vs threshold |
|
| 780 |
+
| Low-moderate | 5.0-10 | 500-1,000 | Likely FAIL |
|
| 781 |
+
| Moderate | 10-50 | 1,000-5,000 | FAIL |
|
| 782 |
+
| High | >50 | >5,000 | FAIL |
|
| 783 |
+
|
| 784 |
+
FDAM clearance thresholds (150 cts/cm² ash/char, 500 cts/cm² aciniform) fall within or at the upper boundary of EAA's "Typical-low" classification (100-500 cts/cm²), confirming FDAM thresholds are appropriately conservative for post-restoration clearance.
|
| 785 |
+
|
| 786 |
+
---
|
| 787 |
+
|
| 788 |
+
## Appendix B: Empirical Validation Data
|
| 789 |
+
|
| 790 |
+
### B.1 QVC Distribution Center Dataset
|
| 791 |
+
|
| 792 |
+
**Project:** QVC Outbound Fire Loss Restoration
|
| 793 |
+
**Location:** Rocky Mount, NC
|
| 794 |
+
**Date:** March 2023
|
| 795 |
+
**Sample Type:** Post-Restoration Verification (PRV)
|
| 796 |
+
**Sample Count:** 45 Bio-Tape samples (1.00 cm²)
|
| 797 |
+
**Laboratory:** Hayes Microbial Consulting
|
| 798 |
+
**Facility Classification:** Non-Operational Commercial
|
| 799 |
+
|
| 800 |
+
### B.2 Results Summary
|
| 801 |
+
|
| 802 |
+
**Aciniform-like Soot:**
|
| 803 |
+
|
| 804 |
+
| Statistic | Value |
|
| 805 |
+
|-----------|-------|
|
| 806 |
+
| Non-Detect | 21 samples (46.7%) |
|
| 807 |
+
| Range (detected) | 1 - 2,200/cm² |
|
| 808 |
+
| Median (detected) | 4.5/cm² |
|
| 809 |
+
| 90th Percentile | 65/cm² |
|
| 810 |
+
| Pass Rate | 91.1% (41/45) |
|
| 811 |
+
|
| 812 |
+
**Ash and Char:**
|
| 813 |
+
|
| 814 |
+
| Statistic | Value |
|
| 815 |
+
|-----------|-------|
|
| 816 |
+
| Non-Detect | 2 samples (4.4%) |
|
| 817 |
+
| Range (detected) | 1 - 440/cm² |
|
| 818 |
+
| Median (detected) | 5/cm² |
|
| 819 |
+
| 90th Percentile | 60/cm² |
|
| 820 |
+
| Pass Rate | 97.8% (44/45) |
|
| 821 |
+
|
| 822 |
+
**Combined Pass/Fail:**
|
| 823 |
+
|
| 824 |
+
| Status | Count | Percentage |
|
| 825 |
+
|--------|-------|------------|
|
| 826 |
+
| Both Pass | 42 | 93.3% |
|
| 827 |
+
| Any Fail | 3 | 6.7% |
|
| 828 |
+
|
| 829 |
+
### B.3 Surface Type Analysis
|
| 830 |
+
|
| 831 |
+
| Surface Type | Samples | Pass Rate |
|
| 832 |
+
|--------------|---------|-----------|
|
| 833 |
+
| Ceiling Deck (CD) | 17 | 82.4% |
|
| 834 |
+
| Ceiling Joist (CJ) | 20 | 95.0% |
|
| 835 |
+
| Beam | 6 | 100% |
|
| 836 |
+
| Column | 1 | 100% |
|
| 837 |
+
| Pipe | 1 | 100% |
|
| 838 |
+
|
| 839 |
+
**Finding:** Ceiling decks exhibit significantly lower pass rates, driving the ceiling deck emphasis protocol in Section 4.5.
|
| 840 |
+
|
| 841 |
+
### B.4 Failed Sample Analysis
|
| 842 |
+
|
| 843 |
+
| Sample | Location | Aciniform | Ash/Char | Failure |
|
| 844 |
+
|--------|----------|-----------|----------|---------|
|
| 845 |
+
| 02 | B2-C2 Grid - Ceiling Deck | 2,200/cm² | 4/cm² | Aciniform |
|
| 846 |
+
| 06 | D2-E2 Grid - Ceiling Deck | 1,320/cm² | 15/cm² | Aciniform |
|
| 847 |
+
| 12 | E3-F3 Grid - CJ Horizontal | 8/cm² | 440/cm² | Ash/Char |
|
| 848 |
+
|
| 849 |
+
All failures were addressed through reclean/retest protocol and subsequently passed.
|
| 850 |
+
|
| 851 |
+
### B.5 Laboratory Reference Ranges
|
| 852 |
+
|
| 853 |
+
**Source:** Hayes Microbial Consulting, based on ASTM D6602
|
| 854 |
+
|
| 855 |
+
| Particle Type | Normal Surface Range |
|
| 856 |
+
|---------------|---------------------|
|
| 857 |
+
| Ash/Char | 0-300/cm² |
|
| 858 |
+
| Aciniform Soot | 0-800/cm² |
|
| 859 |
+
| Cellulose Fibers | 0-1,600/cm² |
|
| 860 |
+
| Synthetic Fibers | 0-1,600/cm² |
|
| 861 |
+
| Silicates | 0-2,800/cm² |
|
| 862 |
+
|
| 863 |
+
These ranges represent typical environments, not post-fire clearance criteria. FDAM thresholds are set below these ranges to ensure demonstrably clean post-restoration conditions.
|
| 864 |
+
|
| 865 |
+
### B.6 Our Lady of Victory Dataset
|
| 866 |
+
|
| 867 |
+
**Project:** Our Lady of Victory (Catholic School)
|
| 868 |
+
**Location:** Minnesota
|
| 869 |
+
**Date:** February 2025
|
| 870 |
+
**Sample Type:** Assessment
|
| 871 |
+
**Sample Count:** 55 tease-tape samples
|
| 872 |
+
**Laboratory:** N.G. Carlson Analytical
|
| 873 |
+
**Facility Classification:** Public-Childcare
|
| 874 |
+
|
| 875 |
+
**Methodology:** Semi-quantitative (% particles per field at 400x)
|
| 876 |
+
|
| 877 |
+
**Distribution by Impact Level:**
|
| 878 |
+
|
| 879 |
+
| Impact Level | Samples | Percentage |
|
| 880 |
+
|--------------|---------|------------|
|
| 881 |
+
| No Char/No Soot | 14 | 27% |
|
| 882 |
+
| Typical Low (<1%) | 25 | 48% |
|
| 883 |
+
| Upper Background (<3%) | 7 | 13% |
|
| 884 |
+
| Moderate (3-10%) | 5 | 10% |
|
| 885 |
+
| Significant (>10%) | 1 | 2% |
|
| 886 |
+
|
| 887 |
+
**Pattern Observation:** Basement and lower-level areas showed higher contamination, consistent with smoke stratification.
|
| 888 |
+
|
| 889 |
+
---
|
| 890 |
+
|
| 891 |
+
## Appendix C: Deliverable Templates
|
| 892 |
+
|
| 893 |
+
### C.1 Cleaning Specification / SOW - Key Language Blocks
|
| 894 |
+
|
| 895 |
+
**Scope Statement:**
|
| 896 |
+
> [FACILITY] sustained fire damage on [DATE]. Industrial Hygiene Consulting, Corp. (IHC) conducted Pre-Restoration Assessment on [DATE]. Based on laboratory analysis and field assessment, the following cleaning specification establishes scope, methods, and acceptance criteria for fire residue restoration.
|
| 897 |
+
|
| 898 |
+
**Zone Summary Table:**
|
| 899 |
+
```
|
| 900 |
+
| Zone | Area (SF) | Condition | Disposition |
|
| 901 |
+
|------|-----------|-----------|-------------|
|
| 902 |
+
| [Zone ID] | [SF] | [Condition] | Clean/Remove |
|
| 903 |
+
```
|
| 904 |
+
|
| 905 |
+
**Air Filtration Calculation:**
|
| 906 |
+
> Work area volume: [SF] × [Height] = [CF]
|
| 907 |
+
> Required ACH: 4 (NADCA ACR 2021)
|
| 908 |
+
> Air scrubber capacity: [CFM] per unit
|
| 909 |
+
> Units required: ([CF] × 4) / ([CFM] × 60) = [Units]
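
A sketch of the same calculation in Python follows; the example inputs (floor area, ceiling height, scrubber capacity) are hypothetical and only illustrate the formula.

```
# Sketch: number of air scrubbers needed to achieve the required air changes
# per hour (4 ACH per NADCA ACR 2021) for a given work area volume.
import math

def scrubber_units_required(floor_area_sf: float, ceiling_height_ft: float,
                            scrubber_cfm: float, ach: float = 4.0) -> int:
    volume_cf = floor_area_sf * ceiling_height_ft
    required_cfm = (volume_cf * ach) / 60.0        # convert air changes/hour to CFM
    return math.ceil(required_cfm / scrubber_cfm)  # round up to whole units

print(scrubber_units_required(20_000, 14, 2_000))  # hypothetical example -> 10 units
```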
|
| 910 |
+
|
| 911 |
+
**Acceptance Criteria:**
|
| 912 |
+
> Post-restoration verification sampling will be conducted per FDAM methodology. Clearance thresholds:
|
| 913 |
+
> - Ash and Char: < 150 particles/cm²
|
| 914 |
+
> - Aciniform Soot: < 500 particles/cm²
|
| 915 |
+
> - Lead: [Threshold] µg/100cm² per [Classification] standards
|
| 916 |
+
>
|
| 917 |
+
> Surfaces exceeding thresholds require reclean and retest until passing.
|
| 918 |
+
|
| 919 |
+
### C.2 Results Interpretation - Key Language Blocks
|
| 920 |
+
|
| 921 |
+
**Purpose Statement:**
|
| 922 |
+
> IHC provides this results interpretation to establish applicable clearance thresholds for [FACILITY] based on facility classification and regulatory framework.
|
| 923 |
+
|
| 924 |
+
**Classification Determination:**
|
| 925 |
+
> [Insert applicable regulatory justification block from Section 3.3]
|
| 926 |
+
|
| 927 |
+
**Threshold Table:**
|
| 928 |
+
```
|
| 929 |
+
| Analyte | Threshold | Unit | Source |
|
| 930 |
+
|---------|-----------|------|--------|
|
| 931 |
+
| Lead | [value] | µg/100cm² | [BNL/EPA-HUD] |
|
| 932 |
+
| Ash/Char | 150 | particles/cm² | IHC/FDAM |
|
| 933 |
+
| Aciniform | 500 | particles/cm² | IHC/FDAM |
|
| 934 |
+
```
|
| 935 |
+
|
| 936 |
+
**Pass/Fail Summary:**
|
| 937 |
+
> Of [N] samples collected, [X] passed all clearance thresholds. [Y] samples exceeded thresholds and require reclean/retest per Section 5.4.
|
| 938 |
+
|
| 939 |
+
**Standards Basis Statement:**
|
| 940 |
+
> Metals thresholds are standards-based per BNL SOP IH75190 (Rev23, 06/23/17). Particulate thresholds represent professional judgment developed through IHC field experience with empirical validation (93.3% pass rate, n=45).
|
| 941 |
+
|
| 942 |
+
### C.3 Executive Summary - Key Language Blocks
|
| 943 |
+
|
| 944 |
+
**Clearance Statement:**
|
| 945 |
+
> Based on post-restoration verification testing conducted [DATE], all tested surfaces within [FACILITY] meet applicable clearance criteria. The fire residue restoration is complete and the facility is cleared for reoccupancy.
|
| 946 |
+
|
| 947 |
+
**Testing Summary:**
|
| 948 |
+
> [N] tape lift samples and [N] surface wipe samples were collected from [AREAS]. All results were below applicable thresholds.
|
| 949 |
+
|
| 950 |
+
**Threshold Reference:**
|
| 951 |
+
> Clearance thresholds applied:
|
| 952 |
+
> - Lead: [value] µg/100cm² (BNL SOP IH75190, Non-Operational)
|
| 953 |
+
> - Particulates: < 150/cm² ash/char, < 500/cm² aciniform (IHC/FDAM professional judgment with empirical validation)
|
| 954 |
+
|
| 955 |
+
---
|
| 956 |
+
|
| 957 |
+
## Appendix D: Reference Standards Compendium
|
| 958 |
+
|
| 959 |
+
### D.1 Primary Standards (Verified)
|
| 960 |
+
|
| 961 |
+
| Standard | Title | Version | Application |
|
| 962 |
+
|----------|-------|---------|-------------|
|
| 963 |
+
| BNL SOP IH75190 | Surface Wipe Sampling for Metals | Rev23, 06/23/17 | Metals clearance thresholds |
|
| 964 |
+
| EPA/HUD Lead Dust Hazard Standards | Lead Dust Hazard Standards | October 2024 | Public-Childcare lead thresholds |
|
| 965 |
+
| NADCA ACR | Assessment, Cleaning and Restoration of HVAC Systems | 2021 Edition | Air filtration requirements |
|
| 966 |
+
| IICRC/RIA/CIRI Technical Guide | Technical Guide for Wildfire Restoration | December 2025 | Zone framework |
|
| 967 |
+
| Army/Air Force National Guard | Guidelines for Indoor Firing Range Rehabilitation | Current | Non-Operational lead alternative |
|
| 968 |
+
|
| 969 |
+
### D.2 Referenced Standards
|
| 970 |
+
|
| 971 |
+
| Standard | Application |
|
| 972 |
+
|----------|-------------|
|
| 973 |
+
| OSHA 29 CFR 1910.1025 | Lead housekeeping requirements |
|
| 974 |
+
| OSHA 29 CFR 1910.1018 | Arsenic housekeeping requirements |
|
| 975 |
+
| OSHA 29 CFR 1910.1027 | Cadmium housekeeping requirements |
|
| 976 |
+
| OSHA Technical Manual Section II Ch. 2 | Surface contaminant methodology |
|
| 977 |
+
| NIOSH Method 9100 | Surface wipe sampling procedures |
|
| 978 |
+
| IICRC S700 | Standard for Fire and Smoke Damage Restoration |
|
| 979 |
+
| IICRC S520 | Standard for Mold Remediation |
|
| 980 |
+
| ASTM D6602 | Sampling and Testing of Carbon Black |
|
| 981 |
+
|
| 982 |
+
### D.3 Laboratory References
|
| 983 |
+
|
| 984 |
+
| Reference | Application |
|
| 985 |
+
|-----------|-------------|
|
| 986 |
+
| Environmental Analysis Associates (EAA) Air-O-Cell Method Guide & Particle Atlas (2018) | Combustion particle definitions; classification ranges; unit conversion reference; semi-quantitative interpretation |
|
| 987 |
+
| EMSL Fire & Smoke Damage Guide 2021 | Sampling procedures |
|
| 988 |
+
| Hayes Microbial Normal Ranges | Reference comparison (ASTM D6602 based) |
|
| 989 |
+
|
| 990 |
+
**Note on EAA:** Environmental Analysis Associates, founded by Daniel Baxter (inventor of the Air-O-Cell sampler), maintains 30+ years of indoor air quality data. Their classification system provides independent validation of FDAM threshold positioning. EAA reports in cts/mm² (convert to cts/cm² by multiplying by 100).
|
| 991 |
+
|
| 992 |
+
---
|
| 993 |
+
|
| 994 |
+
*FDAM v4.0.1 — End of Document*
|
RAG-KB/Fire Remediation Processes and Methodologies_ A Review of Industry-Endorsed Standards.md
ADDED
|
@@ -0,0 +1,86 @@
|
| 1 |
+
# Fire Remediation Processes and Methodologies: A Review of Industry-Endorsed Standards
|
| 2 |
+
|
| 3 |
+
**Author:** Manus AI
|
| 4 |
+
**Date:** January 8, 2026
|
| 5 |
+
|
| 6 |
+
## 1. Introduction
|
| 7 |
+
|
| 8 |
+
This report provides a comprehensive overview of industry-endorsed and published sources of domain knowledge for fire remediation processes and methodologies. The research project focused on identifying key standards, guidelines, and technical publications from major standards organizations, government agencies, and industry associations. The findings are intended to serve as a foundational resource for professionals in the fire restoration, insurance, and environmental health and safety sectors.
|
| 9 |
+
|
| 10 |
+
The fire and smoke damage restoration industry relies on a robust framework of standards and best practices to ensure that remediation work is performed safely, effectively, and in a scientifically defensible manner. This report synthesizes information from a wide range of sources, including the Institute of Inspection, Cleaning and Restoration Certification (IICRC), the National Fire Protection Association (NFPA), ASTM International, the Restoration Industry Association (RIA), the U.S. Environmental Protection Agency (EPA), and the Occupational Safety and Health Administration (OSHA).
|
| 11 |
+
|
| 12 |
+
## 2. Key Standards and Guidelines
|
| 13 |
+
|
| 14 |
+
The following sections detail the most relevant standards and guidelines from leading organizations in the field of fire and smoke damage restoration.
|
| 15 |
+
|
| 16 |
+
### 2.1. Institute of Inspection, Cleaning and Restoration Certification (IICRC)
|
| 17 |
+
|
| 18 |
+
The IICRC is a key standards-setting body for the restoration industry. Its standards are ANSI-accredited and internationally recognized as best practices.
|
| 19 |
+
|
| 20 |
+
**ANSI/IICRC S700: Standard for Professional Fire and Smoke Damage Restoration** [1]
|
| 21 |
+
|
| 22 |
+
This is the cornerstone standard for the fire and smoke damage restoration industry. It provides a comprehensive framework for the assessment and remediation of fire and smoke damage in buildings. The S700 standard covers the principles, processes, and procedures for assessing fire residues and odors, and for the cleaning and restoration of building systems, structures, and contents. It is important to note that the S700 standard is currently under revision, with a new version expected in the near future.
|
| 23 |
+
|
| 24 |
+
**ANSI/IICRC S590: Standard for Professional Assessment of HVAC Systems Following a Water, Fire, or Mold Damage Event** [2]
|
| 25 |
+
|
| 26 |
+
This standard focuses specifically on the assessment of HVAC systems after a fire or other damaging event. It provides detailed procedures for inspecting and evaluating HVAC systems to determine the extent of damage and to develop a restoration plan. The S590 standard is critical for ensuring that HVAC systems are properly cleaned and decontaminated to prevent the spread of contaminants throughout a building.
|
| 27 |
+
|
| 28 |
+
**IICRC/RIA/CIRI Technical Guide for Wildfire Restoration** [3]
|
| 29 |
+
|
| 30 |
+
Published in December 2025, this technical guide provides a science-based framework for the restoration of properties impacted by wildfires. It was developed in collaboration with the Restoration Industry Association (RIA) and the Cleaning Industry Research Institute (CIRI). The guide outlines a four-step process for wildfire restoration, including pre-restoration evaluation, pre-restoration assessment, the restoration phase, and project completion.
|
| 31 |
+
|
| 32 |
+
### 2.2. National Fire Protection Association (NFPA)
|
| 33 |
+
|
| 34 |
+
The NFPA is a global nonprofit organization devoted to eliminating death, injury, property, and economic loss due to fire, electrical, and related hazards. The NFPA develops and publishes more than 300 consensus codes and standards intended to minimize the risk and effects of fire.
|
| 35 |
+
|
| 36 |
+
**NFPA 921: Guide for Fire and Explosion Investigations** [4]
|
| 37 |
+
|
| 38 |
+
NFPA 921 is the primary guide for the scientific investigation of fire and explosion incidents. It establishes a systematic, scientific method for fire investigation that is widely accepted in the legal and insurance communities. The guide provides detailed information on fire dynamics, evidence collection and preservation, and the analysis of fire patterns.
|
| 39 |
+
|
| 40 |
+
**NFPA 1033: Standard for Professional Qualifications for Fire Investigator** [5]
|
| 41 |
+
|
| 42 |
+
This standard establishes the minimum job performance requirements for fire investigators. It is a critical standard for ensuring that fire investigations are conducted by qualified professionals with the necessary knowledge, skills, and abilities.
|
| 43 |
+
|
| 44 |
+
### 2.3. ASTM International
|
| 45 |
+
|
| 46 |
+
ASTM International is a globally recognized leader in the development and delivery of voluntary consensus standards. ASTM standards are used around the world to improve product quality, enhance health and safety, strengthen market access and trade, and build consumer confidence.
|
| 47 |
+
|
| 48 |
+
**ASTM E119: Standard Test Methods for Fire Tests of Building Construction and Materials** [6]
|
| 49 |
+
|
| 50 |
+
This standard is used to evaluate the fire-resistance of building materials and assemblies. It provides a standardized method for testing how long building elements can withstand a fire and continue to perform their structural function.
|
| 51 |
+
|
| 52 |
+
**ASTM C856: Standard Practice for Petrographic Examination of Hardened Concrete** [7]
|
| 53 |
+
|
| 54 |
+
This standard is used to assess the condition of concrete after a fire. It provides a method for examining the microstructure of concrete to determine the extent of damage and to guide repair and restoration efforts.
|
| 55 |
+
|
| 56 |
+
## 3. Government Agencies
|
| 57 |
+
|
| 58 |
+
Government agencies such as the EPA and OSHA also play a role in the fire restoration industry by providing guidelines and regulations related to environmental protection and worker safety.
|
| 59 |
+
|
| 60 |
+
### 3.1. U.S. Environmental Protection Agency (EPA)
|
| 61 |
+
|
| 62 |
+
The EPA provides guidance on the cleanup of hazardous materials after a fire, as well as on the management of debris and waste from fire-damaged buildings. The EPA's guidelines are designed to protect human health and the environment from the potential hazards associated with fire and smoke damage.
|
| 63 |
+
|
| 64 |
+
### 3.2. Occupational Safety and Health Administration (OSHA)
|
| 65 |
+
|
| 66 |
+
OSHA sets and enforces standards to ensure safe and healthful working conditions for working men and women. OSHA's regulations cover a wide range of workplace hazards, including those associated with fire and smoke damage restoration. These regulations include requirements for personal protective equipment (PPE), respiratory protection, and hazard communication.
|
| 67 |
+
|
| 68 |
+
## 4. Conclusion
|
| 69 |
+
|
| 70 |
+
The fire remediation industry is governed by a complex and evolving set of standards, guidelines, and best practices. This report has provided an overview of the key organizations and documents that shape the industry. It is essential for professionals in the field to stay current with these standards to ensure that they are providing the highest quality of service to their clients and to protect the health and safety of workers and the public.
|
| 71 |
+
|
| 72 |
+
## 5. References
|
| 73 |
+
|
| 74 |
+
[1] Institute of Inspection, Cleaning and Restoration Certification. (n.d.). *ANSI/IICRC S700 Standard for Professional Fire and Smoke Damage Restoration*. Retrieved from https://iicrc.org/s700/
|
| 75 |
+
|
| 76 |
+
[2] Institute of Inspection, Cleaning and Restoration Certification. (n.d.). *ANSI/IICRC S590 Standard for Professional Assessment of HVAC Systems Following a Water, Fire, or Mold Damage Event*. Retrieved from https://iicrc.org/s590/
|
| 77 |
+
|
| 78 |
+
[3] IICRC, RIA, & CIRI. (2025, December). *Technical Guide for Wildfire Restoration*. Retrieved from https://iicrc.org/wp-content/uploads/2025/12/IICRC.RIA_.CIRI-Technical-Guide-for-Wildfire-Restoration-V2-Final-2025-12.09.pdf
|
| 79 |
+
|
| 80 |
+
[4] National Fire Protection Association. (n.d.). *NFPA 921: Guide for Fire and Explosion Investigations*. Retrieved from https://www.nfpa.org/codes-and-standards/nfpa-921-standard-development/921
|
| 81 |
+
|
| 82 |
+
[5] National Fire Protection Association. (n.d.). *NFPA 1033: Standard for Professional Qualifications for Fire Investigator*. Referenced in industry documentation.
|
| 83 |
+
|
| 84 |
+
[6] ASTM International. (2020). *ASTM E119-20: Standard Test Methods for Fire Tests of Building Construction and Materials*. Retrieved from https://www.astm.org/e0119-20.html
|
| 85 |
+
|
| 86 |
+
[7] ASTM International. (n.d.). *ASTM C856: Standard Practice for Petrographic Examination of Hardened Concrete*. Referenced in industry practice.
|
RAG-KB/Industrial Hygiene Lab Services Guide.md
ADDED
|
@@ -0,0 +1,369 @@
| 1 |
+
# Industrial Hygiene Lab Services Guide
|
| 2 |
+
|
| 3 |
+
**EMSL Analytical, Inc. - 2023 Edition**
|
| 4 |
+
|
| 5 |
+
*Methods and Threshold Values Reference*
|
| 6 |
+
|
| 7 |
+
---
|
| 8 |
+
|
| 9 |
+
## Table of Contents
|
| 10 |
+
|
| 11 |
+
1. [About EMSL Analytical, Inc.](#about-emsl-analytical-inc)
|
| 12 |
+
2. [EMSL Diamond Standard](#emsl-diamond-standard)
|
| 13 |
+
3. [Locations and Network](#locations-and-network)
|
| 14 |
+
4. [Industrial Hygiene Testing Services](#industrial-hygiene-testing-services)
|
| 15 |
+
5. [Comprehensive Analyte List (A-Z)](#comprehensive-analyte-list-a-z)
|
| 16 |
+
6. [Group Profiles](#group-profiles)
|
| 17 |
+
7. [Rental Equipment](#rental-equipment)
|
| 18 |
+
|
| 19 |
+
---
|
| 20 |
+
|
| 21 |
+
## About EMSL Analytical, Inc.
|
| 22 |
+
|
| 23 |
+
EMSL Analytical, Inc. has been providing quality analytical services since 1981 as the nation's leading environmental testing firm. The company offers a wide array of analytical testing services to support environmental investigations focused on asbestos, microbiology, lead paint, environmental chemistry, indoor air quality, industrial hygiene and food testing. Additionally, EMSL provides materials testing, characterization, and forensic laboratory services for a wide range of commercial, industrial, regulatory, and law enforcement clients.
|
| 24 |
+
|
| 25 |
+
The company's unmatched capacity, coupled with a company-wide focus on customer satisfaction, makes no project too large or too small. EMSL's corporate research and development capabilities allow it to bring new methodologies online quickly to meet new industry challenges and client needs. By recruiting and retaining talented and motivated scientists nationwide, EMSL marshals their expertise throughout its network of analytical laboratories. EMSL is committed to providing reliable, defensible data in a standardized and user-friendly format. Rapid turnaround and competitive prices make the dependable results clients receive that much more valuable.
|
| 26 |
+
|
| 27 |
+
**Mission Statement:** "We're much more than another testing laboratory. We are your project partner!"
|
| 28 |
+
|
| 29 |
+
### Overview of EMSL Service Divisions
|
| 30 |
+
|
| 31 |
+
#### Asbestos
|
| 32 |
+
- Asbestos analysis of air, water, bulk, soil and/or dust samples
|
| 33 |
+
- Various methodologies including NIOSH, EPA, OSHA, ASTM, etc.
|
| 34 |
+
- Utilizing PCM, PLM, TEM, SEM, XRD, and STEM
|
| 35 |
+
|
| 36 |
+
#### Lead and Metals
|
| 37 |
+
- Testing services include Flame AA, Graphite Furnace, and ICP
|
| 38 |
+
- Lead testing in paint chips, soil, wipes, drinking water, waste water, and air
|
| 39 |
+
|
| 40 |
+
#### Microbiology
|
| 41 |
+
- Analysis of fungi (mold), bacteria (Legionella, E. coli, Salmonella, Listeria, etc.)
|
| 42 |
+
- Mycotoxins, endotoxins, allergens, pollen testing
|
| 43 |
+
- Particulates in air, swab, water, soil, bulk, dust, wipe, food, and consumer products
|
| 44 |
+
|
| 45 |
+
#### Industrial Hygiene
|
| 46 |
+
- Testing services for air, wipe, and bulk matrices
|
| 47 |
+
- Extensive list of NIOSH, OSHA, ASTM, and EPA methods
|
| 48 |
+
|
| 49 |
+
#### Environmental Chemistry
|
| 50 |
+
- Instrumental and classical wet chemistry
|
| 51 |
+
- ICP spectroscopy, microscopy, SEM and EDS analysis
|
| 52 |
+
- FTIR analysis and more
|
| 53 |
+
|
| 54 |
+
#### Materials Science
|
| 55 |
+
- Materials testing, characterization, and forensic laboratory services
|
| 56 |
+
- Support for commercial, industrial, regulatory, and law enforcement clients
|
| 57 |
+
- Solutions for manufacturing challenges, quality assurance, and research and development
|
| 58 |
+
|
| 59 |
+
#### Food
|
| 60 |
+
- Microbiology analysis, nutritional analysis
|
| 61 |
+
- Various food chemistry analysis
|
| 62 |
+
- Allergens, toxins, and adulteration analysis
|
| 63 |
+
|
| 64 |
+
#### Radiochemistry
|
| 65 |
+
- Analysis of various matrices including food, water, soil, vegetation
|
| 66 |
+
- Other unique sample types for radioactivity
|
| 67 |
+
- Liberal radioactive materials license for most environmental radioactive needs
|
| 68 |
+
|
| 69 |
+
#### Air Toxics
|
| 70 |
+
- Testing services for VOCs in air, water and soil
|
| 71 |
+
- Consumer products testing
|
| 72 |
+
- Chamber studies for consumer product off-gassing analyses
|
| 73 |
+
- Understanding what products are emitting and comply with regulations
|
| 74 |
+
|
| 75 |
+
#### Pharmaceutical
|
| 76 |
+
- Microbiology testing services through MPL Laboratories
|
| 77 |
+
- Pharmaceutical, medical device, cosmetic, personal care, and food industries
|
| 78 |
+
- ISO/IEC 17025 accredited by PJLA, FDA and DEA registered, and NJDEP certified
|
| 79 |
+
|
| 80 |
+
#### PCR-DNA
|
| 81 |
+
- DNA and PCR laboratory services
|
| 82 |
+
- Bacteria, ERMI, fungi, and mold testing
|
| 83 |
+
- Scientific, ecological, research, biological, microbiological, environmental, food, and botanical professionals
|
| 84 |
+
|
| 85 |
+
#### Training
|
| 86 |
+
- Array of training including online educational courses
|
| 87 |
+
- Various laboratory services sampling videos
|
| 88 |
+
- In-person training
|
| 89 |
+
|
| 90 |
+
#### Products
|
| 91 |
+
- Environmental products, equipment, and supplies for the field
|
| 92 |
+
- Support for each company division
|
| 93 |
+
|
| 94 |
+
#### Legal Services
|
| 95 |
+
- Highly qualified and experienced professionals
|
| 96 |
+
- Chemists, geologists, physicists, mycologists, microbiologists, biologists, materials scientists, and industrial hygienists
|
| 97 |
+
- Available as-needed for legal support and expert witness testimony
|
| 98 |
+
|
| 99 |
+
---
|
| 100 |
+
|
| 101 |
+
## EMSL Diamond Standard
|
| 102 |
+
|
| 103 |
+
EMSL's diverse staff of approximately 1,000 employees possess a wide range of expertise, educational background, and capabilities. These dedicated employees follow the lead and standard of care demonstrated by the owner and founder of the company, Dr. Peter Frasca, who, as a hands-on owner maintains daily involvement in laboratory operations, and assures work is consistent with the **EMSL Diamond Standard**.
|
| 104 |
+
|
| 105 |
+
### The Diamond Standard Includes:
|
| 106 |
+
|
| 107 |
+
#### Quality Data
|
| 108 |
+
Track, manage, report, and verify that the data from all accredited testing services are accurate and reliable through quality programs and regulatory requirements.
|
| 109 |
+
|
| 110 |
+
#### Customer Dedication
|
| 111 |
+
EMSL strives to create lasting, mutually beneficial relationships with all clients. The company solicits feedback from clients and is committed to responding quickly to any questions or concerns that may arise before, during, or after an assignment.
|
| 112 |
+
|
| 113 |
+
#### Analytical Expertise
|
| 114 |
+
EMSL employs highly qualified and experienced chemists, geologists, physicists, mycologists, microbiologists, biologists, materials scientists, and industrial hygienists to enhance analytical abilities and expertise.
|
| 115 |
+
|
| 116 |
+
#### Integrity and Ethics
|
| 117 |
+
EMSL insists that employees uphold the highest standard of ethics. The company maintains a "no-compromise" policy as it pertains to any ethical issue.
|
| 118 |
+
|
| 119 |
+
#### Responsiveness
|
| 120 |
+
EMSL recognizes that the timeliness of a report is as important as the quality of the data. The company will not however, allow deadlines or the rush needs of a project to adversely impact quality objectives.
|
| 121 |
+
|
| 122 |
+
#### Technology
|
| 123 |
+
EMSL recognizes the importance of new technology to better enable improved services. Online access to data, customized reports, sample control/processing through the Laboratory Information Management System (LIMS), and analytical instrumentation are continuously upgraded to enable continuous improvement of services and capabilities.
|
| 124 |
+
|
| 125 |
+
#### Value
|
| 126 |
+
EMSL believes that a business relationship provides clients with excellent value. The company provides a complete value package that includes all the components of the EMSL Diamond Standard.
|
| 127 |
+
|
| 128 |
+
---
|
| 129 |
+
|
| 130 |
+
## Locations and Network
|
| 131 |
+
|
| 132 |
+
### Locally Focused, Nationally Recognized
|
| 133 |
+
|
| 134 |
+
**Unmatched capacity from the collective strength of nationwide locations.**
|
| 135 |
+
|
| 136 |
+
EMSL Analytical, Inc. has been fortunate to be able to maintain a solid history of stable growth and viability for over 40 years with a current network consisting of **48 laboratories and 2 service centers** across the United States and Canada.
|
| 137 |
+
|
| 138 |
+
**Corporate Headquarters:** Cinnaminson, NJ USA (also home to LA Testing)
|
| 139 |
+
|
| 140 |
+
---
|
| 141 |
+
|
| 142 |
+
## Industrial Hygiene Testing Services
|
| 143 |
+
|
| 144 |
+
EMSL Analytical, Inc. provides Industrial Hygiene (IH) Laboratory Services for air, wipe, and bulk matrices on an extensive list of NIOSH, OSHA, ASTM, and EPA test methods, boasting five IH laboratory locations within North America:
|
| 145 |
+
|
| 146 |
+
- **EMSL's Corporate Laboratory** - Cinnaminson, NJ
|
| 147 |
+
- **Indianapolis, IN**
|
| 148 |
+
- **Charlotte, NC**
|
| 149 |
+
- **Huntington Beach, CA** (LA Testing)
|
| 150 |
+
- **Toronto, ON** (Canadian location)
|
| 151 |
+
|
| 152 |
+
### Professional Team
|
| 153 |
+
|
| 154 |
+
The team of qualified and experienced professionals includes board-certified Industrial Hygienists (CIH) as well as highly trained project managers and analysts who welcome client interaction at project inception to ensure the laboratory data will meet all of the intended goals of the event, and who maintain communication during and after the event as well as while samples are in-house. EMSL believes clear and concise communication is imperative to each project's success.
|
| 155 |
+
|
| 156 |
+
### Accreditation and Certifications
|
| 157 |
+
|
| 158 |
+
EMSL maintains **AIHA accreditation** for tests performed by the IH laboratories, which includes:
|
| 159 |
+
- On-site laboratory audits
|
| 160 |
+
- Formal document review program
|
| 161 |
+
- Staff experience and education criteria
|
| 162 |
+
- Proficiency Testing Program as part of the Accreditation process
|
| 163 |
+
|
| 164 |
+
Additionally, as required by various states, EMSL IH laboratories hold most applicable state certifications for fields of testing for air samples.
|
| 165 |
+
|
| 166 |
+
### Equipment and Quality Control
|
| 167 |
+
|
| 168 |
+
EMSL has state of the art equipment within each of the five IH laboratory locations, including:
|
| 169 |
+
- GC-ECD/GC-FID/GC-MS
|
| 170 |
+
- LC-MS/MS, HPLC, LC/MS, IC, XRD, UV-VIS, ICP-AES
|
| 171 |
+
- OES/ICP-MS
|
| 172 |
+
- And more
|
| 173 |
+
|
| 174 |
+
The analysis and reporting of each individual sample includes analysis of Quality Control (QC) samples, programs such as:
|
| 175 |
+
- Instrument QC controls
|
| 176 |
+
- Calibration standard checks
|
| 177 |
+
- Spiked media
|
| 178 |
+
- Reporting limit controls
|
| 179 |
+
|
| 180 |
+
All to ensure the confidence limits of the data are within the acceptable range as specified by the method requirements and Quality Control Program.
|
| 181 |
+
|
| 182 |
+
### Turnaround Times (TATs)
|
| 183 |
+
|
| 184 |
+
Labs maintain normal business day operational hours with weekend scheduling availability as needed for critical response situations. Samples are received during regular business hours and turnaround times (TATs) are tracked on business days.
|
| 185 |
+
|
| 186 |
+
**Available TATs:**
|
| 187 |
+
- Same day or next day
|
| 188 |
+
- 2 day
|
| 189 |
+
- 3 day
|
| 190 |
+
- 4 day
|
| 191 |
+
- 1 week
|
| 192 |
+
- 2 week Standard TATs
|
| 193 |
+
|
| 194 |
+
Costs/rates are based on the TAT requested with the 2 week TAT rates being the most economically cost-effective for customers.
|
| 195 |
+
|
| 196 |
+
### Laboratory Information Management System (LIMS)
|
| 197 |
+
|
| 198 |
+
Sample control/processing (log-in, results data-entry, reporting) is facilitated by the Laboratory Information Management System (LIMS) which tracks the sample job (batch) and provides the laboratory with work log (due dates) to help ensure all the work is organized and processed in accordance with the client's needs.
|
| 199 |
+
|
| 200 |
+
The LIMS includes security controls to ensure that information is controlled (locked) once the data has been documented and entered by the bench chemists. Reports are delivered at the choice of the customer which would include email, hard-copy regular mail, or both.
|
| 201 |
+
|
| 202 |
+
Additionally, EMSL can provide:
|
| 203 |
+
- Electronic Data Deliverables (EDD)
|
| 204 |
+
- Various QC Data Packages (contact for package pricing)
|
| 205 |
+
|
| 206 |
+
### Sampling Media and Pumps
|
| 207 |
+
|
| 208 |
+
Regarding media and pumps, EMSL offers a **"free IH sampling pump program"** for clients, provided the analysis is performed by one of the IH laboratories. An extensive list of products and media for sale is available, including: pumps, badges, field equipment/monitors, etc., all of which can be viewed via the website.
|
| 209 |
+
|
| 210 |
+
### Key Tests Available
|
| 211 |
+
|
| 212 |
+
The following is a summary of key tests (but are not limited to):
|
| 213 |
+
|
| 214 |
+
#### NIOSH Methods
|
| 215 |
+
- NIOSH 0500, 0600, 1003M, 1005M, 1007, 1013, 1019, 1024, 1300, 1301, 1400M, 1401, 1402, 1403, 1405, 1450, 1453, 1457, 1500M, 1501M, 1550M, 1603M
|
| 216 |
+
- NIOSH 1604, 1606M, 1610, 1612, 1615, 1616, 2000M, 2016M, 2500M, 2532, 2537, 2546M, 2551M, 3500, 5008M, 5026, 5040, 5041, 5042M, 5503M, 5506M, 5510M, 5523, 5524
|
| 217 |
+
- NIOSH 5600, 5601M, 6004M, 6009M, 6010M, 6011, 6013, 6014, 6016, 7082, 7401, 7500, 7501, 7600, 7602, 7906, 7907, 7908, 7908M, 9111M
|
| 218 |
+
|
| 219 |
+
#### OSHA Methods
|
| 220 |
+
- OSHA 42/47M, OSHA 5002M, OSHA 56, OSHA 58M, OSHA 64, OSHA 80, OSHA 83M, OSHA 91, OSHA 99M, OSHA 104, OSHA 109, OSHA 1007, OSHA 1008, OSHA 1010 V2, OSHA 1014, OSHA 1018, OSHA 1019, OSHA 103M, OSHA 5001, OSHA ID-113, OSHA ID-140
|
| 221 |
+
- OSHA ID-145, OSHA ID-165SG, OSHA ID-182, OSHA ID-188M, OSHA ID-190, OSHA ID-214, OSHA ID-215 V2, OSHA PV2061, OSHA PV2111, OSHA PV2119
|
| 222 |
+
|
| 223 |
+
#### Other Methods
|
| 224 |
+
- 40CFR50, Appendix B
|
| 225 |
+
- 40CFR50, Appendix J
|
| 226 |
+
- 40CFR50, Appendix L
|
| 227 |
+
- AssayTech LP 575
|
| 228 |
+
- ASTM D5504
|
| 229 |
+
- EPA IP-10A
|
| 230 |
+
- EMSL In-House Methods
|
| 231 |
+
|
| 232 |
+
**Note:** If you are looking for a method that is not listed, please contact EMSL to confirm whether they can perform it. The list of services is expanded regularly.
|
| 233 |
+
|
| 234 |
+
*For a full list of tests offered and for pricing, call for details.*
|
| 235 |
+
|
| 236 |
+
---
|
| 237 |
+
|
| 238 |
+
## Comprehensive Analyte List (A-Z)
|
| 239 |
+
|
| 240 |
+
This section contains detailed information about each analyte tested by EMSL's Industrial Hygiene laboratories. The list includes CAS numbers, test methods, synonyms, sampling instructions, flow rates, media types, and occupational exposure limits (OELs) from various regulatory agencies.
|
| 241 |
+
|
| 242 |
+
### Understanding the Analyte Table Columns
|
| 243 |
+
|
| 244 |
+
- **CAS Number:** Chemical Abstracts Service registry number for unique identification
|
| 245 |
+
- **Test:** Common name of the analyte
|
| 246 |
+
- **Test Method:** Specific NIOSH, OSHA, ASTM, or EPA method used
|
| 247 |
+
- **Synonym(s):** Alternative names for the chemical
|
| 248 |
+
- **Test Code:** EMSL internal test identification code
|
| 249 |
+
- **OSHA PEL or Other Value:** Occupational Safety and Health Administration Permissible Exposure Limit or other regulatory values
|
| 250 |
+
- **Most Relevant OEL (Value):** Most applicable Occupational Exposure Limit with value
|
| 251 |
+
- **Default Reporting Limit:** Minimum detection limit for the test
|
| 252 |
+
- **Sampling Instructions:** Special handling or storage requirements
|
| 253 |
+
- **Flow Rate (lpm):** Liters per minute for air sampling
|
| 254 |
+
- **Volume (L):** Total air volume to be sampled
|
| 255 |
+
- **Media:** Collection media types (filters, sorbent tubes, etc.)
|
| 256 |
+
- **Pump Kit ID:** EMSL equipment identification numbers
|
| 257 |
+
|
| 258 |
+
### Sample Analytes (Alphabetical)
|
| 259 |
+
|
| 260 |
+
| CAS Number | Analyte | Test Method | Synonym(s) | Key OEL |
|
| 261 |
+
|:-----------|:--------|:------------|:-----------|:--------|
|
| 262 |
+
| 83-32-9 | Acenaphthene | NIOSH 5506M | Dihydroacenaphthylene | 0.2 mg/m³ OSHA PEL TWA |
|
| 263 |
+
| 208-96-8 | Acenaphthylene | NIOSH 5506M | Acenaphthalene | 0.2 mg/m³ OSHA PEL TWA |
|
| 264 |
+
| 75-07-0 | Acetaldehyde | NIOSH 2016M | Acetic Aldehyde; Ethyl Aldehyde | 200 ppm OSHA PEL TWA |
|
| 265 |
+
| 64-19-7 | Acetic Acid | NIOSH 1603M | Ethanoic Acid | 10 ppm OSHA PEL TWA |
|
| 266 |
+
| 513-86-0 | Acetoin | NIOSH 2558 | 3-Hydroxy-2-Butanone | Not Established |
|
| 267 |
+
| 67-64-1 | Acetone | NIOSH 2016M | Dimethyl Ketone | 1000 ppm OSHA PEL TWA |
|
| 268 |
+
|
| 269 |
+
*Note: This is a representative sample. The complete guide contains hundreds of analytes from A-Z with full technical specifications, sampling parameters, and regulatory threshold values. Contact EMSL for the complete analyte database or specific chemical information.*
|
| 270 |
+
|
| 271 |
+
### Special Notes for Sampling
|
| 272 |
+
|
| 273 |
+
Many analytes require specific handling:
|
| 274 |
+
- **Light-sensitive compounds:** Protect from light and heat, wrap in foil
|
| 275 |
+
- **Volatile compounds:** Store in freezer, ship cold (5°C)
|
| 276 |
+
- **Temperature-sensitive:** Ship refrigerated (0°C)
|
| 277 |
+
- **Reactive compounds:** Special storage and shipping requirements noted
|
| 278 |
+
|
| 279 |
+
---
|
| 280 |
+
|
| 281 |
+
## Group Profiles
|
| 282 |
+
|
| 283 |
+
EMSL offers pre-configured test packages for common industrial hygiene scenarios. These group profiles streamline the testing process for frequently requested analyte combinations.
|
| 284 |
+
|
| 285 |
+
*Detailed group profile information is available on pages 59-61 of the complete guide.*
|
| 286 |
+
|
| 287 |
+
Common group profiles may include:
|
| 288 |
+
- **Volatile Organic Compounds (VOCs)** - Common workplace air contaminants
|
| 289 |
+
- **Metals Panel** - Comprehensive metals analysis for industrial settings
|
| 290 |
+
- **Welding Fumes** - Specific metals and compounds from welding operations
|
| 291 |
+
- **Solvent Mixtures** - Common solvent combinations in manufacturing
|
| 292 |
+
- **Diesel Particulate Matter** - Complete diesel exhaust characterization
|
| 293 |
+
- **Pharmaceutical Compounds** - Active pharmaceutical ingredients (APIs)
|
| 294 |
+
|
| 295 |
+
Contact EMSL for current group profile offerings and pricing.
|
| 296 |
+
|
| 297 |
+
---
|
| 298 |
+
|
| 299 |
+
## Rental Equipment
|
| 300 |
+
|
| 301 |
+
EMSL offers a comprehensive rental program for industrial hygiene sampling equipment. This program supports clients who need temporary access to professional-grade sampling equipment.
|
| 302 |
+
|
| 303 |
+
*Detailed rental equipment information is available on pages 62-63 of the complete guide.*
|
| 304 |
+
|
| 305 |
+
### Available Equipment Categories
|
| 306 |
+
|
| 307 |
+
- **Air Sampling Pumps** - Personal and area sampling pumps
|
| 308 |
+
- **Calibration Equipment** - Flow calibrators and verification devices
|
| 309 |
+
- **Monitoring Instruments** - Real-time detection and monitoring
|
| 310 |
+
- **Sample Collection Media** - Filters, cassettes, sorbent tubes, badges
|
| 311 |
+
- **Field Equipment** - Tripods, stands, and mounting accessories
|
| 312 |
+
- **Specialized Instruments** - Thermal imaging, particle counters, gas detectors
|
| 313 |
+
|
| 314 |
+
### Free IH Sampling Pump Program
|
| 315 |
+
|
| 316 |
+
EMSL offers a **"free IH sampling pump program"** for clients when the analysis is performed by one of EMSL's IH laboratories. This program provides access to calibrated sampling pumps without rental fees, making it easier and more cost-effective to conduct industrial hygiene sampling.
|
| 317 |
+
|
| 318 |
+
---
|
| 319 |
+
|
| 320 |
+
## Contact Information
|
| 321 |
+
|
| 322 |
+
For more information about EMSL Analytical, Inc. and their Industrial Hygiene Laboratory Services:
|
| 323 |
+
|
| 324 |
+
- **Website:** Visit EMSL's website for the most current information
|
| 325 |
+
- **Phone:** Contact your nearest EMSL laboratory location
|
| 326 |
+
- **Email:** Reach out to customer service for quotes and technical support
|
| 327 |
+
|
| 328 |
+
**Corporate Headquarters:**
|
| 329 |
+
EMSL Analytical, Inc.
|
| 330 |
+
Cinnaminson, NJ USA
|
| 331 |
+
|
| 332 |
+
---
|
| 333 |
+
|
| 334 |
+
## Document Information
|
| 335 |
+
|
| 336 |
+
- **Title:** Industrial Hygiene Lab Services Guide
|
| 337 |
+
- **Edition:** 2023
|
| 338 |
+
- **Focus:** Methods and Threshold Values
|
| 339 |
+
- **Publisher:** EMSL Analytical, Inc.
|
| 340 |
+
- **Pages:** 63 pages (original document)
|
| 341 |
+
- **Format:** Reference guide for industrial hygiene professionals
|
| 342 |
+
|
| 343 |
+
---
|
| 344 |
+
|
| 345 |
+
## Navigation Tips for LLM Agents
|
| 346 |
+
|
| 347 |
+
This document is structured to facilitate easy navigation and information retrieval:
|
| 348 |
+
|
| 349 |
+
1. **Use the Table of Contents** to jump to major sections
|
| 350 |
+
2. **Section headers** use standard Markdown hierarchy (##, ###, ####)
|
| 351 |
+
3. **Tables** organize complex data for easy parsing
|
| 352 |
+
4. **Bold text** highlights key terms and important information
|
| 353 |
+
5. **Lists** break down complex information into digestible items
|
| 354 |
+
6. **CAS numbers** provide unique identifiers for chemical lookups
|
| 355 |
+
7. **Cross-references** link related information throughout the document
|
| 356 |
+
|
| 357 |
+
### Key Search Terms
|
| 358 |
+
|
| 359 |
+
When searching this document, use these terms:
|
| 360 |
+
- Analyte names (e.g., "Acetone", "Benzene")
|
| 361 |
+
- CAS numbers (e.g., "67-64-1")
|
| 362 |
+
- Test methods (e.g., "NIOSH 2016M", "OSHA PV2119")
|
| 363 |
+
- Regulatory terms (e.g., "PEL", "TWA", "STEL", "Ceiling")
|
| 364 |
+
- Service types (e.g., "air sampling", "wipe sampling", "bulk analysis")
|
| 365 |
+
- Equipment (e.g., "pump", "media", "calibration")
|
| 366 |
+
|
| 367 |
+
---
|
| 368 |
+
|
| 369 |
+
*This Markdown document was created from the EMSL Analytical, Inc. Industrial Hygiene Lab Services Guide (2023 Edition) to facilitate LLM agent navigation and information retrieval.*
|
RAG-KB/Metals clearance criteria-QVC.md
ADDED
|
@@ -0,0 +1,622 @@
|
| 1 |
+
# BROOKHAVEN NATIONAL LABORATORY
|
| 2 |
+
|
| 3 |
+
**Safety & Health Services Division - Industrial Hygiene Group**
|
| 4 |
+
**Standard Operating Procedure**
|
| 5 |
+
|
| 6 |
+
| | |
|
| 7 |
+
|---|---|
|
| 8 |
+
| Number | IH75190 |
|
| 9 |
+
| Revision | Rev23 |
|
| 10 |
+
| Date | 06/23/17 |
|
| 11 |
+
| Page | 1 OF 16 |
|
| 12 |
+
|
| 13 |
+
**Subject: Surface Wipe Sampling for Metals**
|
| 14 |
+
|
| 15 |
+
---
|
| 16 |
+
|
| 17 |
+
*The only official copy is on-line at the SHSD website.*
|
| 18 |
+
*Before using a printed copy, verify that it is current by checking the document issue date on the website.*
|
| 19 |
+
|
| 20 |
+
---
|
| 21 |
+
|
| 22 |
+
# IH75190
|
| 23 |
+
# Surface Wipe Sampling for Metals
|
| 24 |
+
|
| 25 |
+
## 1.0 Purpose & Scope
|
| 26 |
+
|
| 27 |
+
This document describes a field procedure for taking wipe samples for metals on surfaces. It is based on methodology described in NIOSH 9100 "Lead in Surface Wipe Samples" of the NIOSH Manual of Analytical Methods.
|
| 28 |
+
|
| 29 |
+
The goal of the procedure is to provide a uniform methodology to collect representative samples. Using this method will ensure repeatability between various sampling personnel and between surface configurations. It is used for characterizing surface levels for the following reasons:
|
| 30 |
+
|
| 31 |
+
- Decommissioning operational areas
|
| 32 |
+
- Evaluating the effectiveness of clean-up of a spill
|
| 33 |
+
- Evaluating compliance with housekeeping levels in operational areas
|
| 34 |
+
- Characterizing a piece of equipment for release.
|
| 35 |
+
|
| 36 |
+
## 2.0 Responsibilities
|
| 37 |
+
|
| 38 |
+
**2.1 Demonstrated Competency:** Persons who have demonstrated competency in performing this procedure in accordance with Section 7 are qualified to use this procedure.
|
| 39 |
+
|
| 40 |
+
**2.2 Chain of Custody procedures:** The qualified sampler is responsible for samples until they have been properly transferred to the IH Group laboratory using the *IH51200 IH Laboratory Equipment & Sample Processing* procedure.
|
| 41 |
+
|
| 42 |
+
**2.3 Hazard Analysis of the Sampling Task:** It is the responsibility of persons using this method and their supervisors to:
|
| 43 |
+
|
| 44 |
+
- Use appropriate personal protective equipment; see section 5.3.
|
| 45 |
+
- Obtain required training and qualification for hazards in areas.
|
| 46 |
+
- Comply with all work planning and work permit system requirements.
|
| 47 |
+
|
| 48 |
+
## 3.0 Definitions
|
| 49 |
+
|
| 50 |
+
**Surface Wipe-** a technique for the determination of metal on surfaces conducted by wiping the loose dust from the surface with a cloth/paper media and analysis of the metal on the media by laboratory or XRF measurement.
|
| 51 |
+
|
| 52 |
+
Definitions associated with surface wipe criteria are cited in Attachment 9.3
|
| 53 |
+
|
| 54 |
+
## 4.0 Prerequisites
|
| 55 |
+
|
| 56 |
+
**Area Access:**
|
| 57 |
+
|
| 58 |
+
4.1 Training may be needed for entry into areas with hazards, such as radiological areas.
|
| 59 |
+
|
| 60 |
+
4.2 Contact the appropriate Facility Support Representative or Technician to obtain approval to enter radiological areas.
|
| 61 |
+
|
| 62 |
+
4.3 Review and sign the Work Permit or Radiological Work Permit if needed.
|
| 63 |
+
|
| 64 |
+
4.4 Use appropriate PPE for area.
|
| 65 |
+
|
| 66 |
+
## 5.0 Precautions
|
| 67 |
+
|
| 68 |
+
**5.1 Hazard assessment:** Taking surface wipe samples may cause some exposure to health risks. Sampling may be performed in areas with metal, chemical or radiological contamination. These hazards must be assessed on a case-by-case basis by a competent individual knowledgeable of the hazards of the area.
|
| 69 |
+
|
| 70 |
+
**5.2 Job Risk Assessment:** Consult the Job Risk Assessment SHSD-JRA-05 for the risk analysis of this operation based on the hazards and controls of this SOP.
|
| 71 |
+
|
| 72 |
+
**5.3 Personal Protective Equipment:** Use appropriate personal protective equipment when implementing this procedure.
|
| 73 |
+
|
| 74 |
+
- **Hand:** Use gloves in areas of known or suspected metal, chemical or radiological contamination. Exam-style, splash gloves are acceptable. Acceptable polymers are: Nitrile, PVC, and Natural Rubber. The gloves must have sufficient impermeability to the surface contaminant and solvent used on the collection media to allow safe handling. See Table 1.
|
| 75 |
+
- **Body:** Use a disposable suit if contact of the body with contaminated surfaces is anticipated. Acceptable chemical protective equipment materials include: Tyvek®, KleenGuard®, and cotton. Contact the ECR for disposal of garments. If personal clothing items become contaminated, they must be surrendered for BNL cleaning or disposal.
|
| 76 |
+
- **Foot:** Use disposable shoe coverings, boots or booties if contact of the feet with contaminated surfaces is anticipated. Acceptable materials include: Tyvek®, KleenGuard®, and rubber. If personal shoes become contaminated, they must be surrendered for BNL cleaning or disposal.
|
| 77 |
+
- **Respiratory:** Under normal use, respiratory protection is not required. Use a respirator in an area with the potential to exceed the OSHA, ACGIH, or DOE standards. Any sampler using respiratory protection must comply with the BNL Respiratory Protection Program.
|
| 78 |
+
- **Eye:** Use safety glasses with side shields in laboratories, construction, and general industry areas.
|
| 79 |
+
|
| 80 |
+
**5.4 Radioactive Concerns:** It is possible that some surfaces to be tested may have radioactive contamination. In these cases, personal protective equipment and administrative controls must be implemented for the radiological contaminant hazard.
|
| 81 |
+
|
| 82 |
+
In addition, the collected sample must be analyzed for the radiological hazard before it can be submitted to the IH Group for analysis. The radiological contamination must be below the permissible release limits to the general public.
|
| 83 |
+
|
| 84 |
+
**5.5 Work Planning:** All requirements of work permits and work planning system reviews must be met in performing this procedure.
|
| 85 |
+
|
| 86 |
+
**5.6 Personal Hygiene:** Remove PPE and wash hands after sampling and before eating or drinking.
|
| 87 |
+
|
| 88 |
+
**5.7 Environmental Impact and Waste Disposal:** This technique does not have an adverse impact on the environment. Based on WMD testing of similar PPE material, the templates and gloves can be disposed of as normal trash. See Attachment 9.4.
|
| 89 |
+
|
| 90 |
+
## 6.0 Procedure
|
| 91 |
+
|
| 92 |
+
### 6.1 Equipment
|
| 93 |
+
|
| 94 |
+
| Item | Description |
|
| 95 |
+
|------|-------------|
|
| 96 |
+
| **Sample container (either):** | Bag, plastic, sealable with "zip" type seal. |
|
| 97 |
+
| | Vial, glass or plastic. (Glass is needed for samples using hexane-based solvents.) |
|
| 98 |
+
| **Sample media (any of these):** | Gauze: 2" x 2" or 4" x 4" cotton gauze |
|
| 99 |
+
| | Paper: Ashless quantitative filter paper (typical diameter is 1.5 to 4 inches) |
|
| 100 |
+
| | Pre-moistened wipe: manufacturer foil-wrapped, solvent-soaked disposable cloths (such as GhostWipes or LeadWipe) |
|
| 101 |
+
| | • The type of wipe is dependent on the lab to be used. Check with the lab for appropriate media for the metals to be analyzed. |
|
| 102 |
+
| | • For multiple metals, check with the lab to ensure they can all be done on a single wipe |
|
| 103 |
+
| **Gloves** | Appropriate for contaminant and solvent (see Table 1) and site hazards. |
|
| 104 |
+
| **Solvent** | Distilled water, Isopropanol, ethanol, methanol, n-hexane, or pre-moistened. See Table 1 for recommended solvent for each contaminant. |
|
| 105 |
+
| **Template** | Plastic sheet or cardboard: See Table 1 for size needed |
|
| 106 |
+
| | • 100 cm2: 10 cm x 10 cm square, or a circle of 11.24 cm diameter. |
|
| 107 |
+
| | • 1 ft2: 1 foot x 1 foot, or other shape totaling 144 in2. |
|
| 108 |
+
|
| 109 |
+
### 6.2. Wipe Technique
|
| 110 |
+
|
| 111 |
+
BNL SHSD IH Group has selected the NIOSH method of collecting wipe samples. For uniformity, this method should be used for all surfaces to be sampled (visually depicted in Figure A).
|
| 112 |
+
|
| 113 |
+
**Figure A: NIOSH Surface Wipe Method**
|
| 114 |
+
|
| 115 |
+
[Figure shows three-step wiping process: 1. First Wipe using whole pad in S-pattern, 2. Second Wipe using half pad (folded) in S-pattern at right angles, 3. Third Wipe using quarter pad (folded again) in S-pattern. With each step, fold the exposed surface inward. Final step 4 shows folding to put in bag/bottle with label.]
|
| 116 |
+
|
| 117 |
+
**6.2.1** Use a moistened sample media or pre-moistened wipe (e.g. GhostWipe™). Apply only enough solvent to moisten approximately 80% of the area of the media. Avoid excess solvent on the filter or pad as it may cause drips and running on the surface thus diluting the sample.
|
| 118 |
+
|
| 119 |
+
### Table 1
|
| 120 |
+
|
| 121 |
+
| Contaminant | Media(1) | Solvent(2) | PPE Glove(3) Disposable Style | Sample Size |
|
| 122 |
+
|-------------|----------|------------|-------------------------------|-------------|
|
| 123 |
+
| **Lead** | Gauze or Filter | 1 - 2 ml Distilled Water | Natural Latex Rubber, Nitrile, PVC, or Polyethylene | 1 square foot; 100 cm2 requires advance approval by an IH professional verifying that sensitivity is adequate |
|
| 124 |
+
| | Pre-moistened Wipe (should be cut in half) (4) | n/a | | |
|
| 125 |
+
| **Beryllium** | Gauze or Filter | 1 - 2 ml Distilled Water, Isopropanol, Methanol, or Ethanol | Natural Latex Rubber, Nitrile, PVC, or Polyethylene | 1 square foot minimum needed always |
|
| 126 |
+
| | Pre-moistened Wipe (should be cut in half) (4) | n/a | | |
|
| 127 |
+
| **Arsenic, Cadmium** | Gauze or Filter | 1-2 ml of Distilled Water | Natural Latex Rubber, Nitrile, PVC, or Polyethylene | 100 cm2 typically acceptable |
|
| 128 |
+
| | Pre-moistened Wipe (should be cut in half) (4) | n/a | | |
|
| 129 |
+
| **Hexavalent Chromium** | Preferred Medias: See Attachment 9.2 | None: For chrome plating operations, see stabilizing solution in Attachment 9.2. | Powderless: Natural Latex Rubber, Nitrile, PVC, or Polyethylene | 100 cm2 typically acceptable |
|
| 130 |
+
|
| 131 |
+
**Notes for Table 1:**
|
| 132 |
+
|
| 133 |
+
(1) Some pre-moistened media may not be compatible with certain laboratory analytical equipment. Check with the laboratory analyzing the samples prior to sampling to ensure the brand of media is compatible.
|
| 134 |
+
|
| 135 |
+
(2) Solvent: The solvent is not critical for lead, beryllium, and most heavy metals such as cadmium, nickel, and chromium. In doing wipes for these compounds, it is allowable to choose the solvent that will have the least impact (residues) on the owner of the equipment being sampled (i.e. some equipment is sensitive to water residues and an alcohol or other solvent may be preferred by the equipment owner.)
|
| 136 |
+
|
| 137 |
+
(3) Selection criteria: Breakthrough time greater than 1 hour of continuous contact. Source of data is DOE Guidelines for the Selection of Chemical Protective Clothing, 1991.
|
| 138 |
+
|
| 139 |
+
(4) The use of full-size pre-moistened wipes may cause the sample not to meet the minimum level of detection. To increase sensitivity, cut the wipe in half to reduce its size.
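
For tooling that consumes this knowledge base, Table 1 can be encoded as a simple lookup. The following Python sketch is illustrative only: the dictionary keys, function name, and abbreviated descriptions are ours, and Table 1 (together with the analyzing laboratory) remains the authoritative source.

```python
# Minimal sketch of Table 1 as a lookup; keys and wording are illustrative only.
# The SOP table and the analyzing laboratory remain the authoritative sources.
TABLE_1_SETUP = {
    "lead":      {"media": "gauze/filter or half pre-moistened wipe",
                  "solvent": "1-2 ml distilled water",
                  "area": "1 sq ft (100 cm2 needs advance IH approval)"},
    "beryllium": {"media": "gauze/filter or half pre-moistened wipe",
                  "solvent": "distilled water or alcohol",
                  "area": "1 sq ft minimum, always"},
    "arsenic":   {"media": "gauze/filter or half pre-moistened wipe",
                  "solvent": "1-2 ml distilled water",
                  "area": "100 cm2 typically acceptable"},
    "cadmium":   {"media": "gauze/filter or half pre-moistened wipe",
                  "solvent": "1-2 ml distilled water",
                  "area": "100 cm2 typically acceptable"},
    "cr6":       {"media": "PVC or binderless quartz filter (see Attachment 9.2)",
                  "solvent": "none (stabilizing solution for chrome plating)",
                  "area": "100 cm2 typically acceptable"},
}

def sampling_setup(contaminant: str) -> dict:
    """Return the recommended media, solvent, and sample area for a contaminant."""
    return TABLE_1_SETUP[contaminant.lower()]
```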
|
| 140 |
+
|
| 141 |
+
**6.2.2** Place the template over the area to be sampled or measure out 1 ft2 or 100-cm2 surface area, as per Table 1. If the object has a total surface area of less than 1 ft2 or 100 cm2, sample the whole surface area, if possible, and record the surface area. If the surface does not allow the use of a template, carefully determine the dimensions that will equal 1 ft2 or 100 cm2.
|
| 142 |
+
|
| 143 |
+
**6.2.3** Wipe the surface with firm pressure, using "S" strokes, covering the entire surface (edge to edge). If the surface is very rough (such as concrete), a dabbing action may be substituted for the full-contact pressure rubbing of the media across the surface. When dabbing, make sure to completely cover the same area as in the S-stroke wipe. Indicate on the sample form that dabbing was done.
|
| 144 |
+
|
| 145 |
+
Fold the exposed side of the pad or filter inward (i.e. fold in half).
|
| 146 |
+
|
| 147 |
+
**6.2.4** Using the once-folded media, wipe the same area with S-strokes (see Figure A), starting at right angles to the first wipe. Fold the exposed side of the pad or filter inward.
|
| 148 |
+
|
| 149 |
+
**6.2.5** Using the twice-folded media, wipe with S-strokes (see Figure A) starting at the original point and wipe in the same direction. Fold the exposed side of the pad or filter in.
|
| 150 |
+
|
| 151 |
+
**6.2.6** Place the media in a plastic bag or vial. Seal the zip lock or vial. Record the sample identification on the bag or vial.
|
| 152 |
+
|
| 153 |
+
**6.2.7** Thoroughly clean reusable templates or discard paper templates in preparation for the next sample. Based on WMD testing of similar material, templates can be disposed of as normal trash.
|
| 154 |
+
|
| 155 |
+
**6.2.8** Remove gloves by pulling them off inside-out and discard appropriately before handling the next filter or pad.
|
| 156 |
+
|
| 157 |
+
**6.2.9** Record the sample identification, surface area sampled, and description of the sample and surface on the sample form (Attachment 9.5) in the electronic SHSD forms page Surface Wipe (Metals)- Field Sampling Records & Chain of Custody.
|
| 158 |
+
|
| 159 |
+
**6.2.10** Include 1 blank filter or pad (moistened and placed in a bag or vial) with each set of samples (provide 1 blank per 6 samples).
|
| 160 |
+
|
| 161 |
+
### 6.3 Surface Wipe Technique for Hexavalent Chromium
|
| 162 |
+
|
| 163 |
+
See Attachment 9.2.
|
| 164 |
+
|
| 165 |
+
### 6.4 Determine HOW MANY samples to take
|
| 166 |
+
|
| 167 |
+
It is not possible to provide definitive guidance on the number of samples to be taken in every case. Table 2 provides general guidance on which to base professional judgment determining the number of samples. Factors that should be considered in selecting the number of samples include: the size of the area to be tested, the predicted uniformity of contamination over the surface area, and the eventual fate of the surface area (disposal, remediation, background measurement, etc.)
|
| 168 |
+
|
| 169 |
+
If more than six (6) samples are to be taken, it is suggested that at least one (1) duplicate sample be taken in close proximity to another sample to verify the precision (repeatability) of the sampling.
|
| 170 |
+
|
| 171 |
+
### Table 2: Statistical sampling plan
|
| 172 |
+
|
| 173 |
+
| Surface Configuration | Minimum Number of Samples | Qualifier |
|
| 174 |
+
|-----------------------|---------------------------|-----------|
|
| 175 |
+
| Entire surface is less than 100 cm2 (example: a small article) | 1 | If possible, sample the whole item; one sample is usually sufficient. |
|
| 176 |
+
| Surface Area of object or area is greater than 100 cm2 but only a few square feet (example: table top on which a process is done) | 1 | If only one sample is taken, select the area with highest potential contamination |
|
| 177 |
+
| Surface Area of object or area is greater than a few square feet (example: floor or wall of a room) | 1 - 3 | Ideally three samples are taken, but fewer samples may be taken depending on the purpose for sampling |
|
| 178 |
+
| Multiple surfaces in a large area with the same exposure potential to source (example, many rooms in a building with a common source such as the HVAC system) | 1 – 3 for each surface, 6 or more for the whole area | Assumes all the surfaces have similar exposure potential, else treat each area separately. |
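
Where sampling plans are generated programmatically, Table 2 can also be expressed as data. The sketch below is a minimal illustration; the category keys and function names are ours, the counts are the table's minimums and ideals, and professional judgment per Section 6.4 still governs the plan.

```python
# Sketch of Table 2 as (minimum, ideal) sample counts per surface configuration.
# Category names are illustrative; professional judgment still governs the plan.
SAMPLE_PLAN = {
    "whole_item_under_100cm2":         (1, 1),   # sample the whole item if possible
    "surface_over_100cm2_few_sqft":    (1, 1),   # pick the area of highest potential contamination
    "surface_over_few_sqft":           (1, 3),   # ideally three samples
    "multiple_surfaces_common_source": (6, None) # 1-3 per surface, 6 or more for the whole area
}

def duplicates_suggested(total_samples: int) -> int:
    """Section 6.4 suggests at least one duplicate when more than six samples are taken."""
    return 1 if total_samples > 6 else 0
```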
|
| 179 |
+
|
| 180 |
+
### 6.5 Determine WHAT KIND of samples (LOCATION)
|
| 181 |
+
|
| 182 |
+
Consider these locations when characterizing levels of surface metals:
|
| 183 |
+
|
| 184 |
+
- surfaces that are frequently accessed,
|
| 185 |
+
- surfaces that hazardous metal objects rest on,
|
| 186 |
+
- surfaces that are infrequently cleaned or disturbed (such as top of cabinets or high shelves)
|
| 187 |
+
- sources of the contamination (such as process equipment, lab apparatus, site of known spills),
|
| 188 |
+
- areas where contamination is not expected (these serve as a control), and
|
| 189 |
+
- areas where contamination would not be permissible (such as lunch rooms).
|
| 190 |
+
|
| 191 |
+
### 6.6 Results interpretation
|
| 192 |
+
|
| 193 |
+
Normalize the units of sampling results from the laboratory to the base units of the Surface Level Criteria Requirements & Recommendations listed in Attachment 9.3.
|
| 194 |
+
|
| 195 |
+
Conversion of data between various laboratory reporting units of measure: data can be converted among the regulatory and laboratory reporting units of measure based on the following values: 1 sq. ft. = 929 cm2; 1 mg = 1000 ug.
|
| 196 |
+
|
| 197 |
+
| Convert from: | Multiply by |
|
| 198 |
+
|--------------|-------------|
|
| 199 |
+
| ug/100 cm2 to ug/sq. ft | 9.29 |
|
| 200 |
+
| ug/sq. ft to ug/100 cm2 | 0.1076 |
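
Because laboratory reports arrive in mixed units, these factors lend themselves to a small normalization helper. The Python sketch below is illustrative only (the function and constant names are ours); it simply applies the conversions listed in this section before comparison against the Attachment 9.3 criteria.

```python
# Illustrative helper applying the conversions above (1 sq. ft. = 929 cm2, 1 mg = 1000 ug).
UG_FT2_PER_UG_100CM2 = 9.29     # ug/100 cm2 -> ug/sq ft
UG_100CM2_PER_UG_FT2 = 0.1076   # ug/sq ft   -> ug/100 cm2

def to_ug_per_100cm2(value: float, unit: str) -> float:
    """Normalize a wipe result to ug/100 cm2 before comparing to Attachment 9.3 criteria."""
    if unit == "ug/100cm2":
        return value
    if unit == "ug/ft2":
        return value * UG_100CM2_PER_UG_FT2
    if unit == "mg/100cm2":
        return value * 1000.0
    raise ValueError(f"unsupported unit: {unit}")

# Example: a result reported as 200 ug/ft2 is about 21.5 ug/100 cm2.
assert round(to_ug_per_100cm2(200, "ug/ft2"), 1) == 21.5
```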
|
| 201 |
+
|
| 202 |
+
### 6.7 Posting equipment or areas
|
| 203 |
+
|
| 204 |
+
Consult Attachment 9.1 for recommended wording to be used for labeling equipment or areas when a warning is needed for toxic metal hazards.
|
| 205 |
+
|
| 206 |
+
### 6.8 Reporting results
|
| 207 |
+
|
| 208 |
+
Convey the assessment of results to the requestor of the sampling, in a written analysis documenting: sampling and analysis methods, contamination levels measured, compliance with regulatory and recommended levels, and recommended corrective actions (if necessary).
|
| 209 |
+
|
| 210 |
+
## 7.0 Implementation and Training
|
| 211 |
+
|
| 212 |
+
**Qualification Criteria:** Use of this SOP is limited to persons who have demonstrated the competency to satisfactorily use the procedure, as evidenced by experience and training. All persons must have demonstrated competency in the qualification criteria set in the Job Performance Measure (Attachment 9.6.) or e-Exam IH75190. Qualification on this JPM is required on a 3 year basis.
|
| 213 |
+
|
| 214 |
+
## 8.0 References
|
| 215 |
+
|
| 216 |
+
8.1 ACGIH: Threshold Limit Values 2005
|
| 217 |
+
|
| 218 |
+
8.2 DOE: 10CFR 850 Chronic Beryllium Disease Prevention Program
|
| 219 |
+
|
| 220 |
+
8.3 EPA: Toxic Substance Control Act (TSCA) 40CFR745.227
|
| 221 |
+
|
| 222 |
+
8.4 Ness, S.A.; Surface and Dermal Monitoring for Toxic Exposures, Van Nostrand Reinhold, 1994.
|
| 223 |
+
|
| 224 |
+
8.5 NIOSH: Manual of Analytical Method, Method 9100: Lead in Surface Wipe Samples.
|
| 225 |
+
|
| 226 |
+
8.6 OSHA: 29CFR1910.1000 Table Z1, Z2; and 1910.1027.
|
| 227 |
+
|
| 228 |
+
8.7 OSHA: Technical Manual Section II, Chapter 2.
|
| 229 |
+
|
| 230 |
+
## 9.0 Attachments
|
| 231 |
+
|
| 232 |
+
9.1 Sample of Signs for Areas and Equipment
|
| 233 |
+
|
| 234 |
+
9.2 Wipe Sampling Technique for Hexavalent Chromium
|
| 235 |
+
|
| 236 |
+
9.3 Surface Wipe Criteria Requirements & Recommendations
|
| 237 |
+
|
| 238 |
+
9.4 Environmental Evaluation of Surface Wipe Sampling
|
| 239 |
+
|
| 240 |
+
9.5 Sample of Surface Contamination Sampling Form
|
| 241 |
+
|
| 242 |
+
9.6 SHSD Job Performance Measure (JPM) Completion Certificate
|
| 243 |
+
|
| 244 |
+
## 10.0 Procedure Documentation
|
| 245 |
+
|
| 246 |
+
**ISM Review - Hazard Categorization:** High; Moderate; Low/Skill of the craft
|
| 247 |
+
|
| 248 |
+
**Validation:** Formal Walkthrough; Desk Top Review; SME Review
|
| 249 |
+
|
| 250 |
+
### Revision Log
|
| 251 |
+
|
| 252 |
+
| Rev | Description |
|
| 253 |
+
|-----|-------------|
|
| 254 |
+
| 0 | New document. Prepared By R. Selvey, CIH 02/25/2000; Technical Reviewed By: N. Bernholc, CIH 02/27/00; RCD Facility Support Approved By: 04/22/01 N. Foster Procedure Committee Review; QA Review : E. Tucker; SHSD Approved By: R. Selvey 03/02/2000 |
|
| 255 |
+
| 1 | Revised for minor correction noted in training classes. Reviewed By: R. Selvey 10/6/00 |
|
| 256 |
+
| 2 | Added new format, SBMS header and reviewed sections on Hazard assessment, PPE. Added Waste Disposal and Environmental Impact text. Reviewed By: R. Selvey 02/05/01 |
|
| 257 |
+
| 3 | Minor format change. Converted SOP number from IH-FP-3.2 to new system IH75190. Reviewed By: R. Selvey 03/09/01 |
|
| 258 |
+
| 4 | Revised to include RCD Facility Support Procedure Committee Review comments. Reviewed By: R. Selvey 04/22/01 |
|
| 259 |
+
| 5 | Updated Table 1 adding Arsenic and Cadmium Media. Update Table 3 with Arsenic and Cadmium Release Criteria and update EPA Lead Criteria. Reviewed By: R. Selvey 04/10/02 |
|
| 260 |
+
| 6 | Updated Table 1 to correct error in lead criteria. Insert Section 7 and transfer information from section 4. Renumbered attachments. Reviewed By: R. Selvey 4/17/02 |
|
| 261 |
+
| 7 | Added Best Management Practice release criteria for Arsenic and Cadmium to Table 3. Reviewed By R. Selvey 08/16/02: |
|
| 262 |
+
| 8 | Added Best Management Practice release criteria for Nickel to Table 3. Reviewed By: R. Selvey 10/17/02 |
|
| 263 |
+
| 9 | Full review of SOP. Significant text changes. Deleted OSHA Method for procedure & PCB criteria. Updated Attachments 9.1 and 9.2. Added Attachment 9.3. Reviewed By: R. Selvey 05/21/04 |
|
| 264 |
+
| 10 | Added reference and link to JRA-05 in 5.1. Added text to 6.2.2 to clarify using Table 1 to determine 100cm2 versus 1 sq ft. Changed "S-stroke" wording in 6.2.3 through 6.2.5 to avoid confusion with the S-stroke used in Health Physics terminology. The two patterns are different. Changed the qualification criteria in Section 7 to reflect the unified qualification policy. Updated the Sample form (Attachment 9.1) to reflect the Compliance Suite order of sample numbering. Reviewed By: R. Selvey 02/21/06 |
|
| 265 |
+
| 11 | Reworded the "S-stroke" wording in 6.2.3 through 6.2.5 to avoid confusion with the S-stroke used in Health Physics terminology. The passage on "dabbing" was modified to indicate that the dabbing action replaces pulling the media, but does not replace the S-pattern. Minor typo corrections in Sections 5 and 6. Reviewed By: R. Selvey 02/21/06 |
|
| 266 |
+
| 12 | Section 6.3 was added with a reference to new Attachment 9.4; Table 1: was updated to include hexavalent chromium. Attachment 9.4 was added to include Liberty Mutual Wipe Sample Method. Liberty Mutual method was added. Section 8 References and Attachment 9.4 was added and included in Section 9.0 Attachments. Reviewed By: J. Peters 11/28/06; Reviewed By: R. Selvey 12/05/06 |
|
| 267 |
+
| 13 | Added Section 4.1, 4.2 and 5.6. Revised 5.2. Added document control to attachment 9.3 and 9.4. Reviewed By: R. Selvey 05/23/07 |
|
| 268 |
+
| 14 | Table 3: Updated to include Cobalt and description of calculation. Changed IH training link in Step 7.1. Reviewed By: M.Chuc 09/22/08 Reviewed By: R. Selvey 10/13/08 |
|
| 269 |
+
| 15 | Added Attachment 9.5. Reviewed By: R. Selvey 02/09/09 |
|
| 270 |
+
| 16 | Edited section 4.0 and 5.2 for brevity. Added definition for Release and Housekeeping Criteria. Changed Cr6 release level based on OSHA recommendation. Added ANSI Caution to Attachment 9.1 sign. Revised directions in Attachment 9.2. Reviewed By: R. Selvey 03/21/11 |
|
| 271 |
+
| 17 | Full review of steps 1 to 7. Expanded and revised Release and Housekeeping Criteria definitions in Section 3 and in Table3. Reviewed By: R. Selvey 04/27/11 |
|
| 272 |
+
| 18 | Corrected error in units in section 3: mg/100cm2 to ug/100 cm2. Reviewed By: R. Selvey 05/10/11 |
|
| 273 |
+
| 19 | Edited Section s 2 and 7 to remove reference to rescinded HP65100. Changed format of Section 9. Reviewer: R. Selvey 03/04/14 |
|
| 274 |
+
| 20 | Total review and revision. Replaced Table 3 with Appendix 9.3 and added OSHA Technical Manual ratio. Removed criteria for Al, Ba, Co, Cu, Hf, In, Mn, Mo, Pt, Rh, Se, Ag, Ta, Te, Tl, Sn, W, Y, Yt, and Zr. Added link to e-Exam and e-form. Added short-life disclaimer to Cr6 in Attachment 9.2. Revised by: R. Selvey 06/13/16 |
|
| 275 |
+
| 21 | Revised Attachment 9.3 to correct Cr+6. Added column for ug/sq ft. Corrected error in Table 1 Attachment 3. Revised by; R. Selvey 09/13/16. |
|
| 276 |
+
| 22 | Revised Attachment 9.3 to remove non-regulated Nickel and CrIII and adjusted values for Arsenic and CrVI to match OSHA Housekeeping philosophy. Added proposed changes for all release criteria to allow comments on impact. Revised by: R. Selvey 05/01/17. |
|
| 277 |
+
| 23 | Team reviewed revision to Attachment 9.3. Values aligned with OSHA, EPA/HUD and DOE policies. Approved by: R. Selvey 06/23/17 |
|
| 278 |
+
|
| 279 |
+
---
|
| 280 |
+
|
| 281 |
+
# Attachment 9.1
|
| 282 |
+
## Samples of Signs for Areas and Equipment
|
| 283 |
+
|
| 284 |
+
---
|
| 285 |
+
|
| 286 |
+
### CAUTION
|
| 287 |
+
|
| 288 |
+
**Cadmium Surface Contamination**
|
| 289 |
+
|
| 290 |
+
Some surfaces in this area have Cadmium levels above BNL Guidelines
|
| 291 |
+
|
| 292 |
+
- Do NOT perform operations that causes the dust to become airborne (such as using an air hose to clean surfaces or dry sweeping)
|
| 293 |
+
- Contact SHSD IH Group x-7475 prior to Building Renovations or Demolition
|
| 294 |
+
- Wash hands prior to eating, drinking, chewing gum, or smoking
|
| 295 |
+
- Do not eat or drink in this area.
|
| 296 |
+
|
| 297 |
+
---
|
| 298 |
+
|
| 299 |
+
### CLEAN
|
| 300 |
+
|
| 301 |
+
The material on this pallet is below (i.e. cleaner than) the SHSD Best Management Practice Surface Release Guidelines for Lead and Cadmium
|
| 302 |
+
|
| 303 |
+
It is appropriate to be released and used anywhere at BNL without any specific precautions.
|
| 304 |
+
|
| 305 |
+
---
|
| 306 |
+
|
| 307 |
+
### Exceeds Guidelines for Lead or Cadmium
|
| 308 |
+
|
| 309 |
+
The material on this pallet is above (i.e. not cleaner than) the SHSD Best Management Practice Surface Release Guidelines for Lead and/or Cadmium
|
| 310 |
+
|
| 311 |
+
Specific precautions are needed in areas where this material is used or stored.
|
| 312 |
+
|
| 313 |
+
- No operations that cause airborne dust (such as air hoses, blowers, or dry sweeping)
|
| 314 |
+
- Wash hands prior to eating, drinking, chewing gum, or smoking.
|
| 315 |
+
- Do not eat or drink in this area.
|
| 316 |
+
- Notify occupants of the area of the presence of Lead/Cadmium on these surfaces.
|
| 317 |
+
|
| 318 |
+
---
|
| 319 |
+
|
| 320 |
+
# Attachment 9.2
|
| 321 |
+
## WIPE SAMPLING TECHNIQUE FOR HEXAVALENT CHROMIUM
|
| 322 |
+
|
| 323 |
+
**Note:** Hexavalent Chromium has a short life on surfaces. Sampling and analysis need to be completed within a few days of generation. For sampling of long-term dust accumulations, use Cr3 sampling.
|
| 324 |
+
|
| 325 |
+
### Materials supplied by the lab:
|
| 326 |
+
|
| 327 |
+
**Sampling media:**
|
| 328 |
+
|
| 329 |
+
- For chrome plating: PVC or binderless quartz filter. All other operations:
|
| 330 |
+
- 5 um, 37-mm PVC filter for smooth surfaces
|
| 331 |
+
- 0.45 mm thick 37- or 47-mm binderless quartz fiber filter for rough surfaces (preferred media for both smooth and rough surfaces)
|
| 332 |
+
- Immediately after sampling, place the filter sample in a vial containing 10% Na2CO3 with 2% NaHCO3 to stabilize the Cr+6.
|
| 333 |
+
- Do not use Ghost wipe®, Whatman, mixed cellulose ester (MCE) or glass fiber filter as they convert Cr+6 to Cr+3.
|
| 334 |
+
|
| 335 |
+
**Additional materials:**
|
| 336 |
+
|
| 337 |
+
- Template (10 cm x 10 cm)
|
| 338 |
+
- Teflon coated or plastic tweezers
|
| 339 |
+
- Empty glass vials
|
| 340 |
+
- Glass vials containing 5 ml aqueous solution of 10% Na2CO3 with 2% NaHCO3 for chrome plating samples
|
| 341 |
+
- Powderless gloves
|
| 342 |
+
|
| 343 |
+
### Sampling Technique:
|
| 344 |
+
|
| 345 |
+
1. Prepare a sufficient number of vials, each labeled with a unique number.
|
| 346 |
+
|
| 347 |
+
2. Sketch a diagram of the room or area to be sampled.
|
| 348 |
+
|
| 349 |
+
3. Wear a new pair of clean gloves for each sample. DO NOT use powdered gloves.
|
| 350 |
+
|
| 351 |
+
4. Record the sample vial number and location where the sample is taken.
|
| 352 |
+
|
| 353 |
+
5. Remove the filter from the carrying container with a clean PTFE-coated tweezers or plastic tweezers. DO NOT use metal tweezers to handle the filters, as they could deposit Cr+6 onto the filters.
|
| 354 |
+
|
| 355 |
+
**Note:** Surfaces should not be wetted with water as the water will allow any metal interference to interact with Cr+6 thereby affecting the results.
|
| 356 |
+
|
| 357 |
+
6. Use firm pressure when wiping the surface. Start at one corner, move to the opposite side, then up one wipe width, and wipe back to the starting side. Repeat to cover the whole surface area. Fold inward and repeat, wiping the entire surface again. Fold in and repeat a third time.
|
| 358 |
+
|
| 359 |
+
7. After wiping, fold the filter with the contaminant side inward. Place the filter immediately in the sample vial and cap. Filter samples taken in chrome plating operation must be placed in a vial containing 10% Na2CO3 with 2% NaHCO3 to stabilize the Cr+6.
|
| 360 |
+
|
| 361 |
+
8. Submit at least one blank wipe filter, treated in the same fashion, but without wiping.
|
| 362 |
+
|
| 363 |
+
9. Sample results will be reported as ug/100 cm2. OSHA's target concentration is 0.050 ug/100 cm2.
|
| 364 |
+
|
| 365 |
+
10. Ship samples immediately. If unable to ship immediately, keep cold then ship next day air to the lab.
|
| 366 |
+
|
| 367 |
+
---
|
| 368 |
+
|
| 369 |
+
# Attachment 9.3
|
| 370 |
+
## Required and Recommended Surface Wipe Criteria
|
| 371 |
+
### 06/26/17
|
| 372 |
+
|
| 373 |
+
| Compound | Criteria (ug/100 cm2) | Criteria (ug/ft2) | Criteria type (R = Requirement; G = Guidance, Recommended, Non-regulatory) | OSHA PEL (ug/m3) |
|
| 374 |
+
|----------|----------|---|---------------|----------|
|
| 375 |
+
|
| 376 |
+
| **Arsenic (As) 29CFR1910.1018** | | | | |
|
| 377 |
+
| | 100 | 929 | G OSHA Regulated Areas [AFAP] & Operational Areas: Floors & accessible surfaces | 10 ug/m3 |
|
| 378 |
+
| | 6.7 | 62 | G Non-Operational Areas: Floors & accessible surfaces | |
|
| 379 |
+
| **Beryllium (Be) 10CFR850** | | | | |
|
| 380 |
+
| | 3.0 | 28 | R DOE Regulated Areas & Be Operational Areas: Floors & accessible surfaces [Housekeeping] | 2 ug/m3 |
|
| 381 |
+
| | 0.2 | 1.9 | G Non-Operational Areas & Public Areas: Floors & accessible surfaces | |
|
| 382 |
+
| | 3.0 | 28 | R Equipment Release to Be Operational Areas | |
|
| 383 |
+
| | 0.2 | 1.9 | R Equipment Release to Non-beryllium Area of a DOE facility & Public | |
|
| 384 |
+
| **Cadmium (Cd) 29CFR1910.1027** | | | | |
|
| 385 |
+
| | 50 | 465 | G OSHA Regulated Areas [AFAP] & Operational Areas: Floors & accessible surfaces | 5 ug/m3 [.1027] |
|
| 386 |
+
| | 3.3 | 31 | G Non-Operational Areas: Floors & accessible surfaces | 200 ug/m3 [Z.2] |
|
| 387 |
+
| **Chromium, hexavalent (Cr) VI 29CFR1910.1026** | | | | |
|
| 388 |
+
| | 50 | 465 | G OSHA Regulated Areas [AFAP] & Operational Areas: Floors & accessible surfaces | 5 ug/m3 |
|
| 389 |
+
| | 3.3 | 31 | G Non-Operational Areas: Floors & accessible surfaces | |
|
| 390 |
+
| **Lead (Pb) 29CFR1910.1025** | | | | |
|
| 391 |
+
| | 500 | 4645 | G Accelerator Operational Areas & OSHA Regulated Areas [AFAP]: Floors & accessible surfaces | 50 ug/m3 |
|
| 392 |
+
| | 50 | 465 | G Laboratory Operational Areas: Floors & accessible surfaces | |
|
| 393 |
+
| | 22 | 200 | G Non-Operational Areas: Floors & accessible surfaces | |
|
| 394 |
+
| | 22 | 200 | G OSHA 1926.62 Construction Sites: change areas, storage facilities, & lunchrooms [Housekeeping] | |
|
| 395 |
+
| | 4.3 | 40 | G Eating & food prep surfaces | |
|
| 396 |
+
| | 43 | 400 | G Public/Lodging/Childcare- Window troughs | |
|
| 397 |
+
| | 27 | 250 | G Public/Lodging/Childcare- Window sills | |
|
| 398 |
+
| | 4.3 | 40 | G Public/Lodging/Childcare- Floors, Eating & food prep surfaces | |
|
| 399 |
+
| **Acrylonitrile 29CFR1910.1045** | | | | |
|
| 400 |
+
| | 43 | 400 | G OSHA Regulated Areas [AFAP] & Operational Areas: Floors & accessible surfaces | [2 ppm] 4.3 ug/m3 |
|
| 401 |
+
| **Dibromochloropropane 29CFR1910.1044** | | | | |
|
| 402 |
+
| | 1.0 | 9.3 | G OSHA Regulated Areas [AFAP] & Operational Areas: Floors & accessible surfaces | [1 ppb] 0.01 ug/m3 |
|
| 403 |
+
| **Methylenedianiline 29CFR1910.1050** | | | | |
|
| 404 |
+
| | 0.8 | 7.5 | G OSHA Regulated Areas [AFAP] & Operational Areas: Floors & accessible surfaces | [10 ppb] 0.08 ug/m3 |
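
Automated comparison against these criteria is possible once results are normalized to ug/100 cm2 (see Section 6.6). The sketch below transcribes a few of the guidance values from the table above into Python purely for illustration; the printed table remains the authoritative source, and the dictionary keys are ours.

```python
# A few guidance criteria from the table above, transcribed for illustration (ug/100 cm2).
# The printed table remains authoritative; dictionary keys are illustrative only.
CRITERIA_UG_100CM2 = {
    ("lead", "non_operational_floors"): 22,
    ("lead", "eating_food_prep"): 4.3,
    ("beryllium", "operational_housekeeping"): 3.0,
    ("cadmium", "non_operational_floors"): 3.3,
}

def exceeds_criterion(analyte: str, area: str, result_ug_100cm2: float) -> bool:
    """True if a normalized wipe result exceeds the listed criterion."""
    return result_ug_100cm2 > CRITERIA_UG_100CM2[(analyte, area)]

# Example: 30 ug/100 cm2 of lead on a non-operational floor exceeds the 22 ug/100 cm2 guidance.
assert exceeds_criterion("lead", "non_operational_floors", 30.0)
```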
|
| 405 |
+
|
| 406 |
+
### Definition (for purposes of the table above):
|
| 407 |
+
|
| 408 |
+
**AFAP:** As Free As Practicable; Housekeeping- All surfaces shall be maintained as free as practicable of accumulations of [OSHA Regulated Substances]: Arsenic: 1910.1018(k); Cadmium: 1910.1027(k); Chromium: 1910.1026(j); Lead: 1910.1025(h); Acrylonitrile: 1910.1045(k) DBCP: 1910.1044(k); MDA: 1910.1050(l).
|
| 409 |
+
|
| 410 |
+
The enumerated guidance criteria level is based on: OSHA Technical Manual; Section II: Chapter 2 Surface Contaminants, Skin Exposure, Biological Monitoring and Other Analyses; III. Wipe Sampling, Field Portable X-Ray Fluorescence Sampling, Dermal Sampling and Biological Monitoring; A. Surface Wipe Sampling.
|
| 411 |
+
|
| 412 |
+
**Accessible surfaces:** Surfaces that can reasonably be expected to be contacted during typical operations. This would include table tops, desk tops, and other surfaces where contact with hands, arms and body is likely. [BNL]
|
| 413 |
+
|
| 414 |
+
**Eating & Food Prep Surfaces** = Surfaces on which food preparation, eating & drinking are done. This includes lunchroom counters/tables; kitchen counter tops, stove tops; water cooler surfaces; and tables/desks in offices/conference rooms where food and beverage consumption is permitted. [BNL]
|
| 415 |
+
|
| 416 |
+
**Equipment Release to Operational Area [Beryllium]** = Maximum removable contamination on equipment that is being released to a facility using the beryllium. Equipment must be labeled and sealed in impermeable bag or container. [DOE 10CFR850.31]
|
| 417 |
+
|
| 418 |
+
**Equipment Release to Operational Area [OSHA Regulated Substance]** = Maximum removable contamination on equipment that is being released to a facility using the regulated substance. [BNL]
|
| 419 |
+
|
| 420 |
+
**Equipment Release to Non-Operational Area or Public [Beryllium]** = Maximum removable contamination on equipment that is being released to the general public or to a non-beryllium area of a DOE facility. Equipment release is conditioned on the recipient's commitment to implement controls that will prevent foreseeable beryllium exposure, considering the nature of the equipment or item and its future use and the nature of the beryllium contamination. [DOE 10CFR850.31]
|
| 421 |
+
|
| 422 |
+
**Equipment Release to Non-Operational Area or Public [OSHA Regulated Substance]** = Maximum removable contamination on equipment that is being released to the general public or to a Non-Operational Area. [BNL]
|
| 423 |
+
|
| 424 |
+
**Housekeeping** = Maximum level allowed on accessible surfaces in Operational Areas during Non-Operational periods. Surfaces contaminated with dusts and waste must not exceed a removable contamination level criterion during Non-Operational periods. This sampling would not include the interior of installed closed systems such as enclosures, glove boxes, chambers, or ventilation systems. [DOE 10CFR850.30]
|
| 425 |
+
|
| 426 |
+
**Non-Beryllium Area** = Area where beryllium is not used in a DOE facility. [DOE 10CFR 850.31]
|
| 427 |
+
|
| 428 |
+
**Non-Operational Area [Beryllium]** = Area where beryllium is not used and where workers are not trained in hazards and controls. Personal hygiene control practices are not in place (hand washing is not expected on exiting the area) and eating & drinking are permitted. [BNL]
|
| 429 |
+
|
| 430 |
+
**Non-Operational Area [OSHA Regulated Substance]** = Area where an OSHA Regulated Substance is not used and where workers are not trained in hazards and controls. Personal hygiene control practices are not in place (hand washing is not expected on exiting the area) and eating & drinking are permitted. [BNL]
|
| 431 |
+
|
| 432 |
+
**Operational Area [Beryllium]** = Area where workers are routinely in the presence of beryllium as part of their work activity. [DOE 10CFR850.3]
|
| 433 |
+
|
| 434 |
+
**Operational Area [OSHA Regulated Substance]** = Area where workers are routinely in the presence of an OSHA Regulated Substance as part of their work activity. Workers who handle the substance have been trained in hazards and controls. Substances are routinely used, handled or stored and personal hygiene control practices are in place (e.g. eating, drinking are prohibited in the area; hand washing is expected on exiting the area). Examples: lead shielding blocks, shops, and accelerator areas using organic and inorganic metallic compounds. [BNL]
|
| 435 |
+
|
| 436 |
+
**OSHA Regulated Substance** = A substance regulated in 29CFR1910.1003-1054 in the expanded health standards:
|
| 437 |
+
|
| 438 |
+
- **Metals:**
|
| 439 |
+
- Arsenic 29CFR1910.1018;
|
| 440 |
+
- Cadmium 29CFR1910.1027;
|
| 441 |
+
- Chromium, hexavalent 29CFR1910.1026;
|
| 442 |
+
- Lead 29CFR1910.1025
|
| 443 |
+
|
| 444 |
+
- **Chemicals:**
|
| 445 |
+
- Acrylonitrile 29CFR1910.1045;
|
| 446 |
+
- Benzene 29CFR1910.1028;
|
| 447 |
+
- Dibromochloropropane 29CFR1910.1044;
|
| 448 |
+
- Formaldehyde 29CFR1910.1048;
|
| 449 |
+
- Methylenedianiline 29CFR1910.1050;
|
| 450 |
+
- Methylene Chloride 29CFR1910.1052;
|
| 451 |
+
|
| 452 |
+
- **OSHA 13 carcinogens** = 4-Nitrobiphenyl, Chemical Abstracts Service Register Number (CAS No.) 92933; alpha-Naphthylamine, CAS No. 134327; methyl chloromethyl ether, CAS No. 107302; 3,3'-Dichlorobenzidine (and its salts) CAS No. 91941; bis-Chloromethyl ether, CAS No. 542881; beta-Naphthylamine, CAS No. 91598; Benzidine, CAS No. 92875; 4-Aminodiphenyl, CAS No. 92671; Ethyleneimine, CAS No. 151564; beta-Propiolactone, CAS No. 57578; 2-Acetylaminofluorene, CAS No. 53963; 4-Dimethylaminoazo-benzene, CAS No. 60117; and N-Nitrosodimethylamine, CAS No. 62759. [OSHA]
|
| 453 |
+
|
| 454 |
+
**Public** = Persons who are not: DOE employees, BSA employees, contractors, sub-contractors, and persons with Student, Intern, User or Guest appointments. The public includes visitors and family members living in residence at Upton. They are not trained by BNL in hazards and controls of toxic substances. [BNL]
|
| 455 |
+
|
| 456 |
+
**Public/ Lodging/Childcare Areas** = Area open to the public for periods longer than short visits or tours or areas intended for frequent access by visitors and/or family members. Eating and drinking is allowed in public areas. Occupants are not trained in the hazards of the metal or control measures. Hand washing is not expected on exit of the area. Public areas include: Science Museum (935), Coin Laundry (363), Berkner Hall (388), Swimming Pool (462), Gymnasium (461), Brookhaven Center (30), Research Support Building (400), BNL Upton on-site housing: Cavendish (153), Compton (170), Curie (258), Fleming (180), Guest House (257), Danish House (388), Apartments, Efficiencies; and areas with high occupancy by children: Child Development Center (370), Recreation Hall (317), School House (373) [BNL]
|
| 457 |
+
|
| 458 |
+
**Regulated Area [Beryllium]** = Area demarcated by the responsible employer in which the airborne concentration of beryllium exceeds, or can reasonably be expected to exceed, the action level. [DOE 10CFR850.3]
|
| 459 |
+
|
| 460 |
+
**Regulated Area [OSHA Regulated Substance]** = Area where an OSHA Regulated Substance is used in a manner that airborne exposure levels exceed the Permissible Exposure Limit. Area is formally demarcated and access to the area is controlled to those meeting the entry requirements in the OSHA regulation. Personal hygiene control practices are in place; eating and drinking are prohibited; hand washing is expected on exiting the area. OSHA standards require these areas to be "As Free As Practicable". The OSHA Technical Manual (G1) provides a recommended method to enumerate AFAP [BNL]
|
| 461 |
+
|
| 462 |
+
---
|
| 463 |
+
|
| 464 |
+
# IH 75190 Attachment 9.4
|
| 465 |
+
## Environmental Evaluation of Surface Wipe Sampling for Chemicals/Metals
|
| 466 |
+
|
| 467 |
+
**Operation Description:** Field samples for potential metals or chemicals are collected on pre-moistened pads. This process concentrates toxic substances on the media. The wipes are either sent off-site for analysis or in some instances are analyzed at BNL by the IH Group using direct reading meters.
|
| 468 |
+
|
| 469 |
+
**Frequency of Operation:** 10 to 20 times per year.
|
| 470 |
+
|
| 471 |
+
**Environmental impact:**
|
| 472 |
+
|
| 473 |
+
- The wipes sampled at BNL are consumed in the analysis at the end of the test by the off-site lab. Conformance with proper wipe disposal by the off-site vendor laboratory is validated to BNL IH Group's satisfaction in the AIHA Accreditation process.
|
| 474 |
+
- PPE used during sampling and the paper templates are disposed of at the direction of the EPD ECR. The current policy is for disposal as non-hazardous waste. This is justified because the concentration is too low to be of concern (a few micrograms per wipe surface).
|
| 475 |
+
|
| 476 |
+
**Waste Disposal:**
|
| 477 |
+
|
| 478 |
+
- PPE and paper templates are disposed of as non-hazardous waste, unless otherwise directed by EPD.
|
| 479 |
+
|
| 480 |
+
---
|
| 481 |
+
|
| 482 |
+
# Brookhaven National Laboratory
|
| 483 |
+
## Safety & Health Service Division
|
| 484 |
+
## Industrial Hygiene Group
|
| 485 |
+
|
| 486 |
+
# Surface Contamination Sampling Form
|
| 487 |
+
|
| 488 |
+
**BNL-IH75190 Attachment 9.5 Sample- Do not use**
|
| 489 |
+
|
| 490 |
+
**Analyte:**
|
| 491 |
+
|
| 492 |
+
_____ LEAD
|
| 493 |
+
|
| 494 |
+
_____ BERYLLIUM
|
| 495 |
+
|
| 496 |
+
_____ CADMIUM
|
| 497 |
+
|
| 498 |
+
_____ Other:
|
| 499 |
+
|
| 500 |
+
**DEPT:**
|
| 501 |
+
|
| 502 |
+
**BUILDING:**
|
| 503 |
+
|
| 504 |
+
**LOCATION NAME, ROOM NUMBER & DESCRIPTION:**
|
| 505 |
+
|
| 506 |
+
---
|
| 507 |
+
|
| 508 |
+
**Sample Media:** | **Solvent:** | **Surface Area Measurement:**
|
| 509 |
+
|
| 510 |
+
_____ Ghost Wipe | _____ Pre-Moistened | _____ Template
|
| 511 |
+
|
| 512 |
+
_____ Cotton Gauze | _____ Distilled Water | _____ Measured Area
|
| 513 |
+
|
| 514 |
+
Size: | _____ Hexane | _____ Estimated Area
|
| 515 |
+
|
| 516 |
+
_____ Filter Paper | _____ Isopropanol | Other:
|
| 517 |
+
|
| 518 |
+
Type & Size: | _____ Other:
|
| 519 |
+
|
| 520 |
+
_____ Other:
|
| 521 |
+
|
| 522 |
+
**REASON FOR SAMPLING:**
|
| 523 |
+
|
| 524 |
+
_____ Area Characterization
|
| 525 |
+
|
| 526 |
+
_____ Pre-Remediation
|
| 527 |
+
|
| 528 |
+
_____ Post Remediation
|
| 529 |
+
|
| 530 |
+
Other:
|
| 531 |
+
|
| 532 |
+
---
|
| 533 |
+
|
| 534 |
+
### Sample Identification
|
| 535 |
+
|
| 536 |
+
| Sample Number | Sample Location | Surface Type | Surface Area |
|
| 537 |
+
|---------------|-----------------|--------------|--------------|
|
| 538 |
+
| Bldg# MMDDYY Analyte Symbol Sample # | | Metal / Plastic / Glass /Painted Wood / Wood / Painted Concrete / Concrete | _____ 1 ft2 |
|
| 539 |
+
| | | | _____ 100 cm2 |
|
| 540 |
+
| | | | other: _____________________________ |
|
| 541 |
+
| | | | _____ 1 ft2 |
|
| 542 |
+
| | | | _____ 100 cm2 |
|
| 543 |
+
| | | | other: _____________________________ |
|
| 544 |
+
| | | | _____ 1 ft2 |
|
| 545 |
+
| | | | _____ 100 cm2 |
|
| 546 |
+
| | | | other: _____________________________ |
|
| 547 |
+
| | | | _____ 1 ft2 |
|
| 548 |
+
| | | | _____ 100 cm2 |
|
| 549 |
+
| | | | other: _____________________________ |
|
| 550 |
+
|
| 551 |
+
_____ Additional Samples next page
|
| 552 |
+
|
| 553 |
+
**Total Number of Samples:** ___________________
|
| 554 |
+
|
| 555 |
+
| SAMPLE DATE: | RELINQUISHED TO SHSD IH LAB BY: (SIGNATURE): | DATE /TIME: |
|
| 556 |
+
|--------------|---------------------------------------------|-------------|
|
| 557 |
+
| | | / |
|
| 558 |
+
|
| 559 |
+
| SAMPLES TAKEN BY: (Print Name and Signature) | RECEIVED BY SHSD IH LAB EMPLOYEE (SIGNATURE): | DATE /TIME: |
|
| 560 |
+
|---------------------------------------------|----------------------------------------------|-------------|
|
| 561 |
+
| / | | / |
|
| 562 |
+
|
| 563 |
+
*Sample of online form*
|
| 564 |
+
*Use e-Forms from SHSD web page current version*
|
| 565 |
+
|
| 566 |
+
---
|
| 567 |
+
|
| 568 |
+
# IH75190 Attachment 9.6
|
| 569 |
+
|
| 570 |
+
## HP-IHP-75190
|
| 571 |
+
|
| 572 |
+
**Environmental, Safety, Health & Quality Directorate**
|
| 573 |
+
**SHSD Industrial Hygiene**
|
| 574 |
+
|
| 575 |
+
# Surface Wipe Sampling for Metals
|
| 576 |
+
## Job Performance Measure (JPM) Completion Certificate
|
| 577 |
+
|
| 578 |
+
| Candidate's Name | Life Number: | Qualification Number: |
|
| 579 |
+
|------------------|--------------|----------------------|
|
| 580 |
+
| | | HP-IHP- 75190 |
|
| 581 |
+
|
| 582 |
+
---
|
| 583 |
+
|
| 584 |
+
### Knowledge of the Principles of Surface Wipe Sampling - Demonstrated by Written Exam
|
| 585 |
+
|
| 586 |
+
| Criteria | Qualifying Standard |
|
| 587 |
+
|----------|---------------------|
|
| 588 |
+
| **Hazard Analysis** | Understands the need to perform a hazard analysis of the sampling area and potential exposure to the sampler. |
|
| 589 |
+
| **Personal Protective Equipment** | Understands the need to be aware of the potential surface contamination and airborne levels of contaminants and knows how to determine the need for PPE. |
|
| 590 |
+
| **Sampling Protocol** | Understands the exposure monitoring logic necessary to appropriately select sampling locations to accurately measure worker, public and environmental exposure potential. |
|
| 591 |
+
| **Analysis of data** | Understands the need to perform analysis on the sampling data to assess potential exposure to the sampler, worker, public and environment, and to recommend corrective actions as necessary. |
|
| 592 |
+
|
| 593 |
+
---
|
| 594 |
+
|
| 595 |
+
### Practical Skill Evaluation: Demonstration of Surface Wipe Methodology
|
| 596 |
+
|
| 597 |
+
| Criteria | Qualifying Performance Standard | Unsat. | Recov. | Satisf. |
|
| 598 |
+
|----------|--------------------------------|--------|--------|---------|
|
| 599 |
+
| **Sampling Equipment** | Knows where equipment needed for the procedure is located and how to properly sign it out. | | | |
|
| 600 |
+
| **Moistening Media** | a. Filter/gauze: Moistens media with the appropriate solvent. Applies solvent to moisten approximately 80% of the area of the media. Does not over moisten. b. For pre-moistened media, shows reduction in size of wipe. | | | |
|
| 601 |
+
| **Size of Area & Use of Template** | Understands the importance of quantifying the area sampled. Demonstrates placing template on surface or measuring the surface area. | | | |
|
| 602 |
+
| **Folding Media at each wipe step** | Demonstrates the inward folding of media after each wipe and placement of media into container so that surfaces loaded in the wiping are not exposed. | | | |
|
| 603 |
+
| **NIOSH Method wipe pattern** | Demonstrates the technique of three passes of wiping in "S" pattern, changing the direction on second pass, original direction on third pass. | | | |
|
| 604 |
+
| **Choose correct solvent** | Knows how to select correct solvent from Table 1. | | | |
|
| 605 |
+
| **Select the correct number of samples** | Knows how to choose the appropriate numbers of samples based on Table 2. | | | |
|
| 606 |
+
| **Record forms** | Shows how to correctly and completely fill all forms associated with this SOP. | | | |
|
| 607 |
+
|
| 608 |
+
---
|
| 609 |
+
|
| 610 |
+
I accept the responsibility for performing this task as demonstrated within this JPM and the corresponding SOP.
|
| 611 |
+
|
| 612 |
+
| Candidate Signature: | Date: |
|
| 613 |
+
|---------------------|-------|
|
| 614 |
+
| | |
|
| 615 |
+
|
| 616 |
+
I certify the candidate has satisfactorily performed each of the above listed steps and is capable of performing the task unsupervised.
|
| 617 |
+
|
| 618 |
+
| Evaluator Signature: | Date: |
|
| 619 |
+
|---------------------|-------|
|
| 620 |
+
| | |
|
| 621 |
+
|
| 622 |
+
*SOP-IH75190 JPM Form (Revision Date: 06/13/16)*
|
RAG-KB/Technical Guide for Wildfire Restoration - Key Information.md
ADDED
|
@@ -0,0 +1,79 @@
|
| 1 |
+
# Technical Guide for Wildfire Restoration - Key Information
|
| 2 |
+
|
| 3 |
+
**Source:** IICRC/RIA/CIRI Technical Guide for Wildfire Restoration
|
| 4 |
+
**Version:** Version 2, December 9th 2025
|
| 5 |
+
**URL:** https://iicrc.org/wp-content/uploads/2025/12/IICRC.RIA_.CIRI-Technical-Guide-for-Wildfire-Restoration-V2-Final-2025-12.09.pdf
|
| 6 |
+
**Organizations:** Institute of Inspection, Cleaning, and Restoration Certification (IICRC), Restoration Industry Association (RIA), Cleaning Industry Research Institute (CIRI)
|
| 7 |
+
|
| 8 |
+
## Purpose and Scope
|
| 9 |
+
|
| 10 |
+
This technical guide presents current and common methodology of prudent wildfire restoration practices. It represents thousands of restoration companies and professionals who have returned families to their homes safely using proven, science-based methodologies in accordance with peer-reviewed industry standards.
|
| 11 |
+
|
| 12 |
+
## Key Message
|
| 13 |
+
|
| 14 |
+
The guide addresses a growing unfounded sentiment that homes affected by wildfire smoke and its byproducts are categorically uncleanable and unrestorable. The guide emphasizes that:
|
| 15 |
+
- Wildfire smoke damage is a superficial occurrence that can generally be cleaned
|
| 16 |
+
- Specialized cleaning methodologies have been successfully used for decades
|
| 17 |
+
- Professional restoration is science-based and proven
|
| 18 |
+
- Categorical disposal of all materials and structures is inconsistent with science and industry standards
|
| 19 |
+
|
| 20 |
+
## Four Core Procedural Principles
|
| 21 |
+
|
| 22 |
+
### 1. Pre-Restoration Evaluation (PRE)
|
| 23 |
+
- Critical first step performed by the restorer
|
| 24 |
+
- Establishes degree of impact from wildfire event
|
| 25 |
+
- Goal: identify presence of wildfire-related combustion byproducts through visual and sensory inspection
|
| 26 |
+
- Identifies key risk factors
|
| 27 |
+
- Determines whether restoration can begin immediately or if formal assessment is needed
|
| 28 |
+
|
| 29 |
+
### 2. Pre-Restoration Assessment (PRA)
|
| 30 |
+
- Formal, third-party process
|
| 31 |
+
- Typically performed by Industrial Hygienist (IH) or qualified OEHS professional
|
| 32 |
+
- Triggered by specific findings in PRE, stakeholder request, or AHJ requirements
|
| 33 |
+
- Uses scientific sampling and laboratory analysis
|
| 34 |
+
- Definitively characterizes type and extent of combustion byproducts
|
| 35 |
+
- Establishes data-driven, defensible scope of work
|
| 36 |
+
|
| 37 |
+
### 3. The Restoration Phase
|
| 38 |
+
- Physical process of removing wildfire-related combustion byproducts
|
| 39 |
+
- Goal: return structure, systems, and contents to clean, safe, odor-free condition
|
| 40 |
+
- Includes detailed source-removal cleaning
|
| 41 |
+
- Indoor air quality management
|
| 42 |
+
- Proper documentation and disposal of non-salvageable items
|
| 43 |
+
|
| 44 |
+
### 4. Project Completion
|
| 45 |
+
- Final critical phase
|
| 46 |
+
- Establishes success of restoration efforts
|
| 47 |
+
- Collects evidence that combustion byproducts have been effectively removed
|
| 48 |
+
- Two components:
|
| 49 |
+
- **Restoration Completion Evaluation (RCE)**: conducted by restorer
|
| 50 |
+
- **Post Restoration Verification (PRV)**: performed by independent third party when necessary
|
| 51 |
+
|
| 52 |
+
## Key Terminology
|
| 53 |
+
|
| 54 |
+
**Combustion By-Products (CBP):** Resulting substances (char, ash, smoke) created from a fire event
|
| 55 |
+
|
| 56 |
+
**Combustion Byproducts of Concern (CBC):** Wildfire-related combustion byproducts that can pose potential for continued damage or elevated human health risks
|
| 57 |
+
|
| 58 |
+
**Burn Zone:** Wildfire impact zone with direct flame impingement or significant radiant heat
|
| 59 |
+
|
| 60 |
+
**Near-Field Zone:** Extends from fire perimeter to approximately 1-10 kilometers (0.6 to 6.2 miles); affected by hot, turbulent smoke plume forcing particulates and gaseous combustion byproducts (VOCs) into building envelope
|
| 61 |
+
|
| 62 |
+
**Far-Field Zone:** Extends beyond Near-Field Zone, potentially for hundreds of miles; primary impact is infiltration of fine particulate matter (PM2.5); impact is often surface-level and highly correctable
|
| 63 |
+
|
| 64 |
+
## Document Structure
|
| 65 |
+
|
| 66 |
+
The guide includes:
|
| 67 |
+
- Introduction
|
| 68 |
+
- Combustion Byproducts of Concern (CBC)
|
| 69 |
+
- Impact Zones
|
| 70 |
+
- Pre-Restoration Evaluation and Assessment
|
| 71 |
+
- The Restoration Phase (health/safety, procedures, removal of unrestorable goods)
|
| 72 |
+
- Project Completion
|
| 73 |
+
- Glossary of Terms
|
| 74 |
+
- References
|
| 75 |
+
|
| 76 |
+
## Related Reference
|
| 77 |
+
|
| 78 |
+
The guide references the **AIHA Technical Guide for Wildfire Impact Assessments for the OEHS Professional**, 2nd edition (2025) for more information on assessment processes.
|
| 79 |
+
|
RAG-KB/air-o-cell-method-guide-atlas.md
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
RAG-KB/wildfire_soot_particulate_removal_full_text_extraction.md
ADDED
|
@@ -0,0 +1,134 @@
|
| 1 |
+
SOOT PARTICLES:
|
| 2 |
+
A Procedural Guide for Containing and Removing Wildfire-Caused Soot in Buildings
|
| 3 |
+
|
| 4 |
+
By Patrick J. Moffett, REA, CHMM
|
| 5 |
+
Environmental Management & Engineering, Inc.
|
| 6 |
+
Huntington Beach, California
|
| 7 |
+
|
| 8 |
+
Copyright © 1997, 2002, 2008
|
| 9 |
+
All Rights Reserved
|
| 10 |
+
|
| 11 |
+
|
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
COMMENTARY
|
| 24 |
+
|
| 25 |
+
The purpose of this paper is to provide a procedural guide for the restoration of buildings and contents contaminated with wildfire-caused soot. This paper was written primarily for restorers, insurance adjusters, and building owners who are dealing with extensive wildfire-caused soot contamination. It is not intended to be a comprehensive restoration manual for all smoke and soot contamination conditions. The paper focuses on wildfire-caused soot, ash, and odor contamination and addresses worker and occupant safety and health issues. In 2008, the paper was updated to address new concerns regarding ultrafine particles.
|
| 26 |
+
|
| 27 |
+
Worker Safety
|
| 28 |
+
|
| 29 |
+
In recent years, many restoration workers have been involved in cleaning wildfire-caused soot contamination. During these projects, workers often were observed wearing little or no respiratory protection. In some cases, workers were observed wearing simple dust masks or N95 respirators while performing soot cleaning activities. In other cases, workers were observed wearing N100 respirators or half-face respirators equipped with HEPA cartridges. In some cases, workers were observed wearing full-face respirators equipped with HEPA and organic vapor cartridges.
|
| 30 |
+
|
| 31 |
+
The question arises: What type of respiratory protection is appropriate for wildfire soot cleanup? In order to answer this question, it is important to understand the nature of wildfire-caused soot, including the size of the soot particles, the chemical composition of the soot, and the potential health hazards associated with exposure to soot.
|
| 32 |
+
|
| 33 |
+
PART I
|
| 34 |
+
Particles and Chemicals in Smoke and Soot
|
| 35 |
+
|
| 36 |
+
Wildfire Smoke
|
| 37 |
+
|
| 38 |
+
Smoke is a complex mixture of gases and particles produced by the incomplete combustion of organic materials. Wildfire smoke contains numerous chemicals, including carbon monoxide, nitrogen oxides, hydrocarbons, aldehydes, ketones, alcohols, benzo[a]pyrene, and organic acids. The composition of wildfire smoke varies depending on the type of fuel burned, the combustion temperature, and the availability of oxygen.
|
Hot, flaming combustion tends to produce black smoke composed primarily of elemental carbon particles. Cooler, smoldering combustion tends to produce white or gray smoke composed of incompletely combusted organic materials.

Soot

Soot is composed primarily of carbon particles produced by incomplete combustion. Soot particles are often coated with organic chemicals, including polycyclic aromatic hydrocarbons (PAHs) and other combustion byproducts. The chemical composition of soot varies depending on the type of fuel burned.

Vegetation fires tend to produce gray or light-colored ash and soot composed primarily of inorganic ash and partially combusted organic materials. Fires involving petroleum products, plastics, roofing materials, and synthetic furnishings tend to produce black, oily soot composed primarily of carbon black.

Particle Size

Soot particles vary widely in size. Candle soot particles typically range from approximately 0.06 to 0.1 micrometers (µm) in diameter. Wildfire-caused soot particles may range from less than 0.1 µm to more than 30 µm in diameter. Larger particles, including embers, may be several inches in diameter.

Particle Deposition

Soot particles may be deposited on building surfaces by a variety of mechanisms, including gravity settling, impaction, diffusion, thermophoresis, and electrostatic attraction. Thermophoresis causes particles to move from warmer air toward cooler surfaces. Electrostatic attraction causes charged particles to be attracted to oppositely charged surfaces.

As a result of these mechanisms, soot often deposits preferentially on cooler surfaces, such as exterior walls, window frames, and surfaces near air leaks. Moist surfaces also tend to attract soot particles.

Firestorms and Convection

Large wildfires can generate intense convection currents, sometimes referred to as firestorms. These convection currents can create strong winds, dust devils, and fire whirls that carry smoke, ash, and soot over long distances. Buildings located near wildfires may be subjected to complex airflow patterns that influence the deposition of soot on interior and exterior surfaces.

PART II
Environmental and Human Health Concerns

Chemical Composition

Soot typically contains approximately 60 percent carbon by weight. The remaining portion consists of a complex mixture of organic and inorganic chemicals, including PAHs and heavy metals such as arsenic, cadmium, chromium, and nickel. Thousands of individual compounds may be present in soot, many of which can be identified only by gas chromatography/mass spectrometry (GC/MS) analysis.

Health Hazards

Soot has been recognized as a human carcinogen. Occupational exposure to soot has been associated with an increased risk of skin cancer, lung cancer, and other health effects. Historically, chimney sweeps were known to suffer high rates of cancer due to soot exposure.

Workers involved in wildfire soot cleanup may be exposed to high concentrations of soot particles and associated chemicals. In some cases, these exposures may be comparable to or greater than those experienced by chimney sweeps and other workers historically exposed to soot.

Ultrafine Particles

In recent years, increased attention has been focused on ultrafine particles (particles smaller than 0.1 µm). Ultrafine particles are capable of penetrating deep into the lungs and entering the bloodstream. These particles may cause inflammation, oxidative stress, and other adverse health effects.

Wildfire smoke and soot contain large numbers of ultrafine particles. As a result, wildfire soot cleanup workers may be at risk of exposure to ultrafine particles unless appropriate respiratory protection is used.

Respiratory Protection

Respiratory protection for wildfire soot cleanup should be selected based on the size of the particles present and the presence of gaseous contaminants. Simple dust masks and N95 respirators are not adequate to protect against fine and ultrafine soot particles.

P100 respirators provide a minimum filtration efficiency of 99.97 percent for oil-based particles and are suitable for protection against fine and ultrafine soot particles. However, P100 particulate filters do not provide protection against gaseous contaminants such as carbon monoxide and organic vapors.

In situations where organic vapors or other gases are present, respirators equipped with both P100 particulate filters and organic vapor cartridges may be required. Full-face respirators provide additional protection for the eyes and face.

PART III
Procedures for Removing Wildfire Soot from Contents

General Principles

The removal of wildfire-caused soot from contents should be approached systematically to minimize the spread of contamination and protect workers and occupants. Contents should be evaluated to determine whether they can be cleaned or must be discarded.

Dry Cleaning Methods

Dry cleaning methods are often preferred for removing soot from contents because they minimize the spread of contamination and reduce the risk of driving soot deeper into porous materials. Examples of dry cleaning methods include HEPA vacuuming, dry sponging, and the use of specialized dry cleaning compounds.

Wet Cleaning Methods

Wet cleaning methods may be used when dry methods are not effective. Wet cleaning should be performed carefully to avoid spreading contamination. Detergents and cleaning agents should be selected based on the type of material being cleaned and the nature of the soot.

Electronics

Electronics contaminated with wildfire soot require special handling. Soot particles can cause corrosion and electrical shorts. In many cases, electronics should be evaluated by qualified technicians and may require specialized cleaning or replacement.

PART IV
Procedures for Removing Wildfire Soot from Buildings

Containment

Containment is critical to prevent the spread of soot during cleaning activities. Affected areas should be isolated using plastic sheeting and negative air pressure where feasible.

Surface Cleaning

Building surfaces should be cleaned using methods appropriate for the type of surface and the degree of contamination. Dry cleaning methods should be used whenever possible. Wet cleaning may be used when necessary, with care taken to avoid spreading soot.

HVAC Systems

Heating, ventilation, and air conditioning (HVAC) systems can become contaminated with wildfire soot. HVAC systems should be inspected and cleaned as necessary to prevent the redistribution of soot throughout the building.

Post-Cleaning Verification

After cleaning, surfaces should be inspected to verify that soot has been removed. In some cases, surface sampling or air monitoring may be used to confirm the effectiveness of cleaning.

Author

Patrick J. Moffett, REA, CHMM, is the principal of Environmental Management & Engineering, Inc., based in Huntington Beach, California. He has extensive experience in environmental health and safety, industrial hygiene, and hazardous materials management.

References

[References as listed in the original document]
README.md
ADDED
|
@@ -0,0 +1,70 @@
---
title: FDAM AI Pipeline
emoji: "\U0001F525"
colorFrom: red
colorTo: yellow
sdk: gradio
sdk_version: "6.3.0"
app_file: app.py
pinned: false
suggested_hardware: l4x4
---

# FDAM AI Pipeline

**Fire Damage Assessment Methodology v4.0.1** - An AI-powered system that generates professional Cleaning Specifications / Scope of Work documents for fire damage restoration.

## Features

- **AI-Powered Image Analysis**: Uses Qwen3-VL vision model to detect fire damage zones, conditions, and materials
- **FDAM Compliant**: Implements Fire Damage Assessment Methodology v4.0.1 standards
- **Automated Calculations**: Air filtration, sample density, labor estimates per FDAM formulas
- **Professional PDF Output**: Generates ready-to-use Scope of Work documents
- **Session Persistence**: Save and resume assessments via browser localStorage

## How to Use

1. **Project Info**: Enter project details, facility classification, and assessor information
2. **Building/Rooms**: Add rooms with dimensions (length, width, ceiling height)
3. **Images**: Upload fire damage photos and associate with rooms
4. **Observations**: Record qualitative observations (odor, soot, char, etc.)
5. **Generate**: Click "Generate Assessment" to run AI analysis and produce documents

## Technical Details

### Model Stack (~90GB VRAM)
- **Vision**: Qwen3-VL-30B-A3B-Instruct (~58GB)
- **Embeddings**: Qwen3-VL-Embedding-8B (~16GB)
- **Reranker**: Qwen3-VL-Reranker-8B (~16GB)

### Zone Classifications
- **Burn Zone**: Direct fire involvement, structural damage
- **Near-Field**: Adjacent to burn zone, heavy smoke/heat exposure
- **Far-Field**: Smoke migration only, light deposits

### Condition Levels
- **Background**: No visible contamination
- **Light**: Faint discoloration, minimal deposits
- **Moderate**: Visible film/deposits
- **Heavy**: Thick deposits, surface texture obscured
- **Structural Damage**: Physical damage requiring repair

## Development

```bash
# Local development (mock models)
MOCK_MODELS=true python app.py

# Run tests
pytest tests/ -v
```

## Requirements

- Python 3.10+
- 96GB GPU memory for real model inference (4x L4 or equivalent)
- See `requirements.txt` for full dependencies

## License

Proprietary - For authorized use only.
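The zone and condition labels listed in the README map directly onto the per-image JSON the vision model is asked to produce (the full schema is spelled out in the ANALYSIS_PROMPT inside models/real.py below). A trimmed, illustrative example of that payload, with made-up values:

```python
# Illustrative only: a trimmed per-image analysis dict in the shape the vision
# model is prompted to return. All values here are invented for demonstration.
example_analysis = {
    "zone": {
        "classification": "near-field",
        "confidence": 0.82,
        "reasoning": "Adjacent to the burn area with heavy smoke staining",
    },
    "condition": {
        "level": "moderate",
        "confidence": 0.74,
        "reasoning": "Visible film on horizontal surfaces",
    },
    "materials": [
        {
            "type": "drywall-painted",
            "category": "semi-porous",
            "confidence": 0.90,
            "location_description": "north wall",
            "bounding_box": {"x": 0.1, "y": 0.2, "width": 0.5, "height": 0.4},
        }
    ],
}
```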
app.py
ADDED
|
@@ -0,0 +1,428 @@
| 1 |
+
"""FDAM AI Pipeline - Fire Damage Assessment Methodology v4.0.1
|
| 2 |
+
|
| 3 |
+
Main Gradio application entry point with session state and tab validation.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import gradio as gr
|
| 7 |
+
|
| 8 |
+
from config.settings import settings
|
| 9 |
+
from models.loader import get_models
|
| 10 |
+
from ui.state import SessionState, create_new_session, session_to_json, session_from_json
|
| 11 |
+
from ui.storage import get_head_html
|
| 12 |
+
from ui.tabs import project, rooms, images, observations, results
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def create_app() -> gr.Blocks:
|
| 16 |
+
"""Create the main Gradio application."""
|
| 17 |
+
|
| 18 |
+
# Initialize models at startup
|
| 19 |
+
model_stack = get_models()
|
| 20 |
+
|
| 21 |
+
# Note: head parameter moved to launch() in Gradio 6.0
|
| 22 |
+
# localStorage JS will be injected there
|
| 23 |
+
with gr.Blocks(
|
| 24 |
+
title="FDAM AI Pipeline - Fire Damage Assessment",
|
| 25 |
+
) as app:
|
| 26 |
+
# Session state (stored in Gradio State component)
|
| 27 |
+
session_state = gr.State(value=create_new_session())
|
| 28 |
+
|
| 29 |
+
# Header
|
| 30 |
+
gr.Markdown(
|
| 31 |
+
"""
|
| 32 |
+
# FDAM AI Pipeline
|
| 33 |
+
## Fire Damage Assessment Methodology v4.0.1
|
| 34 |
+
|
| 35 |
+
Upload images and project information to generate a professional
|
| 36 |
+
Cleaning Specification / Scope of Work.
|
| 37 |
+
"""
|
| 38 |
+
)
|
| 39 |
+
|
| 40 |
+
# Mode indicator
|
| 41 |
+
if settings.mock_models:
|
| 42 |
+
gr.Markdown(
|
| 43 |
+
"""
|
| 44 |
+
> **Development Mode**: Using mock models for testing.
|
| 45 |
+
> Set `MOCK_MODELS=false` for production inference.
|
| 46 |
+
"""
|
| 47 |
+
)
|
| 48 |
+
|
| 49 |
+
# Tab navigation
|
| 50 |
+
with gr.Tabs() as tabs:
|
| 51 |
+
# Tab 1: Project Information
|
| 52 |
+
with gr.Tab("1. Project Info", id=0):
|
| 53 |
+
tab1 = project.create_tab()
|
| 54 |
+
|
| 55 |
+
# Tab 2: Building/Rooms
|
| 56 |
+
with gr.Tab("2. Building/Rooms", id=1):
|
| 57 |
+
tab2 = rooms.create_tab()
|
| 58 |
+
|
| 59 |
+
# Tab 3: Images
|
| 60 |
+
with gr.Tab("3. Images", id=2):
|
| 61 |
+
tab3 = images.create_tab()
|
| 62 |
+
|
| 63 |
+
# Tab 4: Observations
|
| 64 |
+
with gr.Tab("4. Observations", id=3):
|
| 65 |
+
tab4 = observations.create_tab()
|
| 66 |
+
|
| 67 |
+
# Tab 5: Generate Results
|
| 68 |
+
with gr.Tab("5. Generate Results", id=4):
|
| 69 |
+
tab5 = results.create_tab()
|
| 70 |
+
|
| 71 |
+
# --- Event Handlers ---
|
| 72 |
+
|
| 73 |
+
# Tab 1: Project Info
|
| 74 |
+
tab1["validate_btn"].click(
|
| 75 |
+
fn=project.validate_and_continue,
|
| 76 |
+
inputs=[
|
| 77 |
+
session_state,
|
| 78 |
+
tab1["project_name"],
|
| 79 |
+
tab1["address"],
|
| 80 |
+
tab1["city"],
|
| 81 |
+
tab1["state"],
|
| 82 |
+
tab1["zip_code"],
|
| 83 |
+
tab1["client_name"],
|
| 84 |
+
tab1["fire_date"],
|
| 85 |
+
tab1["assessment_date"],
|
| 86 |
+
tab1["facility_classification"],
|
| 87 |
+
tab1["construction_era"],
|
| 88 |
+
tab1["assessor_name"],
|
| 89 |
+
tab1["assessor_credentials"],
|
| 90 |
+
],
|
| 91 |
+
outputs=[
|
| 92 |
+
session_state,
|
| 93 |
+
tab1["validation_status"],
|
| 94 |
+
tabs,
|
| 95 |
+
],
|
| 96 |
+
)
|
| 97 |
+
|
| 98 |
+
# Tab 2: Building/Rooms
|
| 99 |
+
tab2["add_room_btn"].click(
|
| 100 |
+
fn=rooms.add_room,
|
| 101 |
+
inputs=[
|
| 102 |
+
session_state,
|
| 103 |
+
tab2["room_name"],
|
| 104 |
+
tab2["room_floor"],
|
| 105 |
+
tab2["room_length"],
|
| 106 |
+
tab2["room_width"],
|
| 107 |
+
tab2["room_height"],
|
| 108 |
+
],
|
| 109 |
+
outputs=[
|
| 110 |
+
session_state,
|
| 111 |
+
tab2["rooms_table"],
|
| 112 |
+
tab2["validation_status"],
|
| 113 |
+
tab2["room_count"],
|
| 114 |
+
tab2["total_area"],
|
| 115 |
+
tab2["total_volume"],
|
| 116 |
+
tab2["room_name"],
|
| 117 |
+
tab2["room_floor"],
|
| 118 |
+
tab2["room_length"],
|
| 119 |
+
tab2["room_width"],
|
| 120 |
+
tab2["room_height"],
|
| 121 |
+
],
|
| 122 |
+
)
|
| 123 |
+
|
| 124 |
+
tab2["clear_form_btn"].click(
|
| 125 |
+
fn=lambda: ("", "", None, None, None),
|
| 126 |
+
outputs=[
|
| 127 |
+
tab2["room_name"],
|
| 128 |
+
tab2["room_floor"],
|
| 129 |
+
tab2["room_length"],
|
| 130 |
+
tab2["room_width"],
|
| 131 |
+
tab2["room_height"],
|
| 132 |
+
],
|
| 133 |
+
)
|
| 134 |
+
|
| 135 |
+
tab2["remove_last_btn"].click(
|
| 136 |
+
fn=rooms.remove_last_room,
|
| 137 |
+
inputs=[session_state],
|
| 138 |
+
outputs=[
|
| 139 |
+
session_state,
|
| 140 |
+
tab2["rooms_table"],
|
| 141 |
+
tab2["validation_status"],
|
| 142 |
+
tab2["room_count"],
|
| 143 |
+
tab2["total_area"],
|
| 144 |
+
tab2["total_volume"],
|
| 145 |
+
],
|
| 146 |
+
)
|
| 147 |
+
|
| 148 |
+
tab2["clear_all_btn"].click(
|
| 149 |
+
fn=rooms.clear_all_rooms,
|
| 150 |
+
inputs=[session_state],
|
| 151 |
+
outputs=[
|
| 152 |
+
session_state,
|
| 153 |
+
tab2["rooms_table"],
|
| 154 |
+
tab2["validation_status"],
|
| 155 |
+
tab2["room_count"],
|
| 156 |
+
tab2["total_area"],
|
| 157 |
+
tab2["total_volume"],
|
| 158 |
+
],
|
| 159 |
+
)
|
| 160 |
+
|
| 161 |
+
tab2["validate_btn"].click(
|
| 162 |
+
fn=rooms.validate_and_continue,
|
| 163 |
+
inputs=[session_state],
|
| 164 |
+
outputs=[
|
| 165 |
+
session_state,
|
| 166 |
+
tab2["validation_status"],
|
| 167 |
+
tabs,
|
| 168 |
+
],
|
| 169 |
+
)
|
| 170 |
+
|
| 171 |
+
tab2["back_btn"].click(
|
| 172 |
+
fn=lambda: 0,
|
| 173 |
+
outputs=[tabs],
|
| 174 |
+
)
|
| 175 |
+
|
| 176 |
+
# Tab 3: Images
|
| 177 |
+
# Update room dropdown when entering tab
|
| 178 |
+
tabs.select(
|
| 179 |
+
fn=lambda session, selected: (
|
| 180 |
+
images.update_room_choices(session) if selected == 2 else gr.update()
|
| 181 |
+
),
|
| 182 |
+
inputs=[session_state, tabs],
|
| 183 |
+
outputs=[tab3["room_select"]],
|
| 184 |
+
)
|
| 185 |
+
|
| 186 |
+
tab3["add_image_btn"].click(
|
| 187 |
+
fn=images.add_image,
|
| 188 |
+
inputs=[
|
| 189 |
+
session_state,
|
| 190 |
+
tab3["image_upload"],
|
| 191 |
+
tab3["room_select"],
|
| 192 |
+
tab3["image_description"],
|
| 193 |
+
],
|
| 194 |
+
outputs=[
|
| 195 |
+
session_state,
|
| 196 |
+
tab3["images_gallery"],
|
| 197 |
+
tab3["validation_status"],
|
| 198 |
+
tab3["image_count"],
|
| 199 |
+
tab3["image_upload"],
|
| 200 |
+
tab3["image_description"],
|
| 201 |
+
tab3["room_select"],
|
| 202 |
+
],
|
| 203 |
+
)
|
| 204 |
+
|
| 205 |
+
tab3["clear_upload_btn"].click(
|
| 206 |
+
fn=lambda: (None, ""),
|
| 207 |
+
outputs=[
|
| 208 |
+
tab3["image_upload"],
|
| 209 |
+
tab3["image_description"],
|
| 210 |
+
],
|
| 211 |
+
)
|
| 212 |
+
|
| 213 |
+
tab3["remove_last_btn"].click(
|
| 214 |
+
fn=images.remove_last_image,
|
| 215 |
+
inputs=[session_state],
|
| 216 |
+
outputs=[
|
| 217 |
+
session_state,
|
| 218 |
+
tab3["images_gallery"],
|
| 219 |
+
tab3["validation_status"],
|
| 220 |
+
tab3["image_count"],
|
| 221 |
+
],
|
| 222 |
+
)
|
| 223 |
+
|
| 224 |
+
tab3["clear_all_btn"].click(
|
| 225 |
+
fn=images.clear_all_images,
|
| 226 |
+
inputs=[session_state],
|
| 227 |
+
outputs=[
|
| 228 |
+
session_state,
|
| 229 |
+
tab3["images_gallery"],
|
| 230 |
+
tab3["validation_status"],
|
| 231 |
+
tab3["image_count"],
|
| 232 |
+
],
|
| 233 |
+
)
|
| 234 |
+
|
| 235 |
+
tab3["validate_btn"].click(
|
| 236 |
+
fn=images.validate_and_continue,
|
| 237 |
+
inputs=[session_state],
|
| 238 |
+
outputs=[
|
| 239 |
+
session_state,
|
| 240 |
+
tab3["validation_status"],
|
| 241 |
+
tabs,
|
| 242 |
+
],
|
| 243 |
+
)
|
| 244 |
+
|
| 245 |
+
tab3["back_btn"].click(
|
| 246 |
+
fn=lambda: 1,
|
| 247 |
+
outputs=[tabs],
|
| 248 |
+
)
|
| 249 |
+
|
| 250 |
+
# Tab 4: Observations
|
| 251 |
+
tab4["validate_btn"].click(
|
| 252 |
+
fn=observations.validate_and_continue,
|
| 253 |
+
inputs=[
|
| 254 |
+
session_state,
|
| 255 |
+
tab4["smoke_odor"],
|
| 256 |
+
tab4["odor_intensity"],
|
| 257 |
+
tab4["visible_soot"],
|
| 258 |
+
tab4["soot_description"],
|
| 259 |
+
tab4["large_char"],
|
| 260 |
+
tab4["char_density"],
|
| 261 |
+
tab4["ash_residue"],
|
| 262 |
+
tab4["ash_description"],
|
| 263 |
+
tab4["surface_discoloration"],
|
| 264 |
+
tab4["discoloration_description"],
|
| 265 |
+
tab4["dust_interference"],
|
| 266 |
+
tab4["dust_notes"],
|
| 267 |
+
tab4["wildfire_indicators"],
|
| 268 |
+
tab4["wildfire_notes"],
|
| 269 |
+
tab4["additional_notes"],
|
| 270 |
+
],
|
| 271 |
+
outputs=[
|
| 272 |
+
session_state,
|
| 273 |
+
tab4["validation_status"],
|
| 274 |
+
tabs,
|
| 275 |
+
],
|
| 276 |
+
)
|
| 277 |
+
|
| 278 |
+
tab4["back_btn"].click(
|
| 279 |
+
fn=lambda: 2,
|
| 280 |
+
outputs=[tabs],
|
| 281 |
+
)
|
| 282 |
+
|
| 283 |
+
# Tab 5: Generate Results
|
| 284 |
+
# Update preflight check when entering tab
|
| 285 |
+
tabs.select(
|
| 286 |
+
fn=lambda session, selected: (
|
| 287 |
+
results.check_preflight(session) if selected == 4 else ""
|
| 288 |
+
),
|
| 289 |
+
inputs=[session_state, tabs],
|
| 290 |
+
outputs=[tab5["preflight_status"]],
|
| 291 |
+
)
|
| 292 |
+
|
| 293 |
+
tab5["generate_btn"].click(
|
| 294 |
+
fn=results.generate_assessment,
|
| 295 |
+
inputs=[session_state],
|
| 296 |
+
outputs=[
|
| 297 |
+
session_state,
|
| 298 |
+
tab5["processing_status"],
|
| 299 |
+
tab5["progress_html"],
|
| 300 |
+
tab5["annotated_gallery"],
|
| 301 |
+
tab5["stats_output"],
|
| 302 |
+
tab5["sow_output"],
|
| 303 |
+
tab5["download_md"],
|
| 304 |
+
tab5["download_pdf"],
|
| 305 |
+
],
|
| 306 |
+
)
|
| 307 |
+
|
| 308 |
+
tab5["regenerate_btn"].click(
|
| 309 |
+
fn=results.generate_assessment,
|
| 310 |
+
inputs=[session_state],
|
| 311 |
+
outputs=[
|
| 312 |
+
session_state,
|
| 313 |
+
tab5["processing_status"],
|
| 314 |
+
tab5["progress_html"],
|
| 315 |
+
tab5["annotated_gallery"],
|
| 316 |
+
tab5["stats_output"],
|
| 317 |
+
tab5["sow_output"],
|
| 318 |
+
tab5["download_md"],
|
| 319 |
+
tab5["download_pdf"],
|
| 320 |
+
],
|
| 321 |
+
)
|
| 322 |
+
|
| 323 |
+
tab5["back_btn"].click(
|
| 324 |
+
fn=lambda: 3,
|
| 325 |
+
outputs=[tabs],
|
| 326 |
+
)
|
| 327 |
+
|
| 328 |
+
# --- Session Resume Handlers ---
|
| 329 |
+
# Load form data when navigating to tabs
|
| 330 |
+
|
| 331 |
+
# Tab 1 (Project): Load project form fields
|
| 332 |
+
tabs.select(
|
| 333 |
+
fn=lambda session, selected: (
|
| 334 |
+
project.load_form_from_session(session) if selected == 0
|
| 335 |
+
else tuple([gr.update()] * 12)
|
| 336 |
+
),
|
| 337 |
+
inputs=[session_state, tabs],
|
| 338 |
+
outputs=[
|
| 339 |
+
tab1["project_name"],
|
| 340 |
+
tab1["address"],
|
| 341 |
+
tab1["city"],
|
| 342 |
+
tab1["state"],
|
| 343 |
+
tab1["zip_code"],
|
| 344 |
+
tab1["client_name"],
|
| 345 |
+
tab1["fire_date"],
|
| 346 |
+
tab1["assessment_date"],
|
| 347 |
+
tab1["facility_classification"],
|
| 348 |
+
tab1["construction_era"],
|
| 349 |
+
tab1["assessor_name"],
|
| 350 |
+
tab1["assessor_credentials"],
|
| 351 |
+
],
|
| 352 |
+
)
|
| 353 |
+
|
| 354 |
+
# Tab 2 (Rooms): Load room table and stats
|
| 355 |
+
tabs.select(
|
| 356 |
+
fn=lambda session, selected: (
|
| 357 |
+
rooms.load_from_session(session) if selected == 1
|
| 358 |
+
else (gr.update(), gr.update(), gr.update(), gr.update())
|
| 359 |
+
),
|
| 360 |
+
inputs=[session_state, tabs],
|
| 361 |
+
outputs=[
|
| 362 |
+
tab2["rooms_table"],
|
| 363 |
+
tab2["room_count"],
|
| 364 |
+
tab2["total_area"],
|
| 365 |
+
tab2["total_volume"],
|
| 366 |
+
],
|
| 367 |
+
)
|
| 368 |
+
|
| 369 |
+
# Tab 3 (Images): Load gallery and count (room dropdown already handled above)
|
| 370 |
+
tabs.select(
|
| 371 |
+
fn=lambda session, selected: (
|
| 372 |
+
images.load_from_session(session) if selected == 2
|
| 373 |
+
else (gr.update(), gr.update(), gr.update())
|
| 374 |
+
),
|
| 375 |
+
inputs=[session_state, tabs],
|
| 376 |
+
outputs=[
|
| 377 |
+
tab3["images_gallery"],
|
| 378 |
+
tab3["image_count"],
|
| 379 |
+
tab3["resume_warning"],
|
| 380 |
+
],
|
| 381 |
+
)
|
| 382 |
+
|
| 383 |
+
# Tab 4 (Observations): Load observation form fields
|
| 384 |
+
tabs.select(
|
| 385 |
+
fn=lambda session, selected: (
|
| 386 |
+
observations.load_form_from_session(session) if selected == 3
|
| 387 |
+
else tuple([gr.update()] * 15)
|
| 388 |
+
),
|
| 389 |
+
inputs=[session_state, tabs],
|
| 390 |
+
outputs=[
|
| 391 |
+
tab4["smoke_odor"],
|
| 392 |
+
tab4["odor_intensity"],
|
| 393 |
+
tab4["visible_soot"],
|
| 394 |
+
tab4["soot_description"],
|
| 395 |
+
tab4["large_char"],
|
| 396 |
+
tab4["char_density"],
|
| 397 |
+
tab4["ash_residue"],
|
| 398 |
+
tab4["ash_description"],
|
| 399 |
+
tab4["surface_discoloration"],
|
| 400 |
+
tab4["discoloration_description"],
|
| 401 |
+
tab4["dust_interference"],
|
| 402 |
+
tab4["dust_notes"],
|
| 403 |
+
tab4["wildfire_indicators"],
|
| 404 |
+
tab4["wildfire_notes"],
|
| 405 |
+
tab4["additional_notes"],
|
| 406 |
+
],
|
| 407 |
+
)
|
| 408 |
+
|
| 409 |
+
return app
|
| 410 |
+
|
| 411 |
+
|
| 412 |
+
def main():
|
| 413 |
+
"""Entry point for the application."""
|
| 414 |
+
print(f"Starting FDAM AI Pipeline...")
|
| 415 |
+
print(f"Mock models: {settings.mock_models}")
|
| 416 |
+
print(f"Server: {settings.server_host}:{settings.server_port}")
|
| 417 |
+
|
| 418 |
+
app = create_app()
|
| 419 |
+
app.launch(
|
| 420 |
+
server_name=settings.server_host,
|
| 421 |
+
server_port=settings.server_port,
|
| 422 |
+
share=False,
|
| 423 |
+
head=get_head_html(), # Inject localStorage JavaScript
|
| 424 |
+
)
|
| 425 |
+
|
| 426 |
+
|
| 427 |
+
if __name__ == "__main__":
|
| 428 |
+
main()
|
config/__init__.py
ADDED
|
File without changes
|
config/inference.py
ADDED
|
@@ -0,0 +1,34 @@
"""Model inference configuration parameters."""

from dataclasses import dataclass


@dataclass
class VisionInferenceConfig:
    """Configuration for vision model inference."""

    max_new_tokens: int = 4096
    temperature: float = 0.1
    top_p: float = 0.9
    do_sample: bool = True


@dataclass
class EmbeddingConfig:
    """Configuration for embedding model."""

    embedding_dimension: int = 768
    normalize: bool = True


@dataclass
class RerankerConfig:
    """Configuration for reranker model."""

    top_k: int = 5


# Default configurations
vision_config = VisionInferenceConfig()
embedding_config = EmbeddingConfig()
reranker_config = RerankerConfig()
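Because these are plain dataclasses with defaults, callers can construct their own overrides instead of mutating the module-level instances. A minimal sketch (the lower token budget is an arbitrary illustrative value, not a project setting):

```python
from config.inference import VisionInferenceConfig, vision_config

# Hypothetical override for quick smoke tests; 1024 tokens is an arbitrary choice.
fast_config = VisionInferenceConfig(max_new_tokens=1024, temperature=0.0, do_sample=False)

print(vision_config.max_new_tokens)  # 4096 -- module-level default is untouched
print(fast_config.max_new_tokens)    # 1024
```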
config/settings.py
ADDED
|
@@ -0,0 +1,45 @@
"""Application settings with environment variable support."""

from typing import Literal
from pydantic_settings import BaseSettings, SettingsConfigDict


class Settings(BaseSettings):
    """FDAM AI Pipeline configuration."""

    # Environment
    environment: Literal["development", "production"] = "development"

    # Model loading - set MOCK_MODELS=true for local dev on RTX 4090
    mock_models: bool = True

    # Model paths (for production on HuggingFace Spaces)
    vision_model: str = "Qwen/Qwen3-VL-30B-A3B-Instruct"
    embedding_model: str = "Qwen/Qwen3-VL-Embedding-8B"
    reranker_model: str = "Qwen/Qwen3-VL-Reranker-8B"

    # Fallback vision model if VRAM issues
    vision_model_fallback: str = "Qwen/Qwen3-VL-8B-Instruct"

    # ChromaDB
    chroma_persist_dir: str = "./chroma_db"

    # Knowledge base
    knowledge_base_dir: str = "./RAG-KB"

    # Gradio server (0.0.0.0 required for WSL)
    server_host: str = "0.0.0.0"
    server_port: int = 7860

    # Assessment limits
    max_images_per_assessment: int = 20

    model_config = SettingsConfigDict(
        env_file=".env",
        env_prefix="",
        case_sensitive=False,
    )


# Singleton instance
settings = Settings()
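Since Settings is a pydantic-settings BaseSettings with an empty env prefix and case-insensitive matching, every field name doubles as an environment variable (or .env entry). A minimal sketch of that behavior; note that the module-level singleton is built at import time, so overrides only show up in instances constructed afterwards:

```python
import os

from config.settings import Settings

# Field names double as environment variable names (case-insensitive, no prefix).
os.environ["MOCK_MODELS"] = "false"
os.environ["SERVER_PORT"] = "7861"

overridden = Settings()  # re-reads the environment; existing instances are unaffected
assert overridden.mock_models is False
assert overridden.server_port == 7861
```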
models/__init__.py
ADDED
|
File without changes
|
models/loader.py
ADDED
|
@@ -0,0 +1,37 @@
"""Model loading with mock/real switching based on environment."""

from typing import Union

from config.settings import settings

# Type alias for model stack
ModelStack = Union["MockModelStack", "RealModelStack"]  # noqa: F821

# Lazy singleton
_model_stack: ModelStack | None = None


def get_model_stack() -> ModelStack:
    """Get model stack based on environment configuration."""
    if settings.mock_models:
        from models.mock import MockModelStack

        return MockModelStack().load_all()
    else:
        from models.real import RealModelStack

        return RealModelStack().load_all()


def get_models() -> ModelStack:
    """Get or create the singleton model stack."""
    global _model_stack
    if _model_stack is None:
        _model_stack = get_model_stack()
    return _model_stack


def reset_models() -> None:
    """Reset the model stack (useful for testing)."""
    global _model_stack
    _model_stack = None
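Because the stack is cached as a lazy module-level singleton, tests that need a fresh stack should call reset_models() between cases. A minimal sketch of that pattern as a pytest fixture (the fixture and test names are illustrative, not taken from the test suite):

```python
import pytest

from models.loader import get_models, reset_models


@pytest.fixture()
def model_stack():
    reset_models()        # drop any stack cached by a previous test
    stack = get_models()  # with MOCK_MODELS=true this builds the mock stack
    yield stack
    reset_models()        # leave no singleton behind for the next test


def test_stack_is_loaded(model_stack):
    assert model_stack.is_loaded()
```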
models/mock.py
ADDED
|
@@ -0,0 +1,157 @@
| 1 |
+
"""Mock model implementations for local development on RTX 4090."""
|
| 2 |
+
|
| 3 |
+
import random
|
| 4 |
+
from typing import Any
|
| 5 |
+
from PIL import Image
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class MockVisionModel:
|
| 9 |
+
"""Mock vision model that returns realistic JSON responses."""
|
| 10 |
+
|
| 11 |
+
ZONES = ["burn", "near-field", "far-field"]
|
| 12 |
+
CONDITIONS = ["background", "light", "moderate", "heavy", "structural-damage"]
|
| 13 |
+
MATERIALS = [
|
| 14 |
+
{"type": "steel", "category": "non-porous"},
|
| 15 |
+
{"type": "concrete", "category": "non-porous"},
|
| 16 |
+
{"type": "glass", "category": "non-porous"},
|
| 17 |
+
{"type": "cmu", "category": "non-porous"},
|
| 18 |
+
{"type": "drywall-painted", "category": "semi-porous"},
|
| 19 |
+
{"type": "wood-sealed", "category": "semi-porous"},
|
| 20 |
+
{"type": "drywall-unpainted", "category": "porous"},
|
| 21 |
+
{"type": "carpet", "category": "porous"},
|
| 22 |
+
{"type": "insulation-fiberglass", "category": "porous"},
|
| 23 |
+
{"type": "acoustic-tile", "category": "porous"},
|
| 24 |
+
{"type": "ductwork-rigid", "category": "hvac"},
|
| 25 |
+
{"type": "ductwork-flexible", "category": "hvac"},
|
| 26 |
+
]
|
| 27 |
+
|
| 28 |
+
def analyze_image(self, image: Image.Image, context: str = "") -> dict[str, Any]:
|
| 29 |
+
"""Return mock vision analysis matching the spec schema."""
|
| 30 |
+
selected_zone = random.choice(self.ZONES)
|
| 31 |
+
selected_condition = random.choice(self.CONDITIONS)
|
| 32 |
+
|
| 33 |
+
# Generate 2-4 random materials
|
| 34 |
+
num_materials = random.randint(2, 4)
|
| 35 |
+
materials = []
|
| 36 |
+
for _ in range(num_materials):
|
| 37 |
+
mat = random.choice(self.MATERIALS).copy()
|
| 38 |
+
mat.update(
|
| 39 |
+
{
|
| 40 |
+
"confidence": round(random.uniform(0.75, 0.95), 2),
|
| 41 |
+
"location_description": "Visible in image",
|
| 42 |
+
"bounding_box": {
|
| 43 |
+
"x": round(random.uniform(0.1, 0.3), 2),
|
| 44 |
+
"y": round(random.uniform(0.1, 0.3), 2),
|
| 45 |
+
"width": round(random.uniform(0.2, 0.5), 2),
|
| 46 |
+
"height": round(random.uniform(0.2, 0.5), 2),
|
| 47 |
+
},
|
| 48 |
+
}
|
| 49 |
+
)
|
| 50 |
+
materials.append(mat)
|
| 51 |
+
|
| 52 |
+
soot_visible = random.choice([True, False])
|
| 53 |
+
char_visible = random.choice([True, False])
|
| 54 |
+
ash_visible = random.choice([True, False])
|
| 55 |
+
|
| 56 |
+
return {
|
| 57 |
+
"zone": {
|
| 58 |
+
"classification": selected_zone,
|
| 59 |
+
"confidence": round(random.uniform(0.7, 0.95), 2),
|
| 60 |
+
"reasoning": f"Mock analysis detected {selected_zone} zone characteristics based on visible damage patterns",
|
| 61 |
+
},
|
| 62 |
+
"condition": {
|
| 63 |
+
"level": selected_condition,
|
| 64 |
+
"confidence": round(random.uniform(0.65, 0.90), 2),
|
| 65 |
+
"reasoning": f"Surface shows {selected_condition} contamination levels",
|
| 66 |
+
},
|
| 67 |
+
"materials": materials,
|
| 68 |
+
"combustion_indicators": {
|
| 69 |
+
"soot_visible": soot_visible,
|
| 70 |
+
"soot_pattern": "Visible deposition on horizontal surfaces"
|
| 71 |
+
if soot_visible
|
| 72 |
+
else None,
|
| 73 |
+
"char_visible": char_visible,
|
| 74 |
+
"char_description": "Angular black particles visible"
|
| 75 |
+
if char_visible
|
| 76 |
+
else None,
|
| 77 |
+
"ash_visible": ash_visible,
|
| 78 |
+
"ash_description": "Gray powdery residue on surfaces"
|
| 79 |
+
if ash_visible
|
| 80 |
+
else None,
|
| 81 |
+
},
|
| 82 |
+
"structural_concerns": [],
|
| 83 |
+
"access_issues": [],
|
| 84 |
+
"recommended_sampling_locations": [
|
| 85 |
+
{
|
| 86 |
+
"description": "Center of visible contamination",
|
| 87 |
+
"sample_type": "tape_lift",
|
| 88 |
+
"priority": "high",
|
| 89 |
+
},
|
| 90 |
+
{
|
| 91 |
+
"description": "Comparison area with less contamination",
|
| 92 |
+
"sample_type": "surface_wipe",
|
| 93 |
+
"priority": "medium",
|
| 94 |
+
},
|
| 95 |
+
],
|
| 96 |
+
"flags_for_review": [],
|
| 97 |
+
}
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
class MockEmbeddingModel:
|
| 101 |
+
"""Mock embedding model that returns random vectors."""
|
| 102 |
+
|
| 103 |
+
def __init__(self, dimension: int = 768):
|
| 104 |
+
self.dimension = dimension
|
| 105 |
+
|
| 106 |
+
def embed(self, text: str) -> list[float]:
|
| 107 |
+
"""Return mock embedding vector."""
|
| 108 |
+
# Use hash of text for reproducibility
|
| 109 |
+
random.seed(hash(text) % (2**32))
|
| 110 |
+
embedding = [random.uniform(-1, 1) for _ in range(self.dimension)]
|
| 111 |
+
random.seed() # Reset seed
|
| 112 |
+
return embedding
|
| 113 |
+
|
| 114 |
+
def embed_batch(self, texts: list[str]) -> list[list[float]]:
|
| 115 |
+
"""Return mock embeddings for a batch of texts."""
|
| 116 |
+
return [self.embed(text) for text in texts]
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
class MockRerankerModel:
|
| 120 |
+
"""Mock reranker that returns random scores."""
|
| 121 |
+
|
| 122 |
+
def rerank(self, query: str, documents: list[str]) -> list[float]:
|
| 123 |
+
"""Return mock reranking scores."""
|
| 124 |
+
# Higher scores for documents that share more words with query
|
| 125 |
+
scores = []
|
| 126 |
+
query_words = set(query.lower().split())
|
| 127 |
+
for doc in documents:
|
| 128 |
+
doc_words = set(doc.lower().split())
|
| 129 |
+
overlap = len(query_words & doc_words)
|
| 130 |
+
base_score = overlap / max(len(query_words), 1)
|
| 131 |
+
noise = random.uniform(-0.1, 0.1)
|
| 132 |
+
scores.append(min(1.0, max(0.0, base_score + noise)))
|
| 133 |
+
return scores
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
class MockModelStack:
|
| 137 |
+
"""Mock model stack for local development."""
|
| 138 |
+
|
| 139 |
+
def __init__(self):
|
| 140 |
+
self.vision = MockVisionModel()
|
| 141 |
+
self.embedding = MockEmbeddingModel()
|
| 142 |
+
self.reranker = MockRerankerModel()
|
| 143 |
+
self.loaded = False
|
| 144 |
+
|
| 145 |
+
def load_all(self) -> "MockModelStack":
|
| 146 |
+
"""Simulate model loading."""
|
| 147 |
+
print("[MOCK] Loading mock models for local development...")
|
| 148 |
+
print("[MOCK] Vision model: MockVisionModel")
|
| 149 |
+
print("[MOCK] Embedding model: MockEmbeddingModel")
|
| 150 |
+
print("[MOCK] Reranker model: MockRerankerModel")
|
| 151 |
+
self.loaded = True
|
| 152 |
+
print("[MOCK] All mock models loaded successfully.")
|
| 153 |
+
return self
|
| 154 |
+
|
| 155 |
+
def is_loaded(self) -> bool:
|
| 156 |
+
"""Check if models are loaded."""
|
| 157 |
+
return self.loaded
|
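One caveat worth noting about MockEmbeddingModel above: it seeds random with hash(text), which makes embeddings repeatable within a single Python process but not across separate runs, because string hashing is salted unless PYTHONHASHSEED is pinned. A small sketch of the in-process behavior:

```python
from models.mock import MockEmbeddingModel

model = MockEmbeddingModel(dimension=8)

# Same text, same process -> identical pseudo-embedding vector.
assert model.embed("soot on drywall") == model.embed("soot on drywall")

# Across interpreter runs the vectors differ unless the hash seed is pinned,
# e.g. run tests with: PYTHONHASHSEED=0 pytest tests/test_rag.py
```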
models/real.py
ADDED
|
@@ -0,0 +1,439 @@
| 1 |
+
"""Real model loading for production (HuggingFace Spaces with 4xL4 GPUs).
|
| 2 |
+
|
| 3 |
+
This module loads the actual Qwen3-VL models for production use.
|
| 4 |
+
Requires ~90GB VRAM (4xL4 with 96GB total).
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import json
|
| 8 |
+
import logging
|
| 9 |
+
import re
|
| 10 |
+
import torch
|
| 11 |
+
from typing import Any
|
| 12 |
+
from PIL import Image
|
| 13 |
+
|
| 14 |
+
from config.settings import settings
|
| 15 |
+
|
| 16 |
+
logger = logging.getLogger(__name__)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class RealModelStack:
|
| 20 |
+
"""Real model stack for production on HuggingFace Spaces."""
|
| 21 |
+
|
| 22 |
+
def __init__(self):
|
| 23 |
+
self.models: dict[str, Any] = {}
|
| 24 |
+
self.processors: dict[str, Any] = {}
|
| 25 |
+
self.loaded = False
|
| 26 |
+
|
| 27 |
+
def load_all(self) -> "RealModelStack":
|
| 28 |
+
"""Load all models with device_map='auto' for multi-GPU distribution."""
|
| 29 |
+
from transformers import AutoModel, AutoProcessor
|
| 30 |
+
|
| 31 |
+
print(f"Loading models on {'cuda' if torch.cuda.is_available() else 'cpu'}...")
|
| 32 |
+
|
| 33 |
+
# Vision model (~58GB in BF16)
|
| 34 |
+
print(f"Loading vision model: {settings.vision_model}...")
|
| 35 |
+
try:
|
| 36 |
+
from transformers import Qwen3VLMoeForConditionalGeneration
|
| 37 |
+
|
| 38 |
+
self.models["vision"] = Qwen3VLMoeForConditionalGeneration.from_pretrained(
|
| 39 |
+
settings.vision_model,
|
| 40 |
+
torch_dtype=torch.bfloat16,
|
| 41 |
+
device_map="auto",
|
| 42 |
+
trust_remote_code=True,
|
| 43 |
+
)
|
| 44 |
+
self.processors["vision"] = AutoProcessor.from_pretrained(
|
| 45 |
+
settings.vision_model,
|
| 46 |
+
trust_remote_code=True,
|
| 47 |
+
)
|
| 48 |
+
except Exception as e:
|
| 49 |
+
print(f"Failed to load 30B vision model: {e}")
|
| 50 |
+
print(f"Falling back to {settings.vision_model_fallback}...")
|
| 51 |
+
self.models["vision"] = Qwen3VLMoeForConditionalGeneration.from_pretrained(
|
| 52 |
+
settings.vision_model_fallback,
|
| 53 |
+
torch_dtype=torch.bfloat16,
|
| 54 |
+
device_map="auto",
|
| 55 |
+
trust_remote_code=True,
|
| 56 |
+
)
|
| 57 |
+
self.processors["vision"] = AutoProcessor.from_pretrained(
|
| 58 |
+
settings.vision_model_fallback,
|
| 59 |
+
trust_remote_code=True,
|
| 60 |
+
)
|
| 61 |
+
|
| 62 |
+
# Embedding model (~16GB in BF16)
|
| 63 |
+
print(f"Loading embedding model: {settings.embedding_model}...")
|
| 64 |
+
self.models["embedding"] = AutoModel.from_pretrained(
|
| 65 |
+
settings.embedding_model,
|
| 66 |
+
torch_dtype=torch.bfloat16,
|
| 67 |
+
device_map="auto",
|
| 68 |
+
trust_remote_code=True,
|
| 69 |
+
)
|
| 70 |
+
self.processors["embedding"] = AutoProcessor.from_pretrained(
|
| 71 |
+
settings.embedding_model,
|
| 72 |
+
trust_remote_code=True,
|
| 73 |
+
)
|
| 74 |
+
|
| 75 |
+
# Reranker model (~16GB in BF16)
|
| 76 |
+
print(f"Loading reranker model: {settings.reranker_model}...")
|
| 77 |
+
self.models["reranker"] = AutoModel.from_pretrained(
|
| 78 |
+
settings.reranker_model,
|
| 79 |
+
torch_dtype=torch.bfloat16,
|
| 80 |
+
device_map="auto",
|
| 81 |
+
trust_remote_code=True,
|
| 82 |
+
)
|
| 83 |
+
self.processors["reranker"] = AutoProcessor.from_pretrained(
|
| 84 |
+
settings.reranker_model,
|
| 85 |
+
trust_remote_code=True,
|
| 86 |
+
)
|
| 87 |
+
|
| 88 |
+
self.loaded = True
|
| 89 |
+
print("All models loaded successfully.")
|
| 90 |
+
return self
|
| 91 |
+
|
| 92 |
+
def is_loaded(self) -> bool:
|
| 93 |
+
"""Check if models are loaded."""
|
| 94 |
+
return self.loaded
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
class RealVisionModel:
|
| 98 |
+
"""Wrapper for real vision model inference."""
|
| 99 |
+
|
| 100 |
+
# Analysis prompt template for FDAM fire damage assessment
|
| 101 |
+
ANALYSIS_PROMPT = """Analyze this fire damage image and return a JSON response with the following structure:
|
| 102 |
+
|
| 103 |
+
{
|
| 104 |
+
"zone": {
|
| 105 |
+
"classification": "burn" | "near-field" | "far-field",
|
| 106 |
+
"confidence": 0.0-1.0,
|
| 107 |
+
"reasoning": "explanation"
|
| 108 |
+
},
|
| 109 |
+
"condition": {
|
| 110 |
+
"level": "background" | "light" | "moderate" | "heavy" | "structural-damage",
|
| 111 |
+
"confidence": 0.0-1.0,
|
| 112 |
+
"reasoning": "explanation"
|
| 113 |
+
},
|
| 114 |
+
"materials": [
|
| 115 |
+
{
|
| 116 |
+
"type": "material type (e.g., drywall, concrete, steel, wood)",
|
| 117 |
+
"category": "non-porous" | "semi-porous" | "porous" | "hvac",
|
| 118 |
+
"confidence": 0.0-1.0,
|
| 119 |
+
"location_description": "where in image",
|
| 120 |
+
"bounding_box": {"x": 0.0-1.0, "y": 0.0-1.0, "width": 0.0-1.0, "height": 0.0-1.0}
|
| 121 |
+
}
|
| 122 |
+
],
|
| 123 |
+
"combustion_indicators": {
|
| 124 |
+
"soot_visible": true/false,
|
| 125 |
+
"soot_pattern": "description or null",
|
| 126 |
+
"char_visible": true/false,
|
| 127 |
+
"char_description": "description or null",
|
| 128 |
+
"ash_visible": true/false,
|
| 129 |
+
"ash_description": "description or null"
|
| 130 |
+
},
|
| 131 |
+
"structural_concerns": ["list of structural issues if any"],
|
| 132 |
+
"access_issues": ["list of access problems if any"],
|
| 133 |
+
"recommended_sampling_locations": [
|
| 134 |
+
{
|
| 135 |
+
"description": "where to sample",
|
| 136 |
+
"sample_type": "tape_lift" | "surface_wipe" | "air_sample",
|
| 137 |
+
"priority": "high" | "medium" | "low"
|
| 138 |
+
}
|
| 139 |
+
],
|
| 140 |
+
"flags_for_review": ["any items requiring human review"]
|
| 141 |
+
}
|
| 142 |
+
|
| 143 |
+
Zone definitions:
|
| 144 |
+
- burn: Direct fire involvement, visible charring, structural damage
|
| 145 |
+
- near-field: Adjacent to burn zone, heavy smoke/heat exposure, discoloration
|
| 146 |
+
- far-field: Smoke migration only, light deposits, no structural damage
|
| 147 |
+
|
| 148 |
+
Condition definitions:
|
| 149 |
+
- background: No visible contamination
|
| 150 |
+
- light: Faint discoloration, minimal deposits
|
| 151 |
+
- moderate: Visible film/deposits, surface color altered
|
| 152 |
+
- heavy: Thick deposits, surface texture obscured
|
| 153 |
+
- structural-damage: Physical damage requiring repair before cleaning
|
| 154 |
+
|
| 155 |
+
IMPORTANT: Return ONLY valid JSON, no additional text."""
|
| 156 |
+
|
| 157 |
+
def __init__(self, model, processor):
|
| 158 |
+
self.model = model
|
| 159 |
+
self.processor = processor
|
| 160 |
+
|
| 161 |
+
def analyze_image(self, image: Image.Image, context: str = "") -> dict[str, Any]:
|
| 162 |
+
"""Analyze an image and return structured results."""
|
| 163 |
+
try:
|
| 164 |
+
from qwen_vl_utils import process_vision_info
|
| 165 |
+
except ImportError:
|
| 166 |
+
logger.warning("qwen_vl_utils not available, using basic processing")
|
| 167 |
+
process_vision_info = None
|
| 168 |
+
|
| 169 |
+
# Build the analysis prompt
|
| 170 |
+
prompt = self.ANALYSIS_PROMPT
|
| 171 |
+
if context:
|
| 172 |
+
prompt = f"Context: {context}\n\n{prompt}"
|
| 173 |
+
|
| 174 |
+
# Prepare messages in Qwen-VL format
|
| 175 |
+
messages = [
|
| 176 |
+
{
|
| 177 |
+
"role": "user",
|
| 178 |
+
"content": [
|
| 179 |
+
{"type": "image", "image": image},
|
| 180 |
+
{"type": "text", "text": prompt},
|
| 181 |
+
],
|
| 182 |
+
}
|
| 183 |
+
]
|
| 184 |
+
|
| 185 |
+
try:
|
| 186 |
+
# Apply chat template
|
| 187 |
+
text = self.processor.apply_chat_template(
|
| 188 |
+
messages, tokenize=False, add_generation_prompt=True
|
| 189 |
+
)
|
| 190 |
+
|
| 191 |
+
# Process vision info if available
|
| 192 |
+
if process_vision_info:
|
| 193 |
+
image_inputs, video_inputs = process_vision_info(messages)
|
| 194 |
+
inputs = self.processor(
|
| 195 |
+
text=[text],
|
| 196 |
+
images=image_inputs,
|
| 197 |
+
videos=video_inputs,
|
| 198 |
+
return_tensors="pt",
|
| 199 |
+
padding=True,
|
| 200 |
+
)
|
| 201 |
+
else:
|
| 202 |
+
# Fallback: basic image processing
|
| 203 |
+
inputs = self.processor(
|
| 204 |
+
text=[text],
|
| 205 |
+
images=[image],
|
| 206 |
+
return_tensors="pt",
|
| 207 |
+
padding=True,
|
| 208 |
+
)
|
| 209 |
+
|
| 210 |
+
# Move inputs to model device
|
| 211 |
+
inputs = {k: v.to(self.model.device) for k, v in inputs.items()}
|
| 212 |
+
|
| 213 |
+
# Generate response
|
| 214 |
+
with torch.no_grad():
|
| 215 |
+
outputs = self.model.generate(
|
| 216 |
+
**inputs,
|
| 217 |
+
max_new_tokens=2048,
|
| 218 |
+
do_sample=False,
|
| 219 |
+
temperature=None,
|
| 220 |
+
top_p=None,
|
| 221 |
+
)
|
| 222 |
+
|
| 223 |
+
# Decode response
|
| 224 |
+
response_text = self.processor.decode(
|
| 225 |
+
outputs[0], skip_special_tokens=True
|
| 226 |
+
)
|
| 227 |
+
|
| 228 |
+
# Parse JSON from response
|
| 229 |
+
return self._parse_vision_response(response_text)
|
| 230 |
+
|
| 231 |
+
except Exception as e:
|
| 232 |
+
logger.error(f"Vision analysis failed: {e}")
|
| 233 |
+
return self._get_fallback_response(str(e))
|
| 234 |
+
|
| 235 |
+
def _parse_vision_response(self, response: str) -> dict[str, Any]:
|
| 236 |
+
"""Parse JSON response from vision model."""
|
| 237 |
+
try:
|
| 238 |
+
# Try to extract JSON from response
|
| 239 |
+
# Look for JSON block in various formats
|
| 240 |
+
json_match = re.search(r'\{[\s\S]*\}', response)
|
| 241 |
+
if json_match:
|
| 242 |
+
json_str = json_match.group()
|
| 243 |
+
return json.loads(json_str)
|
| 244 |
+
else:
|
| 245 |
+
logger.warning("No JSON found in vision response")
|
| 246 |
+
return self._get_fallback_response("No JSON in response")
|
| 247 |
+
except json.JSONDecodeError as e:
|
| 248 |
+
logger.warning(f"Failed to parse vision JSON: {e}")
|
| 249 |
+
return self._get_fallback_response(f"JSON parse error: {e}")
|
| 250 |
+
|
| 251 |
+
def _get_fallback_response(self, reason: str) -> dict[str, Any]:
|
| 252 |
+
"""Return fallback response when analysis fails."""
|
| 253 |
+
return {
|
| 254 |
+
"zone": {
|
| 255 |
+
"classification": "far-field",
|
| 256 |
+
"confidence": 0.3,
|
| 257 |
+
"reasoning": f"Fallback due to: {reason}",
|
| 258 |
+
},
|
| 259 |
+
"condition": {
|
| 260 |
+
"level": "light",
|
| 261 |
+
"confidence": 0.3,
|
| 262 |
+
"reasoning": f"Fallback due to: {reason}",
|
| 263 |
+
},
|
| 264 |
+
"materials": [
|
| 265 |
+
{
|
| 266 |
+
"type": "general-surface",
|
| 267 |
+
"category": "semi-porous",
|
| 268 |
+
"confidence": 0.3,
|
| 269 |
+
"location_description": "Unable to determine",
|
| 270 |
+
"bounding_box": {"x": 0.0, "y": 0.0, "width": 1.0, "height": 1.0},
|
| 271 |
+
}
|
| 272 |
+
],
|
| 273 |
+
"combustion_indicators": {
|
| 274 |
+
"soot_visible": False,
|
| 275 |
+
"soot_pattern": None,
|
| 276 |
+
"char_visible": False,
|
| 277 |
+
"char_description": None,
|
| 278 |
+
"ash_visible": False,
|
| 279 |
+
"ash_description": None,
|
| 280 |
+
},
|
| 281 |
+
"structural_concerns": [],
|
| 282 |
+
"access_issues": [],
|
| 283 |
+
"recommended_sampling_locations": [],
|
| 284 |
+
"flags_for_review": [f"Analysis failed: {reason}"],
|
| 285 |
+
"_fallback_used": True,
|
| 286 |
+
}
|
| 287 |
+
|
| 288 |
+
|
| 289 |
+
class RealEmbeddingModel:
|
| 290 |
+
"""Wrapper for real embedding model inference."""
|
| 291 |
+
|
| 292 |
+
def __init__(self, model, processor):
|
| 293 |
+
self.model = model
|
| 294 |
+
self.processor = processor
|
| 295 |
+
|
| 296 |
+
def embed(self, text: str) -> list[float]:
|
| 297 |
+
"""Generate embedding for text using mean pooling."""
|
| 298 |
+
try:
|
| 299 |
+
# Tokenize input
|
| 300 |
+
inputs = self.processor(
|
| 301 |
+
text,
|
| 302 |
+
return_tensors="pt",
|
| 303 |
+
padding=True,
|
| 304 |
+
truncation=True,
|
| 305 |
+
max_length=512,
|
| 306 |
+
)
|
| 307 |
+
|
| 308 |
+
# Move to model device
|
| 309 |
+
inputs = {k: v.to(self.model.device) for k, v in inputs.items()}
|
| 310 |
+
|
| 311 |
+
# Generate embeddings
|
| 312 |
+
with torch.no_grad():
|
| 313 |
+
outputs = self.model(**inputs)
|
| 314 |
+
|
| 315 |
+
# Use mean pooling over sequence dimension
|
| 316 |
+
# outputs.last_hidden_state shape: (batch, seq_len, hidden_dim)
|
| 317 |
+
attention_mask = inputs.get("attention_mask")
|
| 318 |
+
if attention_mask is not None:
|
| 319 |
+
# Mask-weighted mean pooling
|
| 320 |
+
mask_expanded = attention_mask.unsqueeze(-1).expand(
|
| 321 |
+
outputs.last_hidden_state.size()
|
| 322 |
+
).float()
|
| 323 |
+
sum_embeddings = torch.sum(
|
| 324 |
+
outputs.last_hidden_state * mask_expanded, dim=1
|
| 325 |
+
)
|
| 326 |
+
sum_mask = torch.clamp(mask_expanded.sum(dim=1), min=1e-9)
|
| 327 |
+
embeddings = sum_embeddings / sum_mask
|
| 328 |
+
else:
|
| 329 |
+
# Simple mean if no attention mask
|
| 330 |
+
embeddings = outputs.last_hidden_state.mean(dim=1)
|
| 331 |
+
|
| 332 |
+
# Normalize
|
| 333 |
+
embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1)
|
| 334 |
+
|
| 335 |
+
return embeddings[0].cpu().tolist()
|
| 336 |
+
|
| 337 |
+
except Exception as e:
|
| 338 |
+
logger.error(f"Embedding generation failed: {e}")
|
| 339 |
+
# Return zero vector as fallback
|
| 340 |
+
hidden_size = getattr(self.model.config, "hidden_size", 4096)
|
| 341 |
+
return [0.0] * hidden_size
|
| 342 |
+
|
| 343 |
+
def embed_batch(self, texts: list[str]) -> list[list[float]]:
|
| 344 |
+
"""Generate embeddings for a batch of texts."""
|
| 345 |
+
return [self.embed(text) for text in texts]
|
| 346 |
+
|
| 347 |
+
|
| 348 |
+
class RealRerankerModel:
|
| 349 |
+
"""Wrapper for real reranker model inference."""
|
| 350 |
+
|
| 351 |
+
def __init__(self, model, processor):
|
| 352 |
+
self.model = model
|
| 353 |
+
self.processor = processor
|
| 354 |
+
|
| 355 |
+
def rerank(self, query: str, documents: list[str]) -> list[float]:
|
| 356 |
+
"""Rerank documents by relevance to query.
|
| 357 |
+
|
| 358 |
+
Returns a list of relevance scores for each document.
|
| 359 |
+
Higher scores indicate more relevant documents.
|
| 360 |
+
"""
|
| 361 |
+
if not documents:
|
| 362 |
+
return []
|
| 363 |
+
|
| 364 |
+
scores = []
|
| 365 |
+
for doc in documents:
|
| 366 |
+
try:
|
| 367 |
+
score = self._score_pair(query, doc)
|
| 368 |
+
scores.append(score)
|
| 369 |
+
except Exception as e:
|
| 370 |
+
logger.warning(f"Reranking failed for document: {e}")
|
| 371 |
+
scores.append(0.0)
|
| 372 |
+
|
| 373 |
+
return scores
|
| 374 |
+
|
| 375 |
+
def _score_pair(self, query: str, document: str) -> float:
|
| 376 |
+
"""Score a single query-document pair."""
|
| 377 |
+
# Format as query-document pair for cross-encoder
|
| 378 |
+
# Truncate document if too long
|
| 379 |
+
max_doc_len = 400
|
| 380 |
+
if len(document) > max_doc_len:
|
| 381 |
+
document = document[:max_doc_len] + "..."
|
| 382 |
+
|
| 383 |
+
pair_text = f"Query: {query}\n\nDocument: {document}"
|
| 384 |
+
|
| 385 |
+
try:
|
| 386 |
+
inputs = self.processor(
|
| 387 |
+
pair_text,
|
| 388 |
+
return_tensors="pt",
|
| 389 |
+
padding=True,
|
| 390 |
+
truncation=True,
|
| 391 |
+
max_length=512,
|
| 392 |
+
)
|
| 393 |
+
|
| 394 |
+
# Move to model device
|
| 395 |
+
inputs = {k: v.to(self.model.device) for k, v in inputs.items()}
|
| 396 |
+
|
| 397 |
+
with torch.no_grad():
|
| 398 |
+
outputs = self.model(**inputs)
|
| 399 |
+
|
| 400 |
+
# Use CLS token representation for scoring
|
| 401 |
+
# Take mean of last hidden state as a simple relevance score
|
| 402 |
+
cls_embedding = outputs.last_hidden_state[:, 0, :]
|
| 403 |
+
|
| 404 |
+
# Normalize and take mean as score
|
| 405 |
+
score = cls_embedding.norm(dim=-1).mean().item()
|
| 406 |
+
|
| 407 |
+
# Normalize score to 0-1 range (approximate)
|
| 408 |
+
# This is heuristic; actual reranker models have specific score heads
|
| 409 |
+
score = min(1.0, max(0.0, score / 100.0))
|
| 410 |
+
|
| 411 |
+
return score
|
| 412 |
+
|
| 413 |
+
except Exception as e:
|
| 414 |
+
logger.error(f"Reranker scoring failed: {e}")
|
| 415 |
+
return 0.0
|
| 416 |
+
|
| 417 |
+
def rerank_with_indices(
|
| 418 |
+
self, query: str, documents: list[str], top_k: int = None
|
| 419 |
+
) -> list[tuple[int, float]]:
|
| 420 |
+
"""Rerank and return sorted (index, score) tuples.
|
| 421 |
+
|
| 422 |
+
Args:
|
| 423 |
+
query: The search query
|
| 424 |
+
documents: List of documents to rerank
|
| 425 |
+
top_k: Optional limit on number of results
|
| 426 |
+
|
| 427 |
+
Returns:
|
| 428 |
+
List of (original_index, score) tuples, sorted by score descending
|
| 429 |
+
"""
|
| 430 |
+
scores = self.rerank(query, documents)
|
| 431 |
+
|
| 432 |
+
# Create (index, score) pairs and sort by score descending
|
| 433 |
+
indexed_scores = list(enumerate(scores))
|
| 434 |
+
indexed_scores.sort(key=lambda x: x[1], reverse=True)
|
| 435 |
+
|
| 436 |
+
if top_k is not None:
|
| 437 |
+
indexed_scores = indexed_scores[:top_k]
|
| 438 |
+
|
| 439 |
+
return indexed_scores
|
pipeline/__init__.py
ADDED
|
@@ -0,0 +1,23 @@
"""FDAM Pipeline - Fire Damage Assessment Processing.

This module provides the core processing pipeline for generating
fire damage assessment reports using AI vision analysis and
RAG-enhanced methodology lookup.
"""

from .calculations import FDAMCalculator
from .dispositions import DispositionEngine
from .generator import DocumentGenerator
from .main import FDAMPipeline, PipelineResult
from .pdf_generator import PDFGenerator, PDFResult, generate_sow_pdf

__all__ = [
    "FDAMCalculator",
    "DispositionEngine",
    "DocumentGenerator",
    "FDAMPipeline",
    "PipelineResult",
    "PDFGenerator",
    "PDFResult",
    "generate_sow_pdf",
]
pipeline/calculations.py
ADDED
|
@@ -0,0 +1,325 @@
|
| 1 |
+
"""FDAM Calculations Module.
|
| 2 |
+
|
| 3 |
+
Implements deterministic calculations from FDAM v4.0.1:
|
| 4 |
+
- Air filtration requirements (ACH per NADCA ACR 2021)
|
| 5 |
+
- Sample density guidelines
|
| 6 |
+
- Regulatory flags
|
| 7 |
+
- Metals thresholds lookup
|
| 8 |
+
"""
|
| 9 |
+
|
| 10 |
+
import math
|
| 11 |
+
from dataclasses import dataclass, field
|
| 12 |
+
from typing import Literal
|
| 13 |
+
|
| 14 |
+
from ui.state import SessionState
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
@dataclass
|
| 18 |
+
class AirFiltrationResult:
|
| 19 |
+
"""Air filtration calculation results."""
|
| 20 |
+
|
| 21 |
+
total_volume_cf: float
|
| 22 |
+
required_ach: int
|
| 23 |
+
unit_cfm: int
|
| 24 |
+
units_required: int
|
| 25 |
+
calculation_notes: str
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
@dataclass
|
| 29 |
+
class SampleDensityResult:
|
| 30 |
+
"""Sample density calculation results."""
|
| 31 |
+
|
| 32 |
+
total_area_sf: float
|
| 33 |
+
tape_lifts_min: int
|
| 34 |
+
tape_lifts_max: int
|
| 35 |
+
surface_wipes_min: int
|
| 36 |
+
surface_wipes_max: int
|
| 37 |
+
ceiling_deck_samples: int
|
| 38 |
+
notes: list[str] = field(default_factory=list)
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
@dataclass
|
| 42 |
+
class RegulatoryFlags:
|
| 43 |
+
"""Regulatory requirements based on building characteristics."""
|
| 44 |
+
|
| 45 |
+
lbp_survey_required: bool = False
|
| 46 |
+
acm_survey_required: bool = False
|
| 47 |
+
acm_survey_recommended: bool = False
|
| 48 |
+
enhanced_childcare_thresholds: bool = False
|
| 49 |
+
notes: list[str] = field(default_factory=list)
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
@dataclass
|
| 53 |
+
class MetalsThresholds:
|
| 54 |
+
"""Metals clearance thresholds for a facility type."""
|
| 55 |
+
|
| 56 |
+
lead_ug_100cm2: float
|
| 57 |
+
cadmium_ug_100cm2: float
|
| 58 |
+
arsenic_ug_100cm2: float
|
| 59 |
+
chromium_vi_ug_100cm2: float
|
| 60 |
+
beryllium_ug_100cm2: float
|
| 61 |
+
facility_type: str
|
| 62 |
+
source: str = "BNL SOP IH75190, Attachment 9.3"
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
# Threshold lookup tables from BNL SOP IH75190
|
| 66 |
+
METALS_THRESHOLDS = {
|
| 67 |
+
"non-operational": MetalsThresholds(
|
| 68 |
+
lead_ug_100cm2=22.0,
|
| 69 |
+
cadmium_ug_100cm2=3.3,
|
| 70 |
+
arsenic_ug_100cm2=6.7,
|
| 71 |
+
chromium_vi_ug_100cm2=3.3,
|
| 72 |
+
beryllium_ug_100cm2=0.2,
|
| 73 |
+
facility_type="Non-Operational",
|
| 74 |
+
),
|
| 75 |
+
"operational": MetalsThresholds(
|
| 76 |
+
lead_ug_100cm2=500.0,
|
| 77 |
+
cadmium_ug_100cm2=50.0,
|
| 78 |
+
arsenic_ug_100cm2=100.0,
|
| 79 |
+
chromium_vi_ug_100cm2=50.0,
|
| 80 |
+
beryllium_ug_100cm2=3.0,
|
| 81 |
+
facility_type="Operational",
|
| 82 |
+
),
|
| 83 |
+
"public-childcare": MetalsThresholds(
|
| 84 |
+
lead_ug_100cm2=4.3, # EPA/HUD October 2024 for floors
|
| 85 |
+
cadmium_ug_100cm2=3.3, # Use non-operational as baseline
|
| 86 |
+
arsenic_ug_100cm2=6.7,
|
| 87 |
+
chromium_vi_ug_100cm2=3.3,
|
| 88 |
+
beryllium_ug_100cm2=0.2,
|
| 89 |
+
facility_type="Public/Childcare",
|
| 90 |
+
source="EPA/HUD October 2024 + BNL SOP IH75190",
|
| 91 |
+
),
|
| 92 |
+
}
|
| 93 |
+
|
| 94 |
+
# Particulate thresholds from EAA Method Guide
|
| 95 |
+
PARTICULATE_THRESHOLDS = {
|
| 96 |
+
"ash_char": {
|
| 97 |
+
"clearance": 150, # cts/cm²
|
| 98 |
+
"unit": "cts/cm²",
|
| 99 |
+
"source": "EAA Method Guide / FDAM §1.5",
|
| 100 |
+
},
|
| 101 |
+
"aciniform_soot": {
|
| 102 |
+
"clearance": 500, # cts/cm²
|
| 103 |
+
"unit": "cts/cm²",
|
| 104 |
+
"source": "EAA Method Guide / FDAM §1.5",
|
| 105 |
+
},
|
| 106 |
+
}
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
class FDAMCalculator:
|
| 110 |
+
"""Calculator for FDAM deterministic formulas."""
|
| 111 |
+
|
| 112 |
+
# Default air scrubber specifications
|
| 113 |
+
DEFAULT_UNIT_CFM = 2000
|
| 114 |
+
DEFAULT_ACH = 4 # Per NADCA ACR 2021
|
| 115 |
+
|
| 116 |
+
def calculate_air_filtration(
|
| 117 |
+
self,
|
| 118 |
+
total_area_sf: float,
|
| 119 |
+
avg_ceiling_height_ft: float,
|
| 120 |
+
unit_cfm: int = DEFAULT_UNIT_CFM,
|
| 121 |
+
required_ach: int = DEFAULT_ACH,
|
| 122 |
+
) -> AirFiltrationResult:
|
| 123 |
+
"""Calculate air filtration requirements per NADCA ACR 2021.
|
| 124 |
+
|
| 125 |
+
Formula: Units = (Volume CF × ACH) / (Unit CFM × 60)
|
| 126 |
+
|
| 127 |
+
Args:
|
| 128 |
+
total_area_sf: Total floor area in square feet
|
| 129 |
+
avg_ceiling_height_ft: Average ceiling height in feet
|
| 130 |
+
unit_cfm: CFM rating of air scrubber units (default 2000)
|
| 131 |
+
required_ach: Required air changes per hour (default 4)
|
| 132 |
+
|
| 133 |
+
Returns:
|
| 134 |
+
AirFiltrationResult with calculation details
|
| 135 |
+
"""
|
| 136 |
+
total_volume_cf = total_area_sf * avg_ceiling_height_ft
|
| 137 |
+
|
| 138 |
+
# Formula from FDAM §5.3
|
| 139 |
+
units_required = math.ceil(
|
| 140 |
+
(total_volume_cf * required_ach) / (unit_cfm * 60)
|
| 141 |
+
)
|
| 142 |
+
|
| 143 |
+
# Minimum 1 unit
|
| 144 |
+
units_required = max(1, units_required)
|
| 145 |
+
|
| 146 |
+
calculation_notes = (
|
| 147 |
+
f"({total_volume_cf:,.0f} CF × {required_ach} ACH) / "
|
| 148 |
+
f"({unit_cfm} CFM × 60) = {units_required} units"
|
| 149 |
+
)
|
| 150 |
+
|
| 151 |
+
return AirFiltrationResult(
|
| 152 |
+
total_volume_cf=total_volume_cf,
|
| 153 |
+
required_ach=required_ach,
|
| 154 |
+
unit_cfm=unit_cfm,
|
| 155 |
+
units_required=units_required,
|
| 156 |
+
calculation_notes=calculation_notes,
|
| 157 |
+
)
|
| 158 |
+
|
| 159 |
+
def calculate_sample_density(
|
| 160 |
+
self,
|
| 161 |
+
total_area_sf: float,
|
| 162 |
+
has_ceiling_deck: bool = True,
|
| 163 |
+
surface_types_count: int = 3,
|
| 164 |
+
) -> SampleDensityResult:
|
| 165 |
+
"""Calculate sample density per FDAM §2.3.
|
| 166 |
+
|
| 167 |
+
Args:
|
| 168 |
+
total_area_sf: Total floor area in square feet
|
| 169 |
+
has_ceiling_deck: Whether ceiling deck surfaces are present
|
| 170 |
+
surface_types_count: Number of distinct surface types
|
| 171 |
+
|
| 172 |
+
Returns:
|
| 173 |
+
SampleDensityResult with recommended sample counts
|
| 174 |
+
"""
|
| 175 |
+
notes = []
|
| 176 |
+
|
| 177 |
+
# Base sample density by area size
|
| 178 |
+
if total_area_sf < 5000:
|
| 179 |
+
tape_min, tape_max = 3, 5
|
| 180 |
+
wipe_min, wipe_max = 3, 5
|
| 181 |
+
notes.append("Small area (<5,000 SF): standard sampling density")
|
| 182 |
+
elif total_area_sf <= 25000:
|
| 183 |
+
tape_min, tape_max = 5, 10
|
| 184 |
+
wipe_min, wipe_max = 5, 10
|
| 185 |
+
notes.append("Medium area (5,000-25,000 SF): moderate sampling density")
|
| 186 |
+
else:
|
| 187 |
+
# Scale for larger areas
|
| 188 |
+
tape_min, tape_max = 10, 15
|
| 189 |
+
wipe_min, wipe_max = 10, 15
|
| 190 |
+
notes.append("Large area (>25,000 SF): enhanced sampling density")
|
| 191 |
+
|
| 192 |
+
# Multiply by surface types
|
| 193 |
+
tape_min *= surface_types_count
|
| 194 |
+
tape_max *= surface_types_count
|
| 195 |
+
wipe_min *= surface_types_count
|
| 196 |
+
wipe_max *= surface_types_count
|
| 197 |
+
|
| 198 |
+
# Ceiling deck enhanced sampling (1 per 2,500 SF per FDAM §4.5)
|
| 199 |
+
ceiling_deck_samples = 0
|
| 200 |
+
if has_ceiling_deck:
|
| 201 |
+
ceiling_deck_samples = max(1, math.ceil(total_area_sf / 2500))
|
| 202 |
+
notes.append(
|
| 203 |
+
f"Ceiling deck: {ceiling_deck_samples} samples "
|
| 204 |
+
f"(1 per 2,500 SF per FDAM §4.5)"
|
| 205 |
+
)
|
| 206 |
+
|
| 207 |
+
return SampleDensityResult(
|
| 208 |
+
total_area_sf=total_area_sf,
|
| 209 |
+
tape_lifts_min=tape_min,
|
| 210 |
+
tape_lifts_max=tape_max,
|
| 211 |
+
surface_wipes_min=wipe_min,
|
| 212 |
+
surface_wipes_max=wipe_max,
|
| 213 |
+
ceiling_deck_samples=ceiling_deck_samples,
|
| 214 |
+
notes=notes,
|
| 215 |
+
)
|
| 216 |
+
|
| 217 |
+
def get_regulatory_flags(
|
| 218 |
+
self,
|
| 219 |
+
construction_era: Literal["pre-1980", "1980-2000", "post-2000"],
|
| 220 |
+
facility_classification: Literal["operational", "non-operational", "public-childcare"],
|
| 221 |
+
) -> RegulatoryFlags:
|
| 222 |
+
"""Determine regulatory requirements based on building characteristics.
|
| 223 |
+
|
| 224 |
+
Args:
|
| 225 |
+
construction_era: Building construction era
|
| 226 |
+
facility_classification: Facility type
|
| 227 |
+
|
| 228 |
+
Returns:
|
| 229 |
+
RegulatoryFlags with applicable requirements
|
| 230 |
+
"""
|
| 231 |
+
flags = RegulatoryFlags()
|
| 232 |
+
|
| 233 |
+
# Lead-based paint (federal LBP rules apply to pre-1978 construction; the pre-1980 era bucket is treated conservatively)
|
| 234 |
+
if construction_era == "pre-1980":
|
| 235 |
+
flags.lbp_survey_required = True
|
| 236 |
+
flags.notes.append("LBP survey required (pre-1978 construction)")
|
| 237 |
+
|
| 238 |
+
# Asbestos (pre-1980 required, 1980-2000 recommended)
|
| 239 |
+
if construction_era == "pre-1980":
|
| 240 |
+
flags.acm_survey_required = True
|
| 241 |
+
flags.notes.append("ACM survey required (pre-1980 construction)")
|
| 242 |
+
elif construction_era == "1980-2000":
|
| 243 |
+
flags.acm_survey_recommended = True
|
| 244 |
+
flags.notes.append("ACM survey recommended (1980-2000 construction)")
|
| 245 |
+
|
| 246 |
+
# Enhanced thresholds for public/childcare
|
| 247 |
+
if facility_classification == "public-childcare":
|
| 248 |
+
flags.enhanced_childcare_thresholds = True
|
| 249 |
+
flags.notes.append(
|
| 250 |
+
"Enhanced lead thresholds apply (EPA/HUD October 2024): "
|
| 251 |
+
"4.3 µg/100cm² for floors"
|
| 252 |
+
)
|
| 253 |
+
|
| 254 |
+
return flags
|
| 255 |
+
|
| 256 |
+
def get_metals_thresholds(
|
| 257 |
+
self,
|
| 258 |
+
facility_classification: Literal["operational", "non-operational", "public-childcare"],
|
| 259 |
+
) -> MetalsThresholds:
|
| 260 |
+
"""Get metals clearance thresholds for facility type.
|
| 261 |
+
|
| 262 |
+
Args:
|
| 263 |
+
facility_classification: Facility type
|
| 264 |
+
|
| 265 |
+
Returns:
|
| 266 |
+
MetalsThresholds with applicable limits
|
| 267 |
+
"""
|
| 268 |
+
return METALS_THRESHOLDS.get(
|
| 269 |
+
facility_classification,
|
| 270 |
+
METALS_THRESHOLDS["non-operational"],
|
| 271 |
+
)
|
| 272 |
+
|
| 273 |
+
def calculate_from_session(self, session: SessionState) -> dict:
|
| 274 |
+
"""Run all calculations from a session state.
|
| 275 |
+
|
| 276 |
+
Args:
|
| 277 |
+
session: Current session state with rooms and project info
|
| 278 |
+
|
| 279 |
+
Returns:
|
| 280 |
+
Dictionary with all calculation results
|
| 281 |
+
"""
|
| 282 |
+
# Calculate totals from rooms
|
| 283 |
+
total_area = sum(r.length_ft * r.width_ft for r in session.rooms)
|
| 284 |
+
total_volume = sum(
|
| 285 |
+
r.length_ft * r.width_ft * r.ceiling_height_ft
|
| 286 |
+
for r in session.rooms
|
| 287 |
+
)
|
| 288 |
+
avg_ceiling = (
|
| 289 |
+
total_volume / total_area if total_area > 0 else 10.0
|
| 290 |
+
)
|
| 291 |
+
|
| 292 |
+
# Air filtration
|
| 293 |
+
air_filtration = self.calculate_air_filtration(
|
| 294 |
+
total_area_sf=total_area,
|
| 295 |
+
avg_ceiling_height_ft=avg_ceiling,
|
| 296 |
+
)
|
| 297 |
+
|
| 298 |
+
# Sample density
|
| 299 |
+
sample_density = self.calculate_sample_density(
|
| 300 |
+
total_area_sf=total_area,
|
| 301 |
+
has_ceiling_deck=True, # Assume present
|
| 302 |
+
surface_types_count=3, # Default assumption
|
| 303 |
+
)
|
| 304 |
+
|
| 305 |
+
# Regulatory flags
|
| 306 |
+
regulatory = self.get_regulatory_flags(
|
| 307 |
+
construction_era=session.project.construction_era or "post-2000",
|
| 308 |
+
facility_classification=session.project.facility_classification or "non-operational",
|
| 309 |
+
)
|
| 310 |
+
|
| 311 |
+
# Metals thresholds
|
| 312 |
+
thresholds = self.get_metals_thresholds(
|
| 313 |
+
facility_classification=session.project.facility_classification or "non-operational",
|
| 314 |
+
)
|
| 315 |
+
|
| 316 |
+
return {
|
| 317 |
+
"total_area_sf": total_area,
|
| 318 |
+
"total_volume_cf": total_volume,
|
| 319 |
+
"avg_ceiling_height_ft": avg_ceiling,
|
| 320 |
+
"air_filtration": air_filtration,
|
| 321 |
+
"sample_density": sample_density,
|
| 322 |
+
"regulatory_flags": regulatory,
|
| 323 |
+
"metals_thresholds": thresholds,
|
| 324 |
+
"particulate_thresholds": PARTICULATE_THRESHOLDS,
|
| 325 |
+
}
|
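Worked example for the calculator above, using the defaults (4 ACH, 2,000 CFM units): a 20,000 SF space with 12 ft ceilings is 240,000 CF, and (240,000 × 4) / (2,000 × 60) = 8 scrubbers. The sample-density and threshold lookups follow the same deterministic tables, so the assertions below hold by construction:

    from pipeline.calculations import FDAMCalculator

    calc = FDAMCalculator()

    air = calc.calculate_air_filtration(total_area_sf=20_000, avg_ceiling_height_ft=12)
    assert air.total_volume_cf == 240_000
    assert air.units_required == 8   # ceil((240,000 CF x 4 ACH) / (2,000 CFM x 60))

    density = calc.calculate_sample_density(total_area_sf=20_000, surface_types_count=3)
    assert (density.tape_lifts_min, density.tape_lifts_max) == (15, 30)  # 5-10 lifts x 3 surface types
    assert density.ceiling_deck_samples == 8                             # ceil(20,000 / 2,500)

    thresholds = calc.get_metals_thresholds("public-childcare")
    assert thresholds.lead_ug_100cm2 == 4.3  # EPA/HUD October 2024 floor criterion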
pipeline/dispositions.py
ADDED
|
@@ -0,0 +1,364 @@
|
| 1 |
+
"""FDAM Dispositions Module.
|
| 2 |
+
|
| 3 |
+
Determines cleaning dispositions based on zone classification,
|
| 4 |
+
condition level, and RAG-retrieved methodology context.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import logging
|
| 8 |
+
from dataclasses import dataclass, field
|
| 9 |
+
from typing import Literal, Optional
|
| 10 |
+
|
| 11 |
+
from rag import FDAMRetriever, ChromaVectorStore
|
| 12 |
+
|
| 13 |
+
logger = logging.getLogger(__name__)
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
# Disposition matrix from FDAM §4.3
|
| 17 |
+
DISPOSITION_MATRIX = {
|
| 18 |
+
# (zone, condition) -> (disposition, protocol); "any" rows are informational only (background and structural-damage are short-circuited in determine_disposition)
|
| 19 |
+
("any", "background"): ("no-action", "Document only"),
|
| 20 |
+
("far-field", "light"): ("clean", "Standard protocol"),
|
| 21 |
+
("far-field", "moderate"): ("clean", "Full protocol"),
|
| 22 |
+
("far-field", "heavy"): ("clean", "Aggressive protocol"),
|
| 23 |
+
("near-field", "light"): ("clean", "Full protocol"),
|
| 24 |
+
("near-field", "moderate"): ("clean", "Aggressive protocol, multiple passes"),
|
| 25 |
+
("near-field", "heavy"): ("clean", "Aggressive protocol with verification sampling"),
|
| 26 |
+
("burn-zone", "light"): ("clean", "Post-structural repair; full protocol"),
|
| 27 |
+
("burn-zone", "moderate"): ("clean", "Post-structural repair; aggressive protocol"),
|
| 28 |
+
("burn-zone", "heavy"): ("clean", "Post-structural repair; aggressive protocol"),
|
| 29 |
+
("any", "structural-damage"): ("remove-repair", "Beyond cleaning scope"),
|
| 30 |
+
}
|
| 31 |
+
|
| 32 |
+
# Protocol details
|
| 33 |
+
CLEANING_PROTOCOLS = {
|
| 34 |
+
"standard": {
|
| 35 |
+
"name": "Standard Protocol",
|
| 36 |
+
"steps": [
|
| 37 |
+
"HEPA vacuum all surfaces",
|
| 38 |
+
"Wet wipe with appropriate cleaner",
|
| 39 |
+
"Allow to dry",
|
| 40 |
+
"Visual inspection",
|
| 41 |
+
],
|
| 42 |
+
"passes": 1,
|
| 43 |
+
},
|
| 44 |
+
"full": {
|
| 45 |
+
"name": "Full Protocol",
|
| 46 |
+
"steps": [
|
| 47 |
+
"HEPA vacuum all surfaces (2 passes)",
|
| 48 |
+
"Wet wipe with degreaser/cleaner",
|
| 49 |
+
"Rinse wipe",
|
| 50 |
+
"Allow to dry",
|
| 51 |
+
"Visual inspection",
|
| 52 |
+
"Verification sampling if required",
|
| 53 |
+
],
|
| 54 |
+
"passes": 2,
|
| 55 |
+
},
|
| 56 |
+
"aggressive": {
|
| 57 |
+
"name": "Aggressive Protocol",
|
| 58 |
+
"steps": [
|
| 59 |
+
"HEPA vacuum all surfaces (minimum 3 passes)",
|
| 60 |
+
"Apply cleaning solution, allow dwell time",
|
| 61 |
+
"Agitate with appropriate brush/pad",
|
| 62 |
+
"Wet wipe extraction",
|
| 63 |
+
"Rinse wipe",
|
| 64 |
+
"Repeat cleaning cycle if needed",
|
| 65 |
+
"Verification sampling required",
|
| 66 |
+
],
|
| 67 |
+
"passes": 3,
|
| 68 |
+
},
|
| 69 |
+
}
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
@dataclass
|
| 73 |
+
class DispositionResult:
|
| 74 |
+
"""Result of disposition determination."""
|
| 75 |
+
|
| 76 |
+
zone: str
|
| 77 |
+
condition: str
|
| 78 |
+
disposition: Literal["no-action", "clean", "evaluate", "remove", "remove-repair"]
|
| 79 |
+
protocol: str
|
| 80 |
+
protocol_details: Optional[dict] = None
|
| 81 |
+
confidence: float = 1.0
|
| 82 |
+
rag_context: Optional[str] = None
|
| 83 |
+
notes: list[str] = field(default_factory=list)
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
@dataclass
|
| 87 |
+
class SurfaceDisposition:
|
| 88 |
+
"""Disposition for a specific surface."""
|
| 89 |
+
|
| 90 |
+
surface_type: str
|
| 91 |
+
room_name: str
|
| 92 |
+
zone: str
|
| 93 |
+
condition: str
|
| 94 |
+
disposition: str
|
| 95 |
+
cleaning_method: str
|
| 96 |
+
notes: list[str] = field(default_factory=list)
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
class DispositionEngine:
|
| 100 |
+
"""Determines cleaning dispositions using FDAM methodology and RAG."""
|
| 101 |
+
|
| 102 |
+
def __init__(self, retriever: Optional[FDAMRetriever] = None):
|
| 103 |
+
"""Initialize disposition engine.
|
| 104 |
+
|
| 105 |
+
Args:
|
| 106 |
+
retriever: Optional RAG retriever. If None, uses default.
|
| 107 |
+
"""
|
| 108 |
+
self._retriever = retriever
|
| 109 |
+
|
| 110 |
+
@property
|
| 111 |
+
def retriever(self) -> FDAMRetriever:
|
| 112 |
+
"""Get or create RAG retriever."""
|
| 113 |
+
if self._retriever is None:
|
| 114 |
+
try:
|
| 115 |
+
vs = ChromaVectorStore(persist_directory="chroma_db")
|
| 116 |
+
self._retriever = FDAMRetriever(vectorstore=vs)
|
| 117 |
+
except Exception as e:
|
| 118 |
+
# Fall back to in-memory if no persistent store
|
| 119 |
+
logger.warning(f"ChromaDB init failed, using fallback retriever: {e}")
|
| 120 |
+
self._retriever = FDAMRetriever()
|
| 121 |
+
return self._retriever
|
| 122 |
+
|
| 123 |
+
def determine_disposition(
|
| 124 |
+
self,
|
| 125 |
+
zone: Literal["burn-zone", "near-field", "far-field"],
|
| 126 |
+
condition: Literal["background", "light", "moderate", "heavy", "structural-damage"],
|
| 127 |
+
surface_type: Optional[str] = None,
|
| 128 |
+
use_rag: bool = True,
|
| 129 |
+
) -> DispositionResult:
|
| 130 |
+
"""Determine disposition for a zone/condition combination.
|
| 131 |
+
|
| 132 |
+
Args:
|
| 133 |
+
zone: Zone classification
|
| 134 |
+
condition: Condition level
|
| 135 |
+
surface_type: Optional surface type for specific guidance
|
| 136 |
+
use_rag: Whether to retrieve additional context from RAG
|
| 137 |
+
|
| 138 |
+
Returns:
|
| 139 |
+
DispositionResult with disposition and protocol
|
| 140 |
+
"""
|
| 141 |
+
notes = []
|
| 142 |
+
|
| 143 |
+
# Handle background condition (any zone)
|
| 144 |
+
if condition == "background":
|
| 145 |
+
return DispositionResult(
|
| 146 |
+
zone=zone,
|
| 147 |
+
condition=condition,
|
| 148 |
+
disposition="no-action",
|
| 149 |
+
protocol="Document only",
|
| 150 |
+
confidence=1.0,
|
| 151 |
+
notes=["No visible contamination - document and proceed"],
|
| 152 |
+
)
|
| 153 |
+
|
| 154 |
+
# Handle structural damage (any zone)
|
| 155 |
+
if condition == "structural-damage":
|
| 156 |
+
return DispositionResult(
|
| 157 |
+
zone=zone,
|
| 158 |
+
condition=condition,
|
| 159 |
+
disposition="remove-repair",
|
| 160 |
+
protocol="Beyond cleaning scope",
|
| 161 |
+
confidence=1.0,
|
| 162 |
+
notes=["Structural damage requires repair before cleaning assessment"],
|
| 163 |
+
)
|
| 164 |
+
|
| 165 |
+
# Look up in disposition matrix
|
| 166 |
+
key = (zone, condition)
|
| 167 |
+
if key in DISPOSITION_MATRIX:
|
| 168 |
+
disposition, protocol = DISPOSITION_MATRIX[key]
|
| 169 |
+
else:
|
| 170 |
+
# Fallback for unexpected combinations
|
| 171 |
+
disposition = "evaluate"
|
| 172 |
+
protocol = "Professional judgment required"
|
| 173 |
+
notes.append("Combination not in standard matrix - requires evaluation")
|
| 174 |
+
|
| 175 |
+
# Determine protocol details
|
| 176 |
+
protocol_details = None
|
| 177 |
+
if "standard" in protocol.lower():
|
| 178 |
+
protocol_details = CLEANING_PROTOCOLS["standard"]
|
| 179 |
+
elif "aggressive" in protocol.lower():
|
| 180 |
+
protocol_details = CLEANING_PROTOCOLS["aggressive"]
|
| 181 |
+
elif "full" in protocol.lower():
|
| 182 |
+
protocol_details = CLEANING_PROTOCOLS["full"]
|
| 183 |
+
|
| 184 |
+
# Get RAG context if enabled
|
| 185 |
+
rag_context = None
|
| 186 |
+
if use_rag:
|
| 187 |
+
try:
|
| 188 |
+
results = self.retriever.retrieve_disposition(
|
| 189 |
+
zone=zone,
|
| 190 |
+
condition=condition,
|
| 191 |
+
material_type=surface_type,
|
| 192 |
+
)
|
| 193 |
+
if results:
|
| 194 |
+
rag_context = results[0].text[:500] # First result, truncated
|
| 195 |
+
notes.append(f"RAG context from: {results[0].source}")
|
| 196 |
+
except Exception as e:
|
| 197 |
+
notes.append(f"RAG lookup unavailable: {e}")
|
| 198 |
+
|
| 199 |
+
return DispositionResult(
|
| 200 |
+
zone=zone,
|
| 201 |
+
condition=condition,
|
| 202 |
+
disposition=disposition,
|
| 203 |
+
protocol=protocol,
|
| 204 |
+
protocol_details=protocol_details,
|
| 205 |
+
confidence=0.9 if disposition != "evaluate" else 0.6,
|
| 206 |
+
rag_context=rag_context,
|
| 207 |
+
notes=notes,
|
| 208 |
+
)
|
| 209 |
+
|
| 210 |
+
def get_cleaning_method(
|
| 211 |
+
self,
|
| 212 |
+
surface_type: str,
|
| 213 |
+
condition: Literal["light", "moderate", "heavy"],
|
| 214 |
+
use_rag: bool = True,
|
| 215 |
+
) -> dict:
|
| 216 |
+
"""Get recommended cleaning method for a surface type.
|
| 217 |
+
|
| 218 |
+
Args:
|
| 219 |
+
surface_type: Type of surface (e.g., "drywall", "concrete")
|
| 220 |
+
condition: Contamination level
|
| 221 |
+
use_rag: Whether to retrieve from RAG
|
| 222 |
+
|
| 223 |
+
Returns:
|
| 224 |
+
Dictionary with cleaning method details
|
| 225 |
+
"""
|
| 226 |
+
# Default cleaning methods by surface type (from FDAM §5.2)
|
| 227 |
+
default_methods = {
|
| 228 |
+
"drywall": "HEPA vacuum → Dry sponge OR wet wipe",
|
| 229 |
+
"painted-drywall": "HEPA vacuum → Wet wipe with degreaser",
|
| 230 |
+
"concrete": "Scrubber machine + alkaline cleaner",
|
| 231 |
+
"concrete-floor": "Scrubber machine + alkaline cleaner",
|
| 232 |
+
"cmu": "HEPA vacuum → Wet wipe OR power wash",
|
| 233 |
+
"cmu-walls": "HEPA vacuum → Wet wipe OR power wash",
|
| 234 |
+
"metal": "Wet wipe → Rinse",
|
| 235 |
+
"metal-doors": "Wet wipe → Rinse",
|
| 236 |
+
"wood": "HEPA vacuum → Appropriate wood cleaner",
|
| 237 |
+
"glass": "Glass cleaner with lint-free cloth",
|
| 238 |
+
"carpet": "HEPA vacuum → Hot water extraction",
|
| 239 |
+
"hvac-ductwork": "Per NADCA ACR standards",
|
| 240 |
+
"ceiling-deck": "HEPA vacuum → Wet wipe (enhanced sampling required)",
|
| 241 |
+
}
|
| 242 |
+
|
| 243 |
+
# Normalize surface type
|
| 244 |
+
surface_lower = surface_type.lower().replace(" ", "-")
|
| 245 |
+
|
| 246 |
+
# Find best match: exact key first, then substring fallback
|
| 247 |
+
method = default_methods.get(surface_lower)
|
| 248 |
+
for key, value in default_methods.items():
|
| 249 |
+
if method is None and (key in surface_lower or surface_lower in key):
|
| 250 |
+
method = value
|
| 251 |
+
break
|
| 252 |
+
|
| 253 |
+
if method is None:
|
| 254 |
+
method = "HEPA vacuum → Wet wipe (consult IH professional)"
|
| 255 |
+
|
| 256 |
+
# Enhance method based on condition
|
| 257 |
+
if condition == "heavy":
|
| 258 |
+
method = f"{method} (multiple passes, verification sampling)"
|
| 259 |
+
elif condition == "moderate":
|
| 260 |
+
method = f"{method} (consider additional pass)"
|
| 261 |
+
|
| 262 |
+
result = {
|
| 263 |
+
"surface_type": surface_type,
|
| 264 |
+
"condition": condition,
|
| 265 |
+
"method": method,
|
| 266 |
+
"source": "FDAM §5.2",
|
| 267 |
+
}
|
| 268 |
+
|
| 269 |
+
# Get RAG context for additional detail
|
| 270 |
+
if use_rag:
|
| 271 |
+
try:
|
| 272 |
+
rag_results = self.retriever.retrieve_cleaning_method(
|
| 273 |
+
surface_type=surface_type,
|
| 274 |
+
condition=condition,
|
| 275 |
+
)
|
| 276 |
+
if rag_results:
|
| 277 |
+
result["rag_context"] = rag_results[0].text[:300]
|
| 278 |
+
result["rag_source"] = rag_results[0].source
|
| 279 |
+
except Exception as e:
|
| 280 |
+
logger.warning(f"RAG retrieval failed for cleaning method: {e}")
|
| 281 |
+
|
| 282 |
+
return result
|
| 283 |
+
|
| 284 |
+
def process_vision_results(
|
| 285 |
+
self,
|
| 286 |
+
vision_results: dict,
|
| 287 |
+
room_mapping: dict,
|
| 288 |
+
) -> list[SurfaceDisposition]:
|
| 289 |
+
"""Process vision analysis results into surface dispositions.
|
| 290 |
+
|
| 291 |
+
Args:
|
| 292 |
+
vision_results: Dictionary of image_id -> vision result
|
| 293 |
+
room_mapping: Dictionary of image_id -> room info
|
| 294 |
+
|
| 295 |
+
Returns:
|
| 296 |
+
List of SurfaceDisposition for each analyzed surface
|
| 297 |
+
"""
|
| 298 |
+
dispositions = []
|
| 299 |
+
|
| 300 |
+
for image_id, result in vision_results.items():
|
| 301 |
+
room_info = room_mapping.get(image_id, {})
|
| 302 |
+
room_name = room_info.get("name", "Unknown Room")
|
| 303 |
+
|
| 304 |
+
# Extract zone and condition with fallback tracking
|
| 305 |
+
zone_data = result.get("zone", {})
|
| 306 |
+
zone = zone_data.get("classification") if zone_data else None
|
| 307 |
+
condition_data = result.get("condition", {})
|
| 308 |
+
condition = condition_data.get("level") if condition_data else None
|
| 309 |
+
|
| 310 |
+
# Track if fallbacks were used (affects confidence scoring)
|
| 311 |
+
fallback_used = False
|
| 312 |
+
if zone is None:
|
| 313 |
+
zone = "far-field"
|
| 314 |
+
fallback_used = True
|
| 315 |
+
logger.warning(f"Image {image_id}: Using fallback zone 'far-field'")
|
| 316 |
+
if condition is None:
|
| 317 |
+
condition = "light"
|
| 318 |
+
fallback_used = True
|
| 319 |
+
logger.warning(f"Image {image_id}: Using fallback condition 'light'")
|
| 320 |
+
|
| 321 |
+
# Flag for confidence scoring
|
| 322 |
+
if fallback_used:
|
| 323 |
+
result["_fallback_used"] = True
|
| 324 |
+
|
| 325 |
+
# Get materials detected
|
| 326 |
+
materials = result.get("materials", [])
|
| 327 |
+
if not materials:
|
| 328 |
+
materials = [{"type": "general-surface", "confidence": 0.8}]
|
| 329 |
+
result["_fallback_used"] = True
|
| 330 |
+
|
| 331 |
+
for material in materials:
|
| 332 |
+
material_type = material.get("type", "unknown")
|
| 333 |
+
|
| 334 |
+
# Get disposition
|
| 335 |
+
disp_result = self.determine_disposition(
|
| 336 |
+
zone=zone,
|
| 337 |
+
condition=condition,
|
| 338 |
+
surface_type=material_type,
|
| 339 |
+
use_rag=True,
|
| 340 |
+
)
|
| 341 |
+
|
| 342 |
+
# Get cleaning method
|
| 343 |
+
if condition != "background" and disp_result.disposition == "clean":
|
| 344 |
+
method_info = self.get_cleaning_method(
|
| 345 |
+
surface_type=material_type,
|
| 346 |
+
condition=condition,
|
| 347 |
+
)
|
| 348 |
+
cleaning_method = method_info["method"]
|
| 349 |
+
else:
|
| 350 |
+
cleaning_method = disp_result.protocol
|
| 351 |
+
|
| 352 |
+
dispositions.append(
|
| 353 |
+
SurfaceDisposition(
|
| 354 |
+
surface_type=material_type,
|
| 355 |
+
room_name=room_name,
|
| 356 |
+
zone=zone,
|
| 357 |
+
condition=condition,
|
| 358 |
+
disposition=disp_result.disposition,
|
| 359 |
+
cleaning_method=cleaning_method,
|
| 360 |
+
notes=disp_result.notes,
|
| 361 |
+
)
|
| 362 |
+
)
|
| 363 |
+
|
| 364 |
+
return dispositions
|
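A short sketch of resolving a single surface through the engine above. Passing `use_rag=False` exercises only the static matrix and protocol tables, so no ChromaDB store is needed:

    from pipeline.dispositions import DispositionEngine

    engine = DispositionEngine()

    result = engine.determine_disposition(
        zone="near-field",
        condition="moderate",
        surface_type="painted-drywall",
        use_rag=False,
    )
    print(result.disposition)  # "clean"
    print(result.protocol)     # "Aggressive protocol, multiple passes"

    method = engine.get_cleaning_method("painted drywall", "moderate", use_rag=False)
    print(method["method"], "|", method["source"])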
pipeline/generator.py
ADDED
|
@@ -0,0 +1,466 @@
|
| 1 |
+
"""FDAM Document Generator.
|
| 2 |
+
|
| 3 |
+
Generates Cleaning Specification / Scope of Work documents
|
| 4 |
+
with RAG-enhanced content from the FDAM knowledge base.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from dataclasses import dataclass
|
| 8 |
+
from datetime import datetime
|
| 9 |
+
from typing import Optional
|
| 10 |
+
|
| 11 |
+
from ui.state import SessionState
|
| 12 |
+
from rag import FDAMRetriever, ChromaVectorStore
|
| 13 |
+
from .calculations import FDAMCalculator, AirFiltrationResult, SampleDensityResult, RegulatoryFlags
|
| 14 |
+
from .dispositions import DispositionEngine, SurfaceDisposition
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
@dataclass
|
| 18 |
+
class GeneratedDocument:
|
| 19 |
+
"""A generated assessment document."""
|
| 20 |
+
|
| 21 |
+
markdown: str
|
| 22 |
+
title: str
|
| 23 |
+
generated_at: str
|
| 24 |
+
word_count: int
|
| 25 |
+
sections: list[str]
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class DocumentGenerator:
|
| 29 |
+
"""Generates FDAM assessment documents with RAG enhancement."""
|
| 30 |
+
|
| 31 |
+
def __init__(
|
| 32 |
+
self,
|
| 33 |
+
calculator: Optional[FDAMCalculator] = None,
|
| 34 |
+
disposition_engine: Optional[DispositionEngine] = None,
|
| 35 |
+
retriever: Optional[FDAMRetriever] = None,
|
| 36 |
+
):
|
| 37 |
+
"""Initialize document generator.
|
| 38 |
+
|
| 39 |
+
Args:
|
| 40 |
+
calculator: FDAM calculator instance
|
| 41 |
+
disposition_engine: Disposition engine instance
|
| 42 |
+
retriever: RAG retriever instance
|
| 43 |
+
"""
|
| 44 |
+
self.calculator = calculator or FDAMCalculator()
|
| 45 |
+
self.disposition_engine = disposition_engine or DispositionEngine()
|
| 46 |
+
self._retriever = retriever
|
| 47 |
+
|
| 48 |
+
@property
|
| 49 |
+
def retriever(self) -> FDAMRetriever:
|
| 50 |
+
"""Get or create RAG retriever."""
|
| 51 |
+
if self._retriever is None:
|
| 52 |
+
try:
|
| 53 |
+
vs = ChromaVectorStore(persist_directory="chroma_db")
|
| 54 |
+
self._retriever = FDAMRetriever(vectorstore=vs)
|
| 55 |
+
except Exception:
|
| 56 |
+
self._retriever = FDAMRetriever()
|
| 57 |
+
return self._retriever
|
| 58 |
+
|
| 59 |
+
def generate_sow(
|
| 60 |
+
self,
|
| 61 |
+
session: SessionState,
|
| 62 |
+
vision_results: dict,
|
| 63 |
+
surface_dispositions: list[SurfaceDisposition],
|
| 64 |
+
calculations: dict,
|
| 65 |
+
) -> GeneratedDocument:
|
| 66 |
+
"""Generate Scope of Work document.
|
| 67 |
+
|
| 68 |
+
Args:
|
| 69 |
+
session: Current session state
|
| 70 |
+
vision_results: Vision analysis results by image ID
|
| 71 |
+
surface_dispositions: List of surface dispositions
|
| 72 |
+
calculations: Calculation results from FDAMCalculator
|
| 73 |
+
|
| 74 |
+
Returns:
|
| 75 |
+
GeneratedDocument with markdown content
|
| 76 |
+
"""
|
| 77 |
+
sections = []
|
| 78 |
+
|
| 79 |
+
# Header
|
| 80 |
+
header = self._generate_header(session)
|
| 81 |
+
sections.append(header)
|
| 82 |
+
|
| 83 |
+
# Project Information
|
| 84 |
+
project_info = self._generate_project_info(session)
|
| 85 |
+
sections.append(project_info)
|
| 86 |
+
|
| 87 |
+
# Scope Summary
|
| 88 |
+
scope_summary = self._generate_scope_summary(session, calculations)
|
| 89 |
+
sections.append(scope_summary)
|
| 90 |
+
|
| 91 |
+
# Room Inventory
|
| 92 |
+
room_inventory = self._generate_room_inventory(session)
|
| 93 |
+
sections.append(room_inventory)
|
| 94 |
+
|
| 95 |
+
# Vision Analysis Summary
|
| 96 |
+
vision_summary = self._generate_vision_summary(session, vision_results)
|
| 97 |
+
sections.append(vision_summary)
|
| 98 |
+
|
| 99 |
+
# Field Observations
|
| 100 |
+
observations = self._generate_observations(session)
|
| 101 |
+
sections.append(observations)
|
| 102 |
+
|
| 103 |
+
# Disposition Summary
|
| 104 |
+
disposition_summary = self._generate_disposition_summary(surface_dispositions)
|
| 105 |
+
sections.append(disposition_summary)
|
| 106 |
+
|
| 107 |
+
# Cleaning Specifications
|
| 108 |
+
cleaning_specs = self._generate_cleaning_specs(surface_dispositions, calculations)
|
| 109 |
+
sections.append(cleaning_specs)
|
| 110 |
+
|
| 111 |
+
# Air Filtration Requirements
|
| 112 |
+
air_filtration = self._generate_air_filtration(calculations)
|
| 113 |
+
sections.append(air_filtration)
|
| 114 |
+
|
| 115 |
+
# Sampling Plan
|
| 116 |
+
sampling_plan = self._generate_sampling_plan(calculations, session)
|
| 117 |
+
sections.append(sampling_plan)
|
| 118 |
+
|
| 119 |
+
# Regulatory Requirements
|
| 120 |
+
regulatory = self._generate_regulatory_section(calculations)
|
| 121 |
+
sections.append(regulatory)
|
| 122 |
+
|
| 123 |
+
# Clearance Thresholds
|
| 124 |
+
thresholds = self._generate_thresholds_section(calculations)
|
| 125 |
+
sections.append(thresholds)
|
| 126 |
+
|
| 127 |
+
# Disclaimer and Footer
|
| 128 |
+
footer = self._generate_footer()
|
| 129 |
+
sections.append(footer)
|
| 130 |
+
|
| 131 |
+
# Combine all sections
|
| 132 |
+
markdown = "\n\n---\n\n".join(sections)
|
| 133 |
+
|
| 134 |
+
return GeneratedDocument(
|
| 135 |
+
markdown=markdown,
|
| 136 |
+
title=f"SOW - {session.project.project_name}",
|
| 137 |
+
generated_at=datetime.now().isoformat(),
|
| 138 |
+
word_count=len(markdown.split()),
|
| 139 |
+
sections=[
|
| 140 |
+
"Header", "Project Info", "Scope Summary", "Room Inventory",
|
| 141 |
+
"Vision Analysis", "Observations", "Dispositions",
|
| 142 |
+
"Cleaning Specs", "Air Filtration", "Sampling Plan",
|
| 143 |
+
"Regulatory", "Thresholds", "Footer"
|
| 144 |
+
],
|
| 145 |
+
)
|
| 146 |
+
|
| 147 |
+
def _generate_header(self, session: SessionState) -> str:
|
| 148 |
+
"""Generate document header."""
|
| 149 |
+
return f"""# Cleaning Specification / Scope of Work
|
| 150 |
+
|
| 151 |
+
**Project:** {session.project.project_name}
|
| 152 |
+
**Prepared For:** {session.project.client_name}
|
| 153 |
+
**Date:** {datetime.now().strftime('%B %d, %Y')}
|
| 154 |
+
**Document Version:** FDAM v4.0.1"""
|
| 155 |
+
|
| 156 |
+
def _generate_project_info(self, session: SessionState) -> str:
|
| 157 |
+
"""Generate project information section."""
|
| 158 |
+
p = session.project
|
| 159 |
+
return f"""## Project Information
|
| 160 |
+
|
| 161 |
+
| Field | Value |
|
| 162 |
+
|-------|-------|
|
| 163 |
+
| **Project Name** | {p.project_name} |
|
| 164 |
+
| **Address** | {p.address}, {p.city}, {p.state} {p.zip_code} |
|
| 165 |
+
| **Client** | {p.client_name} |
|
| 166 |
+
| **Fire Date** | {p.fire_date} |
|
| 167 |
+
| **Assessment Date** | {p.assessment_date} |
|
| 168 |
+
| **Facility Classification** | {p.facility_classification or 'Not specified'} |
|
| 169 |
+
| **Construction Era** | {p.construction_era or 'Not specified'} |
|
| 170 |
+
| **Assessor** | {p.assessor_name} {p.assessor_credentials or ''} |"""
|
| 171 |
+
|
| 172 |
+
def _generate_scope_summary(self, session: SessionState, calculations: dict) -> str:
|
| 173 |
+
"""Generate scope summary section."""
|
| 174 |
+
air = calculations.get("air_filtration")
|
| 175 |
+
sample = calculations.get("sample_density")
|
| 176 |
+
|
| 177 |
+
return f"""## Scope Summary
|
| 178 |
+
|
| 179 |
+
| Metric | Value |
|
| 180 |
+
|--------|-------|
|
| 181 |
+
| **Total Rooms/Areas** | {len(session.rooms)} |
|
| 182 |
+
| **Total Floor Area** | {calculations['total_area_sf']:,.0f} SF |
|
| 183 |
+
| **Total Volume** | {calculations['total_volume_cf']:,.0f} CF |
|
| 184 |
+
| **Images Analyzed** | {len(session.images)} |
|
| 185 |
+
| **Air Scrubbers Required** | {air.units_required if air else 'N/A'} units |
|
| 186 |
+
| **Est. Tape Lifts** | {sample.tape_lifts_min}-{sample.tape_lifts_max if sample else 'N/A'} |
|
| 187 |
+
| **Est. Surface Wipes** | {f'{sample.surface_wipes_min}-{sample.surface_wipes_max}' if sample else 'N/A'} |"""
|
| 188 |
+
|
| 189 |
+
def _generate_room_inventory(self, session: SessionState) -> str:
|
| 190 |
+
"""Generate room inventory table."""
|
| 191 |
+
lines = ["## Room Inventory", ""]
|
| 192 |
+
lines.append("| Room/Area | Dimensions | Area (SF) | Volume (CF) |")
|
| 193 |
+
lines.append("|-----------|------------|-----------|-------------|")
|
| 194 |
+
|
| 195 |
+
for room in session.rooms:
|
| 196 |
+
area = room.length_ft * room.width_ft
|
| 197 |
+
volume = area * room.ceiling_height_ft
|
| 198 |
+
lines.append(
|
| 199 |
+
f"| {room.name} | {room.length_ft:.0f}' × {room.width_ft:.0f}' × "
|
| 200 |
+
f"{room.ceiling_height_ft:.0f}' | {area:,.0f} | {volume:,.0f} |"
|
| 201 |
+
)
|
| 202 |
+
|
| 203 |
+
return "\n".join(lines)
|
| 204 |
+
|
| 205 |
+
def _generate_vision_summary(self, session: SessionState, vision_results: dict) -> str:
|
| 206 |
+
"""Generate AI vision analysis summary."""
|
| 207 |
+
lines = ["## AI Vision Analysis Summary", ""]
|
| 208 |
+
|
| 209 |
+
if not vision_results:
|
| 210 |
+
lines.append("*No images analyzed.*")
|
| 211 |
+
return "\n".join(lines)
|
| 212 |
+
|
| 213 |
+
lines.append("| Image | Zone | Condition | Confidence |")
|
| 214 |
+
lines.append("|-------|------|-----------|------------|")
|
| 215 |
+
|
| 216 |
+
for img_meta in session.images:
|
| 217 |
+
result = vision_results.get(img_meta.id, {})
|
| 218 |
+
zone = result.get("zone", {})
|
| 219 |
+
condition = result.get("condition", {})
|
| 220 |
+
|
| 221 |
+
zone_class = zone.get("classification", "N/A")
|
| 222 |
+
zone_conf = zone.get("confidence", 0)
|
| 223 |
+
cond_level = condition.get("level", "N/A")
|
| 224 |
+
cond_conf = condition.get("confidence", 0)
|
| 225 |
+
|
| 226 |
+
lines.append(
|
| 227 |
+
f"| {img_meta.filename} | {zone_class} ({zone_conf:.0%}) | "
|
| 228 |
+
f"{cond_level} ({cond_conf:.0%}) | {(zone_conf + cond_conf) / 2:.0%} |"
|
| 229 |
+
)
|
| 230 |
+
|
| 231 |
+
return "\n".join(lines)
|
| 232 |
+
|
| 233 |
+
def _generate_observations(self, session: SessionState) -> str:
|
| 234 |
+
"""Generate field observations section."""
|
| 235 |
+
obs = session.observations
|
| 236 |
+
lines = ["## Field Observations", ""]
|
| 237 |
+
|
| 238 |
+
items = []
|
| 239 |
+
if obs.smoke_fire_odor:
|
| 240 |
+
items.append(f"- **Smoke/Fire Odor:** {obs.odor_intensity or 'Present'}")
|
| 241 |
+
if obs.visible_soot_deposits:
|
| 242 |
+
items.append(f"- **Visible Soot:** {obs.soot_pattern_description or 'Present'}")
|
| 243 |
+
if obs.large_char_particles:
|
| 244 |
+
items.append(f"- **Char Particles:** {obs.char_density_estimate or 'Present'}")
|
| 245 |
+
if obs.ash_like_residue:
|
| 246 |
+
items.append(f"- **Ash Residue:** {obs.ash_color_texture or 'Present'}")
|
| 247 |
+
if obs.surface_discoloration:
|
| 248 |
+
items.append(f"- **Discoloration:** {obs.discoloration_description or 'Present'}")
|
| 249 |
+
if obs.wildfire_indicators:
|
| 250 |
+
items.append(f"- **Wildfire Indicators:** {obs.wildfire_notes or 'Present'}")
|
| 251 |
+
if obs.dust_loading_interference:
|
| 252 |
+
items.append(f"- **Dust/Debris:** {obs.dust_notes or 'Present'}")
|
| 253 |
+
if obs.additional_notes:
|
| 254 |
+
items.append(f"- **Additional Notes:** {obs.additional_notes}")
|
| 255 |
+
|
| 256 |
+
if items:
|
| 257 |
+
lines.extend(items)
|
| 258 |
+
else:
|
| 259 |
+
lines.append("*No significant observations noted.*")
|
| 260 |
+
|
| 261 |
+
return "\n".join(lines)
|
| 262 |
+
|
| 263 |
+
def _generate_disposition_summary(self, dispositions: list[SurfaceDisposition]) -> str:
|
| 264 |
+
"""Generate disposition summary table."""
|
| 265 |
+
lines = ["## Disposition Summary", ""]
|
| 266 |
+
|
| 267 |
+
if not dispositions:
|
| 268 |
+
lines.append("*No dispositions determined.*")
|
| 269 |
+
return "\n".join(lines)
|
| 270 |
+
|
| 271 |
+
lines.append("| Room | Surface | Zone | Condition | Disposition |")
|
| 272 |
+
lines.append("|------|---------|------|-----------|-------------|")
|
| 273 |
+
|
| 274 |
+
for disp in dispositions:
|
| 275 |
+
lines.append(
|
| 276 |
+
f"| {disp.room_name} | {disp.surface_type} | {disp.zone} | "
|
| 277 |
+
f"{disp.condition} | {disp.disposition.upper()} |"
|
| 278 |
+
)
|
| 279 |
+
|
| 280 |
+
return "\n".join(lines)
|
| 281 |
+
|
| 282 |
+
def _generate_cleaning_specs(
|
| 283 |
+
self,
|
| 284 |
+
dispositions: list[SurfaceDisposition],
|
| 285 |
+
calculations: dict,
|
| 286 |
+
) -> str:
|
| 287 |
+
"""Generate cleaning specifications section."""
|
| 288 |
+
lines = ["## Cleaning Specifications", ""]
|
| 289 |
+
|
| 290 |
+
# Group by disposition
|
| 291 |
+
by_disposition = {}
|
| 292 |
+
for disp in dispositions:
|
| 293 |
+
key = disp.disposition
|
| 294 |
+
if key not in by_disposition:
|
| 295 |
+
by_disposition[key] = []
|
| 296 |
+
by_disposition[key].append(disp)
|
| 297 |
+
|
| 298 |
+
for disposition, items in by_disposition.items():
|
| 299 |
+
lines.append(f"### {disposition.upper().replace('-', ' ')} Surfaces")
|
| 300 |
+
lines.append("")
|
| 301 |
+
|
| 302 |
+
for item in items:
|
| 303 |
+
lines.append(f"**{item.room_name} - {item.surface_type}:**")
|
| 304 |
+
lines.append(f"- Method: {item.cleaning_method}")
|
| 305 |
+
if item.notes:
|
| 306 |
+
lines.append(f"- Notes: {'; '.join(item.notes)}")
|
| 307 |
+
lines.append("")
|
| 308 |
+
|
| 309 |
+
return "\n".join(lines)
|
| 310 |
+
|
| 311 |
+
def _generate_air_filtration(self, calculations: dict) -> str:
|
| 312 |
+
"""Generate air filtration requirements section."""
|
| 313 |
+
air: AirFiltrationResult = calculations.get("air_filtration")
|
| 314 |
+
|
| 315 |
+
if not air:
|
| 316 |
+
return "## Air Filtration Requirements\n\n*Calculation unavailable.*"
|
| 317 |
+
|
| 318 |
+
return f"""## Air Filtration Requirements
|
| 319 |
+
|
| 320 |
+
Per NADCA ACR 2021, Section 3.6:
|
| 321 |
+
|
| 322 |
+
| Parameter | Value |
|
| 323 |
+
|-----------|-------|
|
| 324 |
+
| **Required ACH** | {air.required_ach} air changes per hour |
|
| 325 |
+
| **Total Volume** | {air.total_volume_cf:,.0f} CF |
|
| 326 |
+
| **Unit Capacity** | {air.unit_cfm:,} CFM |
|
| 327 |
+
| **Units Required** | {air.units_required} |
|
| 328 |
+
|
| 329 |
+
**Calculation:** {air.calculation_notes}
|
| 330 |
+
|
| 331 |
+
**Placement Notes:**
|
| 332 |
+
- Distribute units evenly throughout work area
|
| 333 |
+
- Ensure adequate negative air pressure
|
| 334 |
+
- Exhaust to exterior when possible"""
|
| 335 |
+
|
| 336 |
+
def _generate_sampling_plan(self, calculations: dict, session: SessionState) -> str:
|
| 337 |
+
"""Generate sampling plan section."""
|
| 338 |
+
sample: SampleDensityResult = calculations.get("sample_density")
|
| 339 |
+
|
| 340 |
+
if not sample:
|
| 341 |
+
return "## Sampling Plan\n\n*Calculation unavailable.*"
|
| 342 |
+
|
| 343 |
+
lines = ["## Sampling Plan", ""]
|
| 344 |
+
lines.append("### Pre-Cleaning Characterization")
|
| 345 |
+
lines.append("")
|
| 346 |
+
lines.append("| Sample Type | Quantity | Notes |")
|
| 347 |
+
lines.append("|-------------|----------|-------|")
|
| 348 |
+
lines.append(
|
| 349 |
+
f"| Tape Lifts | {sample.tape_lifts_min}-{sample.tape_lifts_max} | "
|
| 350 |
+
"Per surface type, per room"
|
| 351 |
+
)
|
| 352 |
+
lines.append(
|
| 353 |
+
f"| Surface Wipes | {sample.surface_wipes_min}-{sample.surface_wipes_max} | "
|
| 354 |
+
"Metals analysis"
|
| 355 |
+
)
|
| 356 |
+
if sample.ceiling_deck_samples > 0:
|
| 357 |
+
lines.append(
|
| 358 |
+
f"| Ceiling Deck | {sample.ceiling_deck_samples} | "
|
| 359 |
+
"Enhanced per FDAM §4.5"
|
| 360 |
+
)
|
| 361 |
+
lines.append("")
|
| 362 |
+
|
| 363 |
+
if sample.notes:
|
| 364 |
+
lines.append("**Notes:**")
|
| 365 |
+
for note in sample.notes:
|
| 366 |
+
lines.append(f"- {note}")
|
| 367 |
+
lines.append("")
|
| 368 |
+
|
| 369 |
+
lines.append("### Post-Cleaning Verification (PRV)")
|
| 370 |
+
lines.append("")
|
| 371 |
+
lines.append("PRV sampling locations should mirror pre-cleaning characterization.")
|
| 372 |
+
lines.append("Minimum 50% of original sample locations for initial clearance attempt.")
|
| 373 |
+
|
| 374 |
+
return "\n".join(lines)
|
| 375 |
+
|
| 376 |
+
def _generate_regulatory_section(self, calculations: dict) -> str:
|
| 377 |
+
"""Generate regulatory requirements section."""
|
| 378 |
+
flags: RegulatoryFlags = calculations.get("regulatory_flags")
|
| 379 |
+
|
| 380 |
+
lines = ["## Regulatory Requirements", ""]
|
| 381 |
+
|
| 382 |
+
if not flags or not flags.notes:
|
| 383 |
+
lines.append("*No specific regulatory flags identified.*")
|
| 384 |
+
return "\n".join(lines)
|
| 385 |
+
|
| 386 |
+
for note in flags.notes:
|
| 387 |
+
lines.append(f"- {note}")
|
| 388 |
+
|
| 389 |
+
if flags.lbp_survey_required:
|
| 390 |
+
lines.append("")
|
| 391 |
+
lines.append(
|
| 392 |
+
"**Lead-Based Paint:** Per 29 CFR 1926.62, LBP survey must be completed "
|
| 393 |
+
"prior to disturbance of painted surfaces in pre-1978 construction."
|
| 394 |
+
)
|
| 395 |
+
|
| 396 |
+
if flags.acm_survey_required or flags.acm_survey_recommended:
|
| 397 |
+
lines.append("")
|
| 398 |
+
action = "required" if flags.acm_survey_required else "recommended"
|
| 399 |
+
lines.append(
|
| 400 |
+
f"**Asbestos:** ACM survey {action} per NESHAP regulations. "
|
| 401 |
+
"No disturbance of suspect materials until survey complete."
|
| 402 |
+
)
|
| 403 |
+
|
| 404 |
+
return "\n".join(lines)
|
| 405 |
+
|
| 406 |
+
def _generate_thresholds_section(self, calculations: dict) -> str:
|
| 407 |
+
"""Generate clearance thresholds section."""
|
| 408 |
+
thresholds = calculations.get("metals_thresholds")
|
| 409 |
+
particulates = calculations.get("particulate_thresholds", {})
|
| 410 |
+
|
| 411 |
+
lines = ["## Clearance Thresholds", ""]
|
| 412 |
+
lines.append(f"**Facility Type:** {thresholds.facility_type if thresholds else 'N/A'}")
|
| 413 |
+
lines.append("")
|
| 414 |
+
|
| 415 |
+
if thresholds:
|
| 416 |
+
lines.append("### Metals (Surface Wipe)")
|
| 417 |
+
lines.append("")
|
| 418 |
+
lines.append("| Metal | Threshold | Unit |")
|
| 419 |
+
lines.append("|-------|-----------|------|")
|
| 420 |
+
lines.append(f"| Lead (Pb) | {thresholds.lead_ug_100cm2} | µg/100cm² |")
|
| 421 |
+
lines.append(f"| Cadmium (Cd) | {thresholds.cadmium_ug_100cm2} | µg/100cm² |")
|
| 422 |
+
lines.append(f"| Arsenic (As) | {thresholds.arsenic_ug_100cm2} | µg/100cm² |")
|
| 423 |
+
lines.append(f"| Chromium VI | {thresholds.chromium_vi_ug_100cm2} | µg/100cm² |")
|
| 424 |
+
lines.append(f"| Beryllium (Be) | {thresholds.beryllium_ug_100cm2} | µg/100cm² |")
|
| 425 |
+
lines.append("")
|
| 426 |
+
lines.append(f"*Source: {thresholds.source}*")
|
| 427 |
+
lines.append("")
|
| 428 |
+
|
| 429 |
+
if particulates:
|
| 430 |
+
lines.append("### Particulates (Tape Lift)")
|
| 431 |
+
lines.append("")
|
| 432 |
+
lines.append("| Particle Type | Threshold | Unit |")
|
| 433 |
+
lines.append("|---------------|-----------|------|")
|
| 434 |
+
ash_char = particulates.get("ash_char", {})
|
| 435 |
+
soot = particulates.get("aciniform_soot", {})
|
| 436 |
+
lines.append(
|
| 437 |
+
f"| Ash/Char | <{ash_char.get('clearance', 150)} | "
|
| 438 |
+
f"{ash_char.get('unit', 'cts/cm²')} |"
|
| 439 |
+
)
|
| 440 |
+
lines.append(
|
| 441 |
+
f"| Aciniform Soot | <{soot.get('clearance', 500)} | "
|
| 442 |
+
f"{soot.get('unit', 'cts/cm²')} |"
|
| 443 |
+
)
|
| 444 |
+
lines.append("")
|
| 445 |
+
lines.append(f"*Source: {ash_char.get('source', 'FDAM §1.5')}*")
|
| 446 |
+
|
| 447 |
+
return "\n".join(lines)
|
| 448 |
+
|
| 449 |
+
def _generate_footer(self) -> str:
|
| 450 |
+
"""Generate document footer with disclaimer."""
|
| 451 |
+
return f"""## Disclaimer
|
| 452 |
+
|
| 453 |
+
This document was generated using AI-assisted analysis per the Fire Damage Assessment
|
| 454 |
+
Methodology (FDAM) v4.0.1. All recommendations should be reviewed by a qualified
|
| 455 |
+
industrial hygienist before implementation.
|
| 456 |
+
|
| 457 |
+
**Important Notes:**
|
| 458 |
+
- Visual assessments require laboratory confirmation for definitive particle identification
|
| 459 |
+
- Threshold values are subject to regulatory updates
|
| 460 |
+
- Site-specific conditions may require deviation from standard protocols
|
| 461 |
+
- Reclean/retest procedures apply per FDAM §4.7 if clearance is not achieved
|
| 462 |
+
|
| 463 |
+
---
|
| 464 |
+
|
| 465 |
+
*Generated by FDAM AI Pipeline v4.0.1*
|
| 466 |
+
*{datetime.now().strftime('%Y-%m-%d %H:%M')}*"""
|
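A minimal end-to-end sketch of driving the generator directly, without the orchestrator in pipeline/main.py. Here `session`, `vision_results` (image id -> analysis dict), and `room_mapping` (image id -> room info) are assumed to already exist in the shapes the pipeline produces:

    from pipeline.calculations import FDAMCalculator
    from pipeline.dispositions import DispositionEngine
    from pipeline.generator import DocumentGenerator

    calculator = FDAMCalculator()
    engine = DispositionEngine()
    generator = DocumentGenerator(calculator=calculator, disposition_engine=engine)

    calculations = calculator.calculate_from_session(session)
    dispositions = engine.process_vision_results(vision_results, room_mapping)
    doc = generator.generate_sow(session, vision_results, dispositions, calculations)

    print(doc.title, f"({doc.word_count} words)")
    print(doc.markdown[:400])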
pipeline/main.py
ADDED
|
@@ -0,0 +1,334 @@
|
"""FDAM Pipeline Orchestrator.

Coordinates the 6-stage processing pipeline:
1. Input Validation
2. Vision Analysis
3. RAG Retrieval
4. FDAM Logic (Dispositions)
5. Calculations
6. Document Generation
"""

import logging
from dataclasses import dataclass, field
from datetime import datetime
from typing import Callable, Optional
from PIL import Image
import io

from ui.state import SessionState
from ui.components import image_store
from models.loader import get_models

logger = logging.getLogger(__name__)
from rag import FDAMRetriever, ChromaVectorStore

from .calculations import FDAMCalculator
from .dispositions import DispositionEngine, SurfaceDisposition
from .generator import DocumentGenerator, GeneratedDocument


@dataclass
class PipelineProgress:
    """Progress information for pipeline execution."""

    stage: int
    total_stages: int
    stage_name: str
    percent: float
    message: str


@dataclass
class VisionResult:
    """Result from vision analysis of a single image."""

    image_id: str
    filename: str
    room_id: str
    zone: dict
    condition: dict
    materials: list[dict]
    bounding_boxes: list[dict]
    raw_response: dict


@dataclass
class PipelineResult:
    """Complete result from pipeline execution."""

    success: bool
    session: SessionState
    vision_results: dict[str, VisionResult]
    dispositions: list[SurfaceDisposition]
    calculations: dict
    document: Optional[GeneratedDocument]
    annotated_images: list[tuple]  # (PIL.Image, caption)
    errors: list[str] = field(default_factory=list)
    warnings: list[str] = field(default_factory=list)
    execution_time_seconds: float = 0.0


ProgressCallback = Callable[[PipelineProgress], None]


class FDAMPipeline:
    """Main FDAM processing pipeline."""

    STAGES = [
        "Validating inputs",
        "Analyzing images",
        "Retrieving context",
        "Applying FDAM logic",
        "Running calculations",
        "Generating documents",
    ]

    def __init__(
        self,
        calculator: Optional[FDAMCalculator] = None,
        disposition_engine: Optional[DispositionEngine] = None,
        generator: Optional[DocumentGenerator] = None,
        retriever: Optional[FDAMRetriever] = None,
    ):
        """Initialize pipeline with optional component overrides.

        Args:
            calculator: FDAM calculator instance
            disposition_engine: Disposition engine instance
            generator: Document generator instance
            retriever: RAG retriever instance
        """
        self.calculator = calculator or FDAMCalculator()
        self._retriever = retriever
        self.disposition_engine = disposition_engine or DispositionEngine(
            retriever=self._retriever
        )
        self.generator = generator or DocumentGenerator(
            calculator=self.calculator,
            disposition_engine=self.disposition_engine,
            retriever=self._retriever,
        )

    @property
    def retriever(self) -> FDAMRetriever:
        """Get or create RAG retriever."""
        if self._retriever is None:
            try:
                vs = ChromaVectorStore(persist_directory="chroma_db")
                self._retriever = FDAMRetriever(vectorstore=vs)
            except Exception as e:
                logger.warning(f"ChromaDB init failed, using fallback retriever: {e}")
                self._retriever = FDAMRetriever()
        return self._retriever

    def execute(
        self,
        session: SessionState,
        progress_callback: Optional[ProgressCallback] = None,
    ) -> PipelineResult:
        """Execute the full FDAM pipeline.

        Args:
            session: Session state with all input data
            progress_callback: Optional callback for progress updates

        Returns:
            PipelineResult with all outputs
        """
        start_time = datetime.now()
        errors = []
        warnings = []

        def report_progress(stage: int, message: str = ""):
            if progress_callback:
                progress_callback(
                    PipelineProgress(
                        stage=stage,
                        total_stages=len(self.STAGES),
                        stage_name=self.STAGES[stage - 1] if stage > 0 else "Starting",
                        percent=stage / len(self.STAGES),
                        message=message,
                    )
                )

        # Stage 1: Input Validation
        report_progress(1, "Validating inputs...")
        can_generate, validation_errors = session.can_generate()

        # Check images in store
        expected_ids = [img.id for img in session.images]
        missing_ids = image_store.get_missing_ids(expected_ids)

        if not can_generate or missing_ids:
            errors.extend(validation_errors)
            if missing_ids:
                errors.append(f"{len(missing_ids)} image(s) need to be re-uploaded")

            return PipelineResult(
                success=False,
                session=session,
                vision_results={},
                dispositions=[],
                calculations={},
                document=None,
                annotated_images=[],
                errors=errors,
                execution_time_seconds=(datetime.now() - start_time).total_seconds(),
            )

        # Stage 2: Vision Analysis
        report_progress(2, "Analyzing images with AI...")
        model_stack = get_models()
        vision_results = {}
        annotated_images = []
        room_mapping = {}

        for i, img_meta in enumerate(session.images):
            img_bytes = image_store.get(img_meta.id)
            if not img_bytes:
                warnings.append(f"Image {img_meta.filename} not found in store")
                continue

            try:
                pil_image = Image.open(io.BytesIO(img_bytes))

                # Run vision analysis
                result = model_stack.vision.analyze_image(
                    pil_image,
                    img_meta.description or "",
                )

                vision_result = VisionResult(
                    image_id=img_meta.id,
                    filename=img_meta.filename,
                    room_id=img_meta.room_id,
                    zone=result.get("zone", {}),
                    condition=result.get("condition", {}),
                    materials=result.get("materials", []),
                    bounding_boxes=result.get("bounding_boxes", []),
                    raw_response=result,
                )
                vision_results[img_meta.id] = vision_result

                # Build room mapping
                room_info = next(
                    (r for r in session.rooms if r.id == img_meta.room_id),
                    None,
                )
                room_mapping[img_meta.id] = {
                    "name": room_info.name if room_info else "Unknown",
                    "id": img_meta.room_id,
                }

                # Create annotated image caption
                zone_class = result.get("zone", {}).get("classification", "N/A")
                zone_conf = result.get("zone", {}).get("confidence", 0)
                caption = f"{img_meta.filename}\nZone: {zone_class} ({zone_conf:.0%})"
                annotated_images.append((pil_image, caption))

                report_progress(
                    2,
                    f"Analyzed {i + 1}/{len(session.images)}: {img_meta.filename}",
                )

            except Exception as e:
                warnings.append(f"Error analyzing {img_meta.filename}: {e}")

        # Stage 3: RAG Retrieval
        report_progress(3, "Retrieving FDAM methodology context...")
        # RAG is integrated into disposition engine, just verify connection
        try:
            _ = self.retriever.retrieve("test connection", top_k=1)
        except Exception as e:
            warnings.append(f"RAG retrieval unavailable: {e}")

        # Stage 4: FDAM Logic (Dispositions)
        report_progress(4, "Applying disposition logic...")

        # Convert vision results to dict format for disposition engine
        vision_dict = {
            img_id: {
                "zone": vr.zone,
                "condition": vr.condition,
                "materials": vr.materials,
            }
            for img_id, vr in vision_results.items()
        }

        dispositions = self.disposition_engine.process_vision_results(
            vision_results=vision_dict,
            room_mapping=room_mapping,
        )

        # Stage 5: Calculations
        report_progress(5, "Running FDAM calculations...")
        calculations = self.calculator.calculate_from_session(session)

        # Stage 6: Document Generation
        report_progress(6, "Generating documents...")
        document = self.generator.generate_sow(
            session=session,
            vision_results=vision_dict,
            surface_dispositions=dispositions,
            calculations=calculations,
        )

        # Update session
        session.has_results = True
        session.results_generated_at = datetime.now().isoformat()
        session.update_timestamp()

        execution_time = (datetime.now() - start_time).total_seconds()

        return PipelineResult(
            success=True,
            session=session,
            vision_results=vision_results,
            dispositions=dispositions,
            calculations=calculations,
            document=document,
            annotated_images=annotated_images,
            errors=errors,
            warnings=warnings,
            execution_time_seconds=execution_time,
        )

    def generate_stats_dict(self, result: PipelineResult) -> dict:
        """Generate statistics dictionary for UI display.

        Args:
            result: Pipeline execution result

        Returns:
            Dictionary with stats for JSON display
        """
        calc = result.calculations
        air = calc.get("air_filtration")
        sample = calc.get("sample_density")
        reg = calc.get("regulatory_flags")
        thresholds = calc.get("metals_thresholds")

        # Count dispositions by type
        disp_counts = {}
        for d in result.dispositions:
            disp_counts[d.disposition] = disp_counts.get(d.disposition, 0) + 1

        return {
            "project_name": result.session.project.project_name,
            "facility_classification": result.session.project.facility_classification,
            "construction_era": result.session.project.construction_era,
            "total_rooms": len(result.session.rooms),
            "total_images": len(result.session.images),
            "images_analyzed": len(result.vision_results),
            "total_floor_area_sf": f"{calc.get('total_area_sf', 0):,.0f}",
            "total_volume_cf": f"{calc.get('total_volume_cf', 0):,.0f}",
            "air_scrubbers_required": air.units_required if air else 0,
            "tape_lifts_recommended": f"{sample.tape_lifts_min}-{sample.tape_lifts_max}" if sample else "N/A",
            "surface_wipes_recommended": f"{sample.surface_wipes_min}-{sample.surface_wipes_max}" if sample else "N/A",
            "disposition_counts": disp_counts,
            "regulatory_flags": reg.notes if reg else [],
            "lead_threshold": f"{thresholds.lead_ug_100cm2} µg/100cm²" if thresholds else "N/A",
            "execution_time": f"{result.execution_time_seconds:.1f}s",
            "warnings": result.warnings,
        }
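For orientation, a minimal driver for the orchestrator above could look like the sketch below. It assumes a SessionState that the UI tabs have already populated (project, rooms, images) and whose images are present in image_store; the helper name run_assessment is illustrative, not part of the commit.

# Minimal sketch: drive FDAMPipeline with a progress callback.
from pipeline.main import FDAMPipeline, PipelineProgress, PipelineResult
from ui.state import SessionState

def run_assessment(session: SessionState) -> PipelineResult:
    def on_progress(p: PipelineProgress) -> None:
        # e.g. "2/6 Analyzing images (33%): Analyzed 1/4: kitchen.jpg"
        print(f"{p.stage}/{p.total_stages} {p.stage_name} ({p.percent:.0%}): {p.message}")

    pipeline = FDAMPipeline()  # default calculator, dispositions, generator, retriever
    result = pipeline.execute(session, progress_callback=on_progress)
    if not result.success:
        print("Validation failed:", "; ".join(result.errors))
    return result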
pipeline/pdf_generator.py
ADDED
@@ -0,0 +1,315 @@
"""PDF Generator using WeasyPrint.

Converts Markdown SOW documents to professional PDF format.
Uses markdown → HTML → PDF pipeline with WeasyPrint.
"""

import tempfile
from dataclasses import dataclass
from pathlib import Path
from typing import Optional
import markdown


@dataclass
class PDFResult:
    """Result of PDF generation."""

    success: bool
    pdf_path: Optional[str]
    error_message: Optional[str] = None
    file_size_bytes: int = 0


# Professional CSS styling for SOW documents
SOW_CSS = """
@page {
    size: letter;
    margin: 0.75in;
    @top-center {
        content: "FDAM Assessment Report";
        font-size: 9pt;
        color: #666;
    }
    @bottom-center {
        content: "Page " counter(page) " of " counter(pages);
        font-size: 9pt;
        color: #666;
    }
}

body {
    font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;
    font-size: 11pt;
    line-height: 1.5;
    color: #333;
}

h1 {
    font-size: 20pt;
    color: #1a1a1a;
    border-bottom: 2px solid #0066cc;
    padding-bottom: 8px;
    margin-top: 0;
}

h2 {
    font-size: 14pt;
    color: #0066cc;
    margin-top: 20px;
    border-bottom: 1px solid #ddd;
    padding-bottom: 4px;
}

h3 {
    font-size: 12pt;
    color: #333;
    margin-top: 15px;
}

table {
    width: 100%;
    border-collapse: collapse;
    margin: 15px 0;
    font-size: 10pt;
}

th {
    background-color: #0066cc;
    color: white;
    padding: 8px 10px;
    text-align: left;
    font-weight: bold;
}

td {
    padding: 6px 10px;
    border-bottom: 1px solid #ddd;
}

tr:nth-child(even) {
    background-color: #f8f9fa;
}

tr:hover {
    background-color: #e9ecef;
}

ul, ol {
    margin: 10px 0;
    padding-left: 25px;
}

li {
    margin: 4px 0;
}

strong {
    color: #1a1a1a;
}

code {
    background-color: #f4f4f4;
    padding: 2px 5px;
    border-radius: 3px;
    font-size: 10pt;
}

hr {
    border: none;
    border-top: 1px solid #ddd;
    margin: 20px 0;
}

.disclaimer {
    background-color: #fff3cd;
    border: 1px solid #ffc107;
    padding: 12px;
    border-radius: 4px;
    font-size: 10pt;
    margin-top: 20px;
}

em {
    color: #666;
}
"""


class PDFGenerator:
    """Generates PDF documents from Markdown using WeasyPrint."""

    def __init__(self, custom_css: Optional[str] = None):
        """Initialize PDF generator.

        Args:
            custom_css: Optional custom CSS to override default styling
        """
        self.css = custom_css or SOW_CSS
        self._weasyprint_available = None

    @property
    def weasyprint_available(self) -> bool:
        """Check if WeasyPrint is available."""
        if self._weasyprint_available is None:
            try:
                from weasyprint import HTML
                self._weasyprint_available = True
            except ImportError:
                self._weasyprint_available = False
        return self._weasyprint_available

    def markdown_to_html(self, markdown_content: str) -> str:
        """Convert Markdown to HTML with styling.

        Args:
            markdown_content: Markdown text

        Returns:
            Complete HTML document with CSS
        """
        # Convert markdown to HTML
        md = markdown.Markdown(
            extensions=[
                "tables",
                "fenced_code",
                "toc",
            ]
        )
        html_body = md.convert(markdown_content)

        # Wrap in complete HTML document with CSS
        html = f"""<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<style>
{self.css}
</style>
</head>
<body>
{html_body}
</body>
</html>"""
        return html

    def generate_pdf(
        self,
        markdown_content: str,
        output_path: Optional[str] = None,
    ) -> PDFResult:
        """Generate PDF from Markdown content.

        Args:
            markdown_content: Markdown text to convert
            output_path: Optional output file path. If None, uses temp file.

        Returns:
            PDFResult with success status and file path
        """
        if not self.weasyprint_available:
            return PDFResult(
                success=False,
                pdf_path=None,
                error_message="WeasyPrint is not installed. Run: pip install weasyprint",
            )

        try:
            from weasyprint import HTML

            # Convert markdown to styled HTML
            html_content = self.markdown_to_html(markdown_content)

            # Determine output path
            if output_path is None:
                output_file = tempfile.NamedTemporaryFile(
                    suffix=".pdf",
                    delete=False,
                    prefix="SOW_",
                )
                output_path = output_file.name
                output_file.close()

            # Generate PDF
            HTML(string=html_content).write_pdf(output_path)

            # Verify file was created
            pdf_path = Path(output_path)
            if not pdf_path.exists():
                return PDFResult(
                    success=False,
                    pdf_path=None,
                    error_message="PDF file was not created",
                )

            return PDFResult(
                success=True,
                pdf_path=str(pdf_path),
                file_size_bytes=pdf_path.stat().st_size,
            )

        except Exception as e:
            return PDFResult(
                success=False,
                pdf_path=None,
                error_message=f"PDF generation failed: {str(e)}",
            )

    def generate_html(
        self,
        markdown_content: str,
        output_path: Optional[str] = None,
    ) -> tuple[bool, Optional[str], Optional[str]]:
        """Generate HTML from Markdown (fallback if PDF fails).

        Args:
            markdown_content: Markdown text
            output_path: Optional output path

        Returns:
            Tuple of (success, file_path, error_message)
        """
        try:
            html_content = self.markdown_to_html(markdown_content)

            if output_path is None:
                output_file = tempfile.NamedTemporaryFile(
                    mode="w",
                    suffix=".html",
                    delete=False,
                    prefix="SOW_",
                    encoding="utf-8",
                )
                output_path = output_file.name
                output_file.write(html_content)
                output_file.close()
            else:
                with open(output_path, "w", encoding="utf-8") as f:
                    f.write(html_content)

            return True, output_path, None

        except Exception as e:
            return False, None, str(e)


def generate_sow_pdf(
    markdown_content: str,
    project_name: str,
    output_path: Optional[str] = None,
) -> PDFResult:
    """Convenience function to generate SOW PDF.

    Args:
        markdown_content: SOW markdown content
        project_name: Project name for filename
        output_path: Optional output path

    Returns:
        PDFResult with success status
    """
    generator = PDFGenerator()
    return generator.generate_pdf(
        markdown_content=markdown_content,
        output_path=output_path,
    )
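A minimal usage sketch for the generator above, with the HTML fallback used when WeasyPrint is unavailable; the Markdown content and output paths here are illustrative only.

# Minimal sketch: render a Markdown SOW to PDF, fall back to HTML on failure.
from pipeline.pdf_generator import PDFGenerator

sow_md = "# Scope of Work\n\n| Room | Disposition |\n|------|-------------|\n| Kitchen | Clean |"
gen = PDFGenerator()

result = gen.generate_pdf(sow_md, output_path="sow_draft.pdf")
if result.success:
    print(f"PDF written to {result.pdf_path} ({result.file_size_bytes} bytes)")
else:
    ok, html_path, err = gen.generate_html(sow_md, output_path="sow_draft.html")
    print("PDF failed:", result.error_message, "- HTML fallback:", html_path if ok else err)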
rag/__init__.py
ADDED
@@ -0,0 +1,16 @@
"""RAG (Retrieval Augmented Generation) module for FDAM AI Pipeline.

This module provides document chunking, vector storage, and retrieval
for the FDAM knowledge base.
"""

from .chunker import SemanticChunker, Chunk
from .vectorstore import ChromaVectorStore
from .retriever import FDAMRetriever

__all__ = [
    "SemanticChunker",
    "Chunk",
    "ChromaVectorStore",
    "FDAMRetriever",
]
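Taken together, the package exposes a small surface. A minimal query sketch, assuming the ChromaDB index has already been built (see rag/index_builder.py further down in this diff); the query string is illustrative.

# Minimal sketch: query the knowledge base through the package exports.
from rag import ChromaVectorStore, FDAMRetriever

retriever = FDAMRetriever(vectorstore=ChromaVectorStore(persist_directory="chroma_db"))
for hit in retriever.retrieve("lead clearance threshold for childcare facilities", top_k=3):
    print(f"{hit.final_score:.2f} [{hit.priority}] {hit.source} :: {hit.section}")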
rag/chunker.py
ADDED
@@ -0,0 +1,432 @@
"""Semantic chunker with table preservation for FDAM knowledge base.

Chunking rules:
- Keep markdown tables intact (never split)
- Preserve headers with content for context
- Target 400-600 tokens per chunk
- Include metadata (source, category, section, priority)
"""

import re
from dataclasses import dataclass, field
from typing import Literal
from pathlib import Path


@dataclass
class Chunk:
    """A chunk of text with metadata for RAG indexing."""

    id: str
    text: str
    source: str  # Filename
    category: Literal[
        "methodology",
        "thresholds",
        "lab-methods",
        "cleaning-procedures",
        "wildfire",
        "safety",
    ]
    section: str  # Section header path (e.g., "4.1 Zone Classification")
    priority: Literal["primary", "reference-threshold", "reference-narrative"]
    content_type: Literal["narrative", "table", "list", "mixed"]
    keywords: list[str] = field(default_factory=list)

    def to_metadata(self) -> dict:
        """Convert to metadata dict for ChromaDB."""
        return {
            "source": self.source,
            "category": self.category,
            "section": self.section,
            "priority": self.priority,
            "content_type": self.content_type,
            "keywords": ",".join(self.keywords),
        }


class SemanticChunker:
    """Chunks markdown documents while preserving tables and semantic structure."""

    # Approximate tokens per character (conservative estimate)
    CHARS_PER_TOKEN = 4
    TARGET_MIN_TOKENS = 400
    TARGET_MAX_TOKENS = 600

    def __init__(self):
        self.target_min_chars = self.TARGET_MIN_TOKENS * self.CHARS_PER_TOKEN
        self.target_max_chars = self.TARGET_MAX_TOKENS * self.CHARS_PER_TOKEN

    def chunk_document(
        self,
        text: str,
        source: str,
        category: Literal[
            "methodology",
            "thresholds",
            "lab-methods",
            "cleaning-procedures",
            "wildfire",
            "safety",
        ],
        priority: Literal["primary", "reference-threshold", "reference-narrative"],
    ) -> list[Chunk]:
        """Chunk a markdown document into semantic units.

        Args:
            text: Full document text (markdown format)
            source: Source filename
            category: Document category
            priority: Document priority level

        Returns:
            List of Chunk objects ready for indexing
        """
        # Split into sections by headers
        sections = self._split_by_headers(text)

        chunks = []
        chunk_counter = 0

        # Accumulator that persists across sections
        current_chunk_text = ""
        current_content_types: set[str] = set()
        current_section = "Introduction"  # Track primary section for metadata

        for section_header, section_content in sections:
            # Split section into blocks (paragraphs, tables, lists)
            blocks = self._split_into_blocks(section_content)

            for block_text, block_type in blocks:
                block_len = len(block_text)

                # Tables are never split - flush current and add table as own chunk
                if block_type == "table":
                    # Flush current chunk if it meets minimum size
                    if current_chunk_text.strip() and len(current_chunk_text) >= self.target_min_chars:
                        chunks.append(
                            self._create_chunk(
                                chunk_id=f"{source}_{chunk_counter}",
                                text=current_chunk_text.strip(),
                                source=source,
                                category=category,
                                section=current_section,
                                priority=priority,
                                content_types=current_content_types,
                            )
                        )
                        chunk_counter += 1
                        current_chunk_text = ""
                        current_content_types = set()
                        current_section = section_header
                    elif current_chunk_text.strip():
                        # Below minimum - prepend to table context
                        pass  # Keep accumulating, table will have its own chunk

                    # Add table as its own chunk (tables always standalone)
                    table_text = f"{section_header}\n\n{block_text}".strip()
                    # If we have small accumulated content, prepend it to give context
                    if current_chunk_text.strip() and len(current_chunk_text) < self.target_min_chars:
                        table_text = current_chunk_text.strip() + "\n\n" + table_text
                        current_chunk_text = ""
                        current_content_types = set()

                    chunks.append(
                        self._create_chunk(
                            chunk_id=f"{source}_{chunk_counter}",
                            text=table_text,
                            source=source,
                            category=category,
                            section=section_header,
                            priority=priority,
                            content_types={"table"},
                        )
                    )
                    chunk_counter += 1
                    current_section = section_header
                    continue

                # Check if adding this block exceeds target max
                potential_len = len(current_chunk_text) + block_len + len(section_header) + 4

                if potential_len > self.target_max_chars and len(current_chunk_text) >= self.target_min_chars:
                    # Flush current chunk - it's large enough
                    chunks.append(
                        self._create_chunk(
                            chunk_id=f"{source}_{chunk_counter}",
                            text=current_chunk_text.strip(),
                            source=source,
                            category=category,
                            section=current_section,
                            priority=priority,
                            content_types=current_content_types,
                        )
                    )
                    chunk_counter += 1
                    # Start new chunk with section header
                    current_chunk_text = f"{section_header}\n\n"
                    current_content_types = set()
                    current_section = section_header

                # Add section header if starting fresh or new section
                if not current_chunk_text.strip():
                    current_chunk_text = f"{section_header}\n\n"
                    current_section = section_header
                elif section_header != current_section and section_header not in current_chunk_text:
                    # Add new section header inline for context
                    current_chunk_text += f"\n{section_header}\n\n"

                current_chunk_text += block_text + "\n\n"
                current_content_types.add(block_type)

        # Flush remaining content (regardless of size - it's the end)
        if current_chunk_text.strip():
            chunks.append(
                self._create_chunk(
                    chunk_id=f"{source}_{chunk_counter}",
                    text=current_chunk_text.strip(),
                    source=source,
                    category=category,
                    section=current_section,
                    priority=priority,
                    content_types=current_content_types,
                )
            )

        return chunks

    def _split_by_headers(self, text: str) -> list[tuple[str, str]]:
        """Split document by markdown headers (## and ###).

        Returns list of (header, content) tuples.
        """
        # Match ## or ### headers
        header_pattern = r"^(#{2,3}\s+.+)$"
        lines = text.split("\n")

        sections = []
        current_header = "Introduction"
        current_content = []

        for line in lines:
            if re.match(header_pattern, line):
                # Save previous section
                if current_content:
                    sections.append((current_header, "\n".join(current_content)))
                current_header = line.strip()
                current_content = []
            else:
                current_content.append(line)

        # Save final section
        if current_content:
            sections.append((current_header, "\n".join(current_content)))

        return sections

    def _split_into_blocks(self, text: str) -> list[tuple[str, str]]:
        """Split section content into blocks (paragraphs, tables, lists).

        Returns list of (block_text, block_type) tuples.
        """
        blocks = []
        lines = text.split("\n")
        current_block = []
        current_type = "narrative"
        in_table = False

        for line in lines:
            # Detect table start/end
            if line.strip().startswith("|") and "|" in line[1:]:
                if not in_table:
                    # Flush current block
                    if current_block:
                        block_text = "\n".join(current_block).strip()
                        if block_text:
                            blocks.append((block_text, current_type))
                        current_block = []
                    in_table = True
                    current_type = "table"
                current_block.append(line)
            elif in_table:
                # Table ended
                block_text = "\n".join(current_block).strip()
                if block_text:
                    blocks.append((block_text, "table"))
                current_block = [line] if line.strip() else []
                in_table = False
                current_type = "narrative"
            elif line.strip().startswith(("- ", "* ", "1. ", "2. ", "3. ")):
                # List item
                if current_type != "list" and current_block:
                    block_text = "\n".join(current_block).strip()
                    if block_text:
                        blocks.append((block_text, current_type))
                    current_block = []
                current_type = "list"
                current_block.append(line)
            elif line.strip() == "" and current_block:
                # Paragraph break
                if not in_table:
                    block_text = "\n".join(current_block).strip()
                    if block_text:
                        blocks.append((block_text, current_type))
                    current_block = []
                    current_type = "narrative"
            else:
                if current_type == "list" and not line.strip().startswith(
                    ("- ", "* ", " ")
                ):
                    # End of list
                    block_text = "\n".join(current_block).strip()
                    if block_text:
                        blocks.append((block_text, "list"))
                    current_block = []
                    current_type = "narrative"
                current_block.append(line)

        # Flush remaining
        if current_block:
            block_text = "\n".join(current_block).strip()
            if block_text:
                blocks.append((block_text, current_type))

        return blocks

    def _create_chunk(
        self,
        chunk_id: str,
        text: str,
        source: str,
        category: str,
        section: str,
        priority: str,
        content_types: set[str],
    ) -> Chunk:
        """Create a Chunk object with extracted keywords."""
        # Determine primary content type
        if "table" in content_types:
            content_type = "table"
        elif "list" in content_types and "narrative" in content_types:
            content_type = "mixed"
        elif "list" in content_types:
            content_type = "list"
        else:
            content_type = "narrative"

        # Extract keywords from text
        keywords = self._extract_keywords(text)

        return Chunk(
            id=chunk_id,
            text=text,
            source=source,
            category=category,
            section=section,
            priority=priority,
            content_type=content_type,
            keywords=keywords,
        )

    def _extract_keywords(self, text: str) -> list[str]:
        """Extract relevant keywords from chunk text."""
        # Domain-specific keywords to look for
        domain_terms = [
            # Zone classifications
            "burn zone",
            "near-field",
            "far-field",
            # Condition levels
            "background",
            "light",
            "moderate",
            "heavy",
            "structural damage",
            # Dispositions
            "no action",
            "clean",
            "evaluate",
            "remove",
            "remove/repair",
            # Materials
            "soot",
            "char",
            "ash",
            "particulate",
            "aciniform",
            # Thresholds
            "lead",
            "cadmium",
            "arsenic",
            "metals",
            "µg/100cm²",
            "cts/cm²",
            # Facility types
            "operational",
            "non-operational",
            "public",
            "childcare",
            # Standards
            "ach",
            "nadca",
            "epa",
            "hud",
            "osha",
            # Sampling
            "sampling",
            "wipe",
            "bulk",
            "air",
            "clearance",
            # Lab methods
            "plm",
            "icp-ms",
            "xrf",
            "tapelift",
            # Actions
            "hepa",
            "vacuum",
            "deodorization",
            "encapsulation",
        ]

        text_lower = text.lower()
        found_keywords = []

        for term in domain_terms:
            if term in text_lower:
                found_keywords.append(term)

        return found_keywords[:10]  # Limit to top 10


def chunk_file(
    filepath: Path,
    category: Literal[
        "methodology",
        "thresholds",
        "lab-methods",
        "cleaning-procedures",
        "wildfire",
        "safety",
    ],
    priority: Literal["primary", "reference-threshold", "reference-narrative"],
) -> list[Chunk]:
    """Convenience function to chunk a markdown file.

    Args:
        filepath: Path to markdown file
        category: Document category
        priority: Document priority level

    Returns:
        List of Chunk objects
    """
    chunker = SemanticChunker()
    text = filepath.read_text(encoding="utf-8")
    return chunker.chunk_document(
        text=text,
        source=filepath.name,
        category=category,
        priority=priority,
    )
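A minimal sketch of chunking a single knowledge-base file with the helper above; the path is one of the documents configured in rag/index_builder.py, and the category/priority values must be among the Literal options defined on Chunk.

# Minimal sketch: chunk one knowledge-base document and inspect the result.
from pathlib import Path
from rag.chunker import chunk_file

chunks = chunk_file(
    Path("RAG-KB/FDAM_v4_METHODOLOGY.md"),
    category="methodology",
    priority="primary",
)
for c in chunks[:3]:
    print(c.id, c.content_type, c.section, c.keywords)
print(f"{len(chunks)} chunks total")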
rag/index_builder.py
ADDED
@@ -0,0 +1,187 @@
"""Index builder for FDAM RAG knowledge base.

Processes markdown documents from RAG-KB/ and indexes them in ChromaDB.

Usage:
    python -m rag.index_builder [--rebuild]
"""

import argparse
from pathlib import Path

from rag.chunker import SemanticChunker, Chunk
from rag.vectorstore import ChromaVectorStore


# Document configuration: filename -> (category, priority)
DOCUMENT_CONFIG = {
    # PRIMARY - FDAM Methodology (authoritative source)
    "FDAM_v4_METHODOLOGY.md": ("methodology", "primary"),
    # REFERENCE - Threshold Tables (critical for metals clearance)
    "Metals clearance criteria-QVC.md": ("thresholds", "reference-threshold"),
    # REFERENCE - Narrative (supporting documentation)
    "air-o-cell-method-guide-atlas.md": ("lab-methods", "reference-narrative"),
    "Industrial Hygiene Lab Services Guide.md": ("lab-methods", "reference-narrative"),
    "Fire Remediation Processes and Methodologies_ A Review of Industry-Endorsed Standards.md": (
        "cleaning-procedures",
        "reference-narrative",
    ),
    "Technical Guide for Wildfire Restoration - Key Information.md": (
        "wildfire",
        "reference-narrative",
    ),
    "wildfire_soot_particulate_removal_full_text_extraction.md": (
        "wildfire",
        "reference-narrative",
    ),
}

# Files to skip (per user decision)
SKIP_FILES = {
    "Lead Contamination in Indoor Firing_Gun Ranges _ Atlantic Environmental.pdf",
}


def get_rag_kb_path() -> Path:
    """Get path to RAG-KB directory."""
    # Try relative to this file first
    this_dir = Path(__file__).parent
    rag_kb = this_dir.parent / "RAG-KB"
    if rag_kb.exists():
        return rag_kb

    # Try from current working directory
    rag_kb = Path("RAG-KB")
    if rag_kb.exists():
        return rag_kb

    raise FileNotFoundError("Could not find RAG-KB directory")


def get_chroma_path() -> Path:
    """Get path to ChromaDB persistence directory."""
    this_dir = Path(__file__).parent
    chroma_path = this_dir.parent / "chroma_db"
    return chroma_path


def build_index(rebuild: bool = False) -> dict:
    """Build the RAG index from RAG-KB documents.

    Args:
        rebuild: If True, clear existing index before building

    Returns:
        Statistics about the indexing operation
    """
    rag_kb_path = get_rag_kb_path()
    chroma_path = get_chroma_path()

    print(f"RAG-KB path: {rag_kb_path}")
    print(f"ChromaDB path: {chroma_path}")

    # Initialize components
    chunker = SemanticChunker()
    vectorstore = ChromaVectorStore(persist_directory=str(chroma_path))

    if rebuild:
        print("Rebuilding index - clearing existing data...")
        vectorstore.clear()

    stats = {
        "documents_processed": 0,
        "documents_skipped": 0,
        "chunks_created": 0,
        "errors": [],
    }

    # Process markdown files
    for md_file in rag_kb_path.glob("*.md"):
        filename = md_file.name

        # Skip files not in config or in skip list
        if filename in SKIP_FILES:
            print(f"Skipping (excluded): {filename}")
            stats["documents_skipped"] += 1
            continue

        if filename not in DOCUMENT_CONFIG:
            print(f"Skipping (not configured): {filename}")
            stats["documents_skipped"] += 1
            continue

        category, priority = DOCUMENT_CONFIG[filename]
        print(f"Processing: {filename} ({category}, {priority})")

        try:
            # Read and chunk document
            text = md_file.read_text(encoding="utf-8")
            chunks = chunker.chunk_document(
                text=text,
                source=filename,
                category=category,
                priority=priority,
            )

            # Check if source already indexed (for incremental updates)
            existing_count = vectorstore.delete_by_source(filename)
            if existing_count > 0:
                print(f"  Replaced {existing_count} existing chunks")

            # Add to vectorstore
            added = vectorstore.add_chunks(chunks)
            print(f"  Added {added} chunks")

            stats["documents_processed"] += 1
            stats["chunks_created"] += added

        except Exception as e:
            error_msg = f"Error processing {filename}: {e}"
            print(f"  ERROR: {e}")
            stats["errors"].append(error_msg)

    # Report on PDFs that need conversion
    for pdf_file in rag_kb_path.glob("*.pdf"):
        if pdf_file.name not in SKIP_FILES:
            print(f"Note: PDF needs conversion to .md: {pdf_file.name}")

    # Print summary
    print("\n" + "=" * 50)
    print("Index Build Complete")
    print("=" * 50)
    print(f"Documents processed: {stats['documents_processed']}")
    print(f"Documents skipped: {stats['documents_skipped']}")
    print(f"Total chunks created: {stats['chunks_created']}")

    if stats["errors"]:
        print(f"Errors: {len(stats['errors'])}")
        for err in stats["errors"]:
            print(f"  - {err}")

    # Print collection stats
    collection_stats = vectorstore.get_stats()
    print(f"\nCollection stats:")
    print(f"  Total chunks in DB: {collection_stats['total_chunks']}")
    print(f"  Categories: {collection_stats['categories']}")
    print(f"  Priorities: {collection_stats['priorities']}")

    return stats


def main():
    """CLI entry point."""
    parser = argparse.ArgumentParser(
        description="Build FDAM RAG knowledge base index"
    )
    parser.add_argument(
        "--rebuild",
        action="store_true",
        help="Clear existing index and rebuild from scratch",
    )
    args = parser.parse_args()

    build_index(rebuild=args.rebuild)


if __name__ == "__main__":
    main()
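The same build can also be triggered programmatically, equivalent to running python -m rag.index_builder --rebuild:

# Minimal sketch: rebuild the index from Python instead of the CLI.
from rag.index_builder import build_index

stats = build_index(rebuild=True)
if stats["errors"]:
    raise SystemExit(f"Indexing finished with {len(stats['errors'])} error(s)")
print(f"Indexed {stats['chunks_created']} chunks from {stats['documents_processed']} documents")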
rag/retriever.py
ADDED
@@ -0,0 +1,380 @@
"""FDAM retriever with priority weighting and reranking.

Implements tiered retrieval:
1. Vector similarity search
2. Priority weighting (primary > reference-threshold > reference-narrative)
3. Optional reranking for production
"""

from typing import Optional
from dataclasses import dataclass

from config.settings import settings
from .vectorstore import ChromaVectorStore


@dataclass
class RetrievalResult:
    """A single retrieval result with relevance score."""

    chunk_id: str
    text: str
    source: str
    category: str
    section: str
    priority: str
    content_type: str
    keywords: list[str]
    similarity_score: float  # 0-1, higher is better
    weighted_score: float  # After priority weighting
    final_score: float  # After reranking (if applied)

    def to_dict(self) -> dict:
        """Convert to dictionary."""
        return {
            "chunk_id": self.chunk_id,
            "text": self.text,
            "source": self.source,
            "category": self.category,
            "section": self.section,
            "priority": self.priority,
            "content_type": self.content_type,
            "keywords": self.keywords,
            "similarity_score": self.similarity_score,
            "weighted_score": self.weighted_score,
            "final_score": self.final_score,
        }


class MockReranker:
    """Mock reranker for local development.

    Simply returns scores based on keyword overlap.
    """

    def rerank(
        self,
        query: str,
        documents: list[str],
    ) -> list[float]:
        """Score documents based on keyword overlap with query.

        Args:
            query: Query text
            documents: List of document texts

        Returns:
            List of scores (0-1) for each document
        """
        query_words = set(query.lower().split())
        scores = []

        for doc in documents:
            doc_words = set(doc.lower().split())
            # Jaccard-like overlap score
            overlap = len(query_words & doc_words)
            total = len(query_words | doc_words)
            score = overlap / total if total > 0 else 0.0
            scores.append(score)

        return scores


class RealReranker:
    """Real reranker using Qwen3-VL-Reranker-8B.

    Loaded on-demand when MOCK_MODELS=false.
    """

    def __init__(self):
        self.model = None
        self.tokenizer = None

    def _load_model(self):
        """Lazy load the reranker model."""
        if self.model is not None:
            return

        import torch
        from transformers import AutoModelForSequenceClassification, AutoTokenizer

        model_name = "Qwen/Qwen3-VL-Reranker-8B"
        print(f"Loading reranker model: {model_name}")

        self.tokenizer = AutoTokenizer.from_pretrained(
            model_name,
            trust_remote_code=True,
        )
        self.model = AutoModelForSequenceClassification.from_pretrained(
            model_name,
            torch_dtype=torch.bfloat16,
            device_map="auto",
            trust_remote_code=True,
        )
        self.model.eval()

    def rerank(
        self,
        query: str,
        documents: list[str],
    ) -> list[float]:
        """Score documents using the reranker model.

        Args:
            query: Query text
            documents: List of document texts

        Returns:
            List of scores for each document
        """
        self._load_model()

        import torch

        scores = []
        with torch.no_grad():
            for doc in documents:
                inputs = self.tokenizer(
                    query,
                    doc,
                    return_tensors="pt",
                    truncation=True,
                    max_length=512,
                    padding=True,
                )
                inputs = {k: v.to(self.model.device) for k, v in inputs.items()}

                outputs = self.model(**inputs)
                # Sigmoid to get 0-1 score
                score = torch.sigmoid(outputs.logits).squeeze().item()
                scores.append(score)

        return scores


def get_reranker():
    """Get appropriate reranker based on settings."""
    if settings.mock_models:
        return MockReranker()
    return RealReranker()


class FDAMRetriever:
    """FDAM-specific retriever with priority weighting.

    Priority weights:
    - primary: 1.0 (FDAM methodology)
    - reference-threshold: 0.9 (Threshold tables)
    - reference-narrative: 0.8 (Supporting documentation)
    """

    PRIORITY_WEIGHTS = {
        "primary": 1.0,
        "reference-threshold": 0.9,
        "reference-narrative": 0.8,
    }

    def __init__(
        self,
        vectorstore: Optional[ChromaVectorStore] = None,
        reranker=None,
        use_reranking: bool = True,
    ):
        """Initialize retriever.

        Args:
            vectorstore: ChromaDB vector store instance.
                If None, creates default instance.
            reranker: Reranker instance. If None, uses appropriate default.
            use_reranking: Whether to apply reranking step.
        """
        self.vectorstore = vectorstore or ChromaVectorStore()
        self.reranker = reranker if reranker is not None else get_reranker()
        self.use_reranking = use_reranking

    def retrieve(
        self,
        query: str,
        top_k: int = 5,
        category_filter: Optional[str] = None,
        priority_filter: Optional[str] = None,
        include_scores: bool = True,
    ) -> list[RetrievalResult]:
        """Retrieve relevant chunks for a query.

        Args:
            query: Query text
            top_k: Number of results to return
            category_filter: Optional category to filter by
            priority_filter: Optional priority to filter by
            include_scores: Whether to include score details

        Returns:
            List of RetrievalResult objects, sorted by final_score descending
        """
        # Build metadata filter
        where_filter = None
        if category_filter or priority_filter:
            where_filter = {}
            if category_filter:
                where_filter["category"] = category_filter
            if priority_filter:
                where_filter["priority"] = priority_filter

        # Fetch more results than needed for reranking
        fetch_k = top_k * 3 if self.use_reranking else top_k

        # Query vector store
        raw_results = self.vectorstore.query(
            query_text=query,
            n_results=fetch_k,
            where=where_filter,
        )

        if not raw_results:
            return []

        # Convert to RetrievalResult objects with priority weighting
        results = []
        for r in raw_results:
            # Convert distance to similarity (cosine distance: 0 = identical)
            similarity = 1.0 - r["distance"]

            # Apply priority weight
            priority = r["metadata"].get("priority", "reference-narrative")
            weight = self.PRIORITY_WEIGHTS.get(priority, 0.8)
            weighted_score = similarity * weight

            # Parse keywords
            keywords_str = r["metadata"].get("keywords", "")
            keywords = keywords_str.split(",") if keywords_str else []

            results.append(
                RetrievalResult(
                    chunk_id=r["id"],
                    text=r["document"],
                    source=r["metadata"].get("source", "unknown"),
                    category=r["metadata"].get("category", "unknown"),
                    section=r["metadata"].get("section", "unknown"),
                    priority=priority,
                    content_type=r["metadata"].get("content_type", "narrative"),
                    keywords=keywords,
                    similarity_score=similarity,
                    weighted_score=weighted_score,
                    final_score=weighted_score,  # Will be updated by reranking
                )
            )

        # Apply reranking if enabled
        if self.use_reranking and results:
            documents = [r.text for r in results]
            rerank_scores = self.reranker.rerank(query, documents)

            # Combine weighted score with rerank score
            # Final = 0.6 * weighted + 0.4 * rerank
            for i, result in enumerate(results):
                rerank_score = rerank_scores[i]
                result.final_score = 0.6 * result.weighted_score + 0.4 * rerank_score

        # Sort by final score (descending) and take top_k
        results.sort(key=lambda x: x.final_score, reverse=True)
        return results[:top_k]

    def retrieve_for_context(
|
| 284 |
+
self,
|
| 285 |
+
query: str,
|
| 286 |
+
top_k: int = 5,
|
| 287 |
+
) -> str:
|
| 288 |
+
"""Retrieve and format chunks as context string for LLM.
|
| 289 |
+
|
| 290 |
+
Args:
|
| 291 |
+
query: Query text
|
| 292 |
+
top_k: Number of chunks to include
|
| 293 |
+
|
| 294 |
+
Returns:
|
| 295 |
+
Formatted context string with source citations
|
| 296 |
+
"""
|
| 297 |
+
results = self.retrieve(query, top_k=top_k)
|
| 298 |
+
|
| 299 |
+
if not results:
|
| 300 |
+
return "No relevant context found."
|
| 301 |
+
|
| 302 |
+
context_parts = []
|
| 303 |
+
for i, r in enumerate(results, 1):
|
| 304 |
+
context_parts.append(
|
| 305 |
+
f"[{i}] Source: {r.source} | Section: {r.section}\n{r.text}"
|
| 306 |
+
)
|
| 307 |
+
|
| 308 |
+
return "\n\n---\n\n".join(context_parts)
|
| 309 |
+
|
| 310 |
+
def retrieve_thresholds(
|
| 311 |
+
self,
|
| 312 |
+
material_type: str,
|
| 313 |
+
facility_type: str,
|
| 314 |
+
) -> list[RetrievalResult]:
|
| 315 |
+
"""Retrieve threshold values for a specific material and facility type.
|
| 316 |
+
|
| 317 |
+
Convenience method for threshold lookups.
|
| 318 |
+
|
| 319 |
+
Args:
|
| 320 |
+
material_type: Type of material (e.g., "lead", "soot", "char")
|
| 321 |
+
facility_type: Facility classification
|
| 322 |
+
|
| 323 |
+
Returns:
|
| 324 |
+
Relevant threshold results
|
| 325 |
+
"""
|
| 326 |
+
query = f"{material_type} threshold {facility_type} clearance criteria"
|
| 327 |
+
return self.retrieve(
|
| 328 |
+
query=query,
|
| 329 |
+
top_k=3,
|
| 330 |
+
category_filter="thresholds",
|
| 331 |
+
)
|
| 332 |
+
|
| 333 |
+
def retrieve_disposition(
|
| 334 |
+
self,
|
| 335 |
+
zone: str,
|
| 336 |
+
condition: str,
|
| 337 |
+
material_type: Optional[str] = None,
|
| 338 |
+
) -> list[RetrievalResult]:
|
| 339 |
+
"""Retrieve disposition guidance for zone/condition combination.
|
| 340 |
+
|
| 341 |
+
Convenience method for disposition lookups.
|
| 342 |
+
|
| 343 |
+
Args:
|
| 344 |
+
zone: Zone classification (burn-zone, near-field, far-field)
|
| 345 |
+
condition: Condition level (background, light, moderate, heavy, structural-damage)
|
| 346 |
+
material_type: Optional material type for specific guidance
|
| 347 |
+
|
| 348 |
+
Returns:
|
| 349 |
+
Relevant disposition results
|
| 350 |
+
"""
|
| 351 |
+
query = f"disposition {zone} {condition}"
|
| 352 |
+
if material_type:
|
| 353 |
+
query += f" {material_type}"
|
| 354 |
+
query += " cleaning recommendation"
|
| 355 |
+
|
| 356 |
+
return self.retrieve(
|
| 357 |
+
query=query,
|
| 358 |
+
top_k=5,
|
| 359 |
+
priority_filter="primary", # Prefer FDAM methodology
|
| 360 |
+
)
|
| 361 |
+
|
| 362 |
+
def retrieve_cleaning_method(
|
| 363 |
+
self,
|
| 364 |
+
surface_type: str,
|
| 365 |
+
condition: str,
|
| 366 |
+
) -> list[RetrievalResult]:
|
| 367 |
+
"""Retrieve cleaning method recommendations.
|
| 368 |
+
|
| 369 |
+
Args:
|
| 370 |
+
surface_type: Type of surface (e.g., "drywall", "concrete", "metal")
|
| 371 |
+
condition: Condition level
|
| 372 |
+
|
| 373 |
+
Returns:
|
| 374 |
+
Relevant cleaning method results
|
| 375 |
+
"""
|
| 376 |
+
query = f"cleaning method {surface_type} {condition} procedure hepa"
|
| 377 |
+
return self.retrieve(
|
| 378 |
+
query=query,
|
| 379 |
+
top_k=5,
|
| 380 |
+
)
|
rag/vectorstore.py
ADDED
|
@@ -0,0 +1,287 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
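Usage sketch (not part of the committed files): assuming the package root is importable and an index has already been built, the retriever above can be exercised as follows; the query strings are illustrative only.

from rag.retriever import FDAMRetriever

retriever = FDAMRetriever(use_reranking=True)
results = retriever.retrieve("soot removal on painted drywall", top_k=3)
for r in results:
    # final_score = 0.6 * (similarity * priority weight) + 0.4 * rerank score
    print(f"{r.source} [{r.priority}] -> {r.final_score:.3f}")

# Formatted context string with numbered source citations for the LLM prompt
context = retriever.retrieve_for_context("lead clearance criteria", top_k=5)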
rag/vectorstore.py
ADDED
@@ -0,0 +1,287 @@

"""ChromaDB vector store for FDAM knowledge base.

Provides embedding and storage with metadata support.
Uses mock embeddings when MOCK_MODELS=true for local development.
"""

import hashlib
from typing import Optional
from pathlib import Path

import chromadb
from chromadb.config import Settings

from config.settings import settings
from .chunker import Chunk


class MockEmbeddingFunction:
    """Mock embedding function for local development.

    Generates deterministic pseudo-embeddings based on text hash.
    Produces 384-dimensional vectors (matches common embedding models).
    """

    EMBEDDING_DIM = 384

    def __call__(self, input: list[str]) -> list[list[float]]:
        """Generate mock embeddings for a list of texts."""
        return [self._embed_text(text) for text in input]

    def _embed_text(self, text: str) -> list[float]:
        """Generate a deterministic pseudo-embedding from text.

        Uses SHA-256 hash expanded to fill embedding dimensions.
        Not semantically meaningful but provides consistent behavior.
        """
        # Hash the text
        text_hash = hashlib.sha256(text.encode("utf-8")).digest()

        # Expand hash to fill embedding dimensions
        embedding = []
        for i in range(self.EMBEDDING_DIM):
            # Use hash bytes cyclically, normalized to [-1, 1]
            byte_val = text_hash[i % len(text_hash)]
            normalized = (byte_val / 127.5) - 1.0
            embedding.append(normalized)

        return embedding


class RealEmbeddingFunction:
    """Real embedding function using Qwen3-VL-Embedding-8B.

    Loaded on-demand when MOCK_MODELS=false.
    """

    EMBEDDING_DIM = 4096  # Qwen embedding dimension

    def __init__(self):
        self.model = None
        self.tokenizer = None

    def _load_model(self):
        """Lazy load the embedding model."""
        if self.model is not None:
            return

        import torch
        from transformers import AutoModel, AutoTokenizer

        model_name = "Qwen/Qwen3-VL-Embedding-8B"
        print(f"Loading embedding model: {model_name}")

        self.tokenizer = AutoTokenizer.from_pretrained(
            model_name,
            trust_remote_code=True,
        )
        self.model = AutoModel.from_pretrained(
            model_name,
            torch_dtype=torch.bfloat16,
            device_map="auto",
            trust_remote_code=True,
        )
        self.model.eval()

    def __call__(self, input: list[str]) -> list[list[float]]:
        """Generate embeddings for a list of texts."""
        self._load_model()

        import torch

        embeddings = []
        with torch.no_grad():
            for text in input:
                inputs = self.tokenizer(
                    text,
                    return_tensors="pt",
                    truncation=True,
                    max_length=512,
                    padding=True,
                )
                inputs = {k: v.to(self.model.device) for k, v in inputs.items()}

                outputs = self.model(**inputs)
                # Use mean pooling over sequence
                embedding = outputs.last_hidden_state.mean(dim=1).squeeze()
                embeddings.append(embedding.cpu().float().tolist())

        return embeddings


def get_embedding_function():
    """Get appropriate embedding function based on settings."""
    if settings.mock_models:
        return MockEmbeddingFunction()
    return RealEmbeddingFunction()


class ChromaVectorStore:
    """ChromaDB-based vector store for FDAM knowledge base."""

    COLLECTION_NAME = "fdam_knowledge_base"

    def __init__(
        self,
        persist_directory: Optional[str] = None,
        embedding_function=None,
    ):
        """Initialize vector store.

        Args:
            persist_directory: Directory for ChromaDB persistence.
                If None, uses in-memory storage.
            embedding_function: Custom embedding function.
                If None, uses appropriate default.
        """
        self.persist_directory = persist_directory

        # Initialize ChromaDB client
        if persist_directory:
            persist_path = Path(persist_directory)
            persist_path.mkdir(parents=True, exist_ok=True)
            self.client = chromadb.PersistentClient(
                path=str(persist_path),
                settings=Settings(anonymized_telemetry=False),
            )
        else:
            self.client = chromadb.Client(
                settings=Settings(anonymized_telemetry=False),
            )

        # Set up embedding function
        self.embedding_function = embedding_function or get_embedding_function()

        # Get or create collection
        self.collection = self.client.get_or_create_collection(
            name=self.COLLECTION_NAME,
            metadata={"hnsw:space": "cosine"},
        )

    def add_chunks(self, chunks: list[Chunk]) -> int:
        """Add chunks to the vector store.

        Args:
            chunks: List of Chunk objects to add

        Returns:
            Number of chunks added
        """
        if not chunks:
            return 0

        ids = [chunk.id for chunk in chunks]
        documents = [chunk.text for chunk in chunks]
        metadatas = [chunk.to_metadata() for chunk in chunks]

        # Generate embeddings
        embeddings = self.embedding_function(documents)

        # Add to collection
        self.collection.add(
            ids=ids,
            embeddings=embeddings,
            documents=documents,
            metadatas=metadatas,
        )

        return len(chunks)

    def query(
        self,
        query_text: str,
        n_results: int = 5,
        where: Optional[dict] = None,
        where_document: Optional[dict] = None,
    ) -> list[dict]:
        """Query the vector store.

        Args:
            query_text: Query text to search for
            n_results: Number of results to return
            where: Metadata filter (e.g., {"priority": "primary"})
            where_document: Document content filter

        Returns:
            List of result dicts with keys: id, document, metadata, distance
        """
        # Generate query embedding
        query_embedding = self.embedding_function([query_text])[0]

        # Query collection
        results = self.collection.query(
            query_embeddings=[query_embedding],
            n_results=n_results,
            where=where,
            where_document=where_document,
            include=["documents", "metadatas", "distances"],
        )

        # Format results
        formatted = []
        if results["ids"] and results["ids"][0]:
            for i, chunk_id in enumerate(results["ids"][0]):
                formatted.append(
                    {
                        "id": chunk_id,
                        "document": results["documents"][0][i],
                        "metadata": results["metadatas"][0][i],
                        "distance": results["distances"][0][i],
                    }
                )

        return formatted

    def get_stats(self) -> dict:
        """Get collection statistics."""
        count = self.collection.count()

        # Get category distribution
        categories = {}
        priorities = {}

        if count > 0:
            # Sample all documents to get metadata distribution
            all_results = self.collection.get(include=["metadatas"])
            for metadata in all_results["metadatas"]:
                cat = metadata.get("category", "unknown")
                pri = metadata.get("priority", "unknown")
                categories[cat] = categories.get(cat, 0) + 1
                priorities[pri] = priorities.get(pri, 0) + 1

        return {
            "total_chunks": count,
            "categories": categories,
            "priorities": priorities,
            "collection_name": self.COLLECTION_NAME,
            "persist_directory": self.persist_directory,
        }

    def clear(self):
        """Clear all data from the collection."""
        self.client.delete_collection(self.COLLECTION_NAME)
        self.collection = self.client.get_or_create_collection(
            name=self.COLLECTION_NAME,
            metadata={"hnsw:space": "cosine"},
        )

    def delete_by_source(self, source: str) -> int:
        """Delete all chunks from a specific source.

        Args:
            source: Source filename to delete

        Returns:
            Number of chunks deleted
        """
        # Get IDs of chunks from this source
        results = self.collection.get(
            where={"source": source},
            include=[],
        )

        if results["ids"]:
            self.collection.delete(ids=results["ids"])
            return len(results["ids"])

        return 0
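Usage sketch (not part of the committed files): with MOCK_MODELS=true the store above runs without GPU models, since MockEmbeddingFunction is deterministic by construction. The persist directory and query text below are illustrative assumptions.

from rag.vectorstore import ChromaVectorStore, MockEmbeddingFunction

emb = MockEmbeddingFunction()
assert emb(["same text"]) == emb(["same text"])  # same hash -> same vector

store = ChromaVectorStore(persist_directory="chroma_db")  # hypothetical path
hits = store.query("asbestos containing material flag", n_results=3,
                   where={"priority": "primary"})
for hit in hits:
    # cosine distance: 0.0 means identical under the active embedding
    print(hit["id"], hit["distance"], hit["metadata"].get("section"))

print(store.get_stats())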
requirements.txt
ADDED
@@ -0,0 +1,31 @@

# Core ML/AI
torch
transformers>=4.57.0
accelerate
qwen-vl-utils>=0.0.14
torchvision

# UI
gradio

# RAG/Vector Store
chromadb

# Data Validation
pydantic
pydantic-settings

# Image Processing
pillow

# PDF Processing
pdfplumber
weasyprint>=60.0
markdown>=3.5

# Utilities
numpy

# Testing
pytest
pytest-asyncio
schemas/__init__.py
ADDED
@@ -0,0 +1,109 @@

"""FDAM AI Pipeline Pydantic schemas.

Exports all input and output models for convenient imports.
"""

from .input import (
    # Type definitions
    FacilityClassification,
    ConstructionEra,
    ZoneType,
    ConditionLevel,
    MaterialType,
    MaterialCategory,
    Disposition,
    OdorIntensity,
    CharDensity,
    SampleType,
    Priority,
    # Helper functions
    get_material_category,
    # Input models
    ProjectInfo,
    Dimensions,
    Surface,
    Room,
    BoundingBox,
    ImageAnnotation,
    ImageMetadata,
    QualitativeObservations,
    AssessmentInput,
)

from .output import (
    # Vision analysis
    ZoneAnalysis,
    ConditionAnalysis,
    DetectedMaterial,
    CombustionIndicators,
    SamplingRecommendation,
    VisionAnalysisResult,
    # Calculations
    RoomAreaSummary,
    SurfaceAreas,
    AirFiltration,
    SampleDensity,
    LaborEstimate,
    EquipmentRequirements,
    RegulatoryFlag,
    RegulatoryFlags,
    CalculationResults,
    # Documents
    GeneratedDocuments,
    # Confidence
    FlaggedItem,
    ConfidenceReport,
    # Final output
    AssessmentOutput,
)

__all__ = [
    # Type definitions
    "FacilityClassification",
    "ConstructionEra",
    "ZoneType",
    "ConditionLevel",
    "MaterialType",
    "MaterialCategory",
    "Disposition",
    "OdorIntensity",
    "CharDensity",
    "SampleType",
    "Priority",
    # Helper functions
    "get_material_category",
    # Input models
    "ProjectInfo",
    "Dimensions",
    "Surface",
    "Room",
    "BoundingBox",
    "ImageAnnotation",
    "ImageMetadata",
    "QualitativeObservations",
    "AssessmentInput",
    # Vision analysis
    "ZoneAnalysis",
    "ConditionAnalysis",
    "DetectedMaterial",
    "CombustionIndicators",
    "SamplingRecommendation",
    "VisionAnalysisResult",
    # Calculations
    "RoomAreaSummary",
    "SurfaceAreas",
    "AirFiltration",
    "SampleDensity",
    "LaborEstimate",
    "EquipmentRequirements",
    "RegulatoryFlag",
    "RegulatoryFlags",
    "CalculationResults",
    # Documents
    "GeneratedDocuments",
    # Confidence
    "FlaggedItem",
    "ConfidenceReport",
    # Final output
    "AssessmentOutput",
]
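The re-exports above allow flat package-level imports; a one-line illustration (not part of the commit, variable names arbitrary):

from schemas import AssessmentInput, AssessmentOutput, get_material_category

assert get_material_category("carpet") == "porous"  # per the helper in schemas/input.py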
schemas/input.py
ADDED
@@ -0,0 +1,255 @@

"""Pydantic input models for FDAM AI Pipeline.

Uses Literal unions instead of Enums per project code style.
"""

from datetime import date
from typing import Literal, Optional

from pydantic import BaseModel, Field, field_validator, model_validator


# --- Type Definitions (Literal unions) ---

FacilityClassification = Literal["operational", "non-operational", "public-childcare"]
ConstructionEra = Literal["pre-1980", "1980-2000", "post-2000"]
ZoneType = Literal["burn", "near-field", "far-field"]
ConditionLevel = Literal["background", "light", "moderate", "heavy", "structural-damage"]

# Material categories
MaterialType = Literal[
    # Non-porous
    "steel",
    "concrete",
    "glass",
    "metal",
    "cmu",
    # Semi-porous
    "drywall-painted",
    "drywall-unpainted",
    "wood-sealed",
    "wood-unsealed",
    # Porous
    "carpet",
    "carpet-pad",
    "insulation-fiberglass",
    "insulation-other",
    "acoustic-tile",
    "upholstery",
    # HVAC
    "ductwork-rigid",
    "ductwork-flexible",
    "hvac-interior-insulation",
]

MaterialCategory = Literal["non-porous", "semi-porous", "porous", "hvac"]

Disposition = Literal["no-action", "clean", "evaluate", "remove", "remove-repair"]

OdorIntensity = Literal["none", "faint", "moderate", "strong"]
CharDensity = Literal["sparse", "moderate", "dense"]
SampleType = Literal["tape_lift", "surface_wipe", "both"]
Priority = Literal["high", "medium", "low"]


# --- Helper Functions ---

def get_material_category(material: MaterialType) -> MaterialCategory:
    """Get the category for a material type."""
    non_porous = {"steel", "concrete", "glass", "metal", "cmu"}
    semi_porous = {"drywall-painted", "drywall-unpainted", "wood-sealed", "wood-unsealed"}
    porous = {"carpet", "carpet-pad", "insulation-fiberglass", "insulation-other", "acoustic-tile", "upholstery"}
    hvac = {"ductwork-rigid", "ductwork-flexible", "hvac-interior-insulation"}

    if material in non_porous:
        return "non-porous"
    elif material in semi_porous:
        return "semi-porous"
    elif material in porous:
        return "porous"
    elif material in hvac:
        return "hvac"
    else:
        return "porous"  # Conservative default


# --- Project Level ---

class ProjectInfo(BaseModel):
    """Project-level information."""

    project_name: str = Field(..., min_length=1, description="Project or facility name")
    address: str = Field(..., min_length=1, description="Full street address")
    city: str = Field(..., min_length=1)
    state: str = Field(..., min_length=2, max_length=2)
    zip_code: str = Field(..., min_length=5)

    client_name: str = Field(..., min_length=1)
    client_contact: Optional[str] = None
    client_email: Optional[str] = None
    client_phone: Optional[str] = None

    fire_date: date = Field(..., description="Date of fire incident")
    assessment_date: date = Field(..., description="Date of assessment")

    facility_classification: FacilityClassification
    construction_era: ConstructionEra

    assessor_name: str = Field(..., min_length=1, description="Industrial hygienist name")
    assessor_credentials: Optional[str] = Field(None, description="CIH, CSP, etc.")


# --- Room/Area Level ---

class Dimensions(BaseModel):
    """Room dimensions for calculations."""

    length_ft: float = Field(..., gt=0, le=10000, description="Length in feet")
    width_ft: float = Field(..., gt=0, le=10000, description="Width in feet")
    ceiling_height_ft: float = Field(..., gt=0, le=500, description="Ceiling height in feet")

    @property
    def area_sf(self) -> float:
        """Calculate floor area in square feet."""
        return self.length_ft * self.width_ft

    @property
    def volume_cf(self) -> float:
        """Calculate volume in cubic feet."""
        return self.area_sf * self.ceiling_height_ft


class Surface(BaseModel):
    """Individual surface within a room."""

    id: str = Field(..., min_length=1, description="Unique surface identifier")
    material: MaterialType = Field(..., description="Material type")
    description: str = Field(..., min_length=1, description="e.g., 'North wall drywall'")
    area_sf: float = Field(..., gt=0, description="Surface area in square feet")

    zone: Optional[ZoneType] = Field(None, description="Can be set by AI or user")
    condition: Optional[ConditionLevel] = Field(None, description="Can be set by AI or user")
    disposition: Optional[Disposition] = Field(None, description="Calculated by system")

    ai_detected: bool = Field(False, description="Was this detected by AI from images?")
    confidence: Optional[float] = Field(None, ge=0, le=1, description="AI confidence score")

    @property
    def category(self) -> MaterialCategory:
        """Get the material category."""
        return get_material_category(self.material)


class Room(BaseModel):
    """Room or area within the building."""

    id: str = Field(..., min_length=1, description="Unique room identifier")
    name: str = Field(..., min_length=1, description="e.g., 'Warehouse Bay A'")
    floor: Optional[str] = Field(None, description="e.g., 'Ground Floor'")

    dimensions: Dimensions

    zone_classification: Optional[ZoneType] = Field(None, description="AI-determined or user override")
    zone_confidence: Optional[float] = Field(None, ge=0, le=1)
    zone_user_override: bool = Field(False)

    surfaces: list[Surface] = Field(default_factory=list)
    image_ids: list[str] = Field(default_factory=list, description="Associated image IDs")


# --- Image Level ---

class BoundingBox(BaseModel):
    """Bounding box for detected elements in an image."""

    x: float = Field(..., ge=0, le=1, description="X coordinate (normalized 0-1)")
    y: float = Field(..., ge=0, le=1, description="Y coordinate (normalized 0-1)")
    width: float = Field(..., gt=0, le=1, description="Width (normalized 0-1)")
    height: float = Field(..., gt=0, le=1, description="Height (normalized 0-1)")


class ImageAnnotation(BaseModel):
    """Annotation for a detected element in an image."""

    label: str
    bounding_box: BoundingBox
    confidence: Optional[float] = Field(None, ge=0, le=1)


class ImageMetadata(BaseModel):
    """Metadata for uploaded image."""

    id: str = Field(..., min_length=1)
    filename: str = Field(..., min_length=1)
    room_id: str = Field(..., min_length=1, description="Associated room ID")
    description: Optional[str] = Field(None, description="User description of image")

    # AI-populated fields
    detected_materials: list[MaterialType] = Field(default_factory=list)
    detected_zone: Optional[ZoneType] = None
    zone_confidence: Optional[float] = Field(None, ge=0, le=1)
    detected_condition: Optional[ConditionLevel] = None
    condition_confidence: Optional[float] = Field(None, ge=0, le=1)

    # Bounding box annotations (for UI overlay)
    annotations: list[ImageAnnotation] = Field(default_factory=list)

    analysis_complete: bool = Field(False)


# --- Qualitative Observations ---

class QualitativeObservations(BaseModel):
    """Qualitative observation checklist per FDAM 2.3."""

    smoke_fire_odor: bool = Field(..., description="Smoke/fire odor present?")
    odor_intensity: Optional[OdorIntensity] = None

    visible_soot_deposits: bool = Field(..., description="Visible soot deposits?")
    soot_pattern_description: Optional[str] = None

    large_char_particles: bool = Field(..., description="Large char particles observed?")
    char_density_estimate: Optional[CharDensity] = None

    ash_like_residue: bool = Field(..., description="Ash-like residue present?")
    ash_color_texture: Optional[str] = None

    surface_discoloration: bool = Field(..., description="Surface discoloration?")
    discoloration_description: Optional[str] = None

    dust_loading_interference: bool = Field(..., description="Dust loading or interference?")
    dust_notes: Optional[str] = None

    wildfire_indicators: bool = Field(..., description="Burned soil/pollen/vegetation indicators?")
    wildfire_notes: Optional[str] = None

    additional_notes: Optional[str] = None


# --- Complete Assessment Input ---

class AssessmentInput(BaseModel):
    """Complete input for FDAM AI assessment."""

    project: ProjectInfo
    rooms: list[Room] = Field(..., min_length=1)
    images: list[ImageMetadata] = Field(default_factory=list, max_length=20)
    observations: QualitativeObservations

    @field_validator("rooms")
    @classmethod
    def validate_room_ids(cls, rooms: list[Room]) -> list[Room]:
        """Ensure room IDs are unique."""
        ids = [r.id for r in rooms]
        if len(ids) != len(set(ids)):
            raise ValueError("Room IDs must be unique")
        return rooms

    @model_validator(mode="after")
    def validate_image_rooms(self) -> "AssessmentInput":
        """Ensure all images reference valid room IDs."""
        room_ids = {r.id for r in self.rooms}
        for img in self.images:
            if img.room_id not in room_ids:
                raise ValueError(f"Image {img.id} references unknown room {img.room_id}")
        return self
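Construction sketch (not part of the committed files): a minimal valid AssessmentInput built from the models above. All field values are illustrative; duplicate room IDs or images pointing at unknown rooms would be rejected by the validators shown in the file.

from datetime import date
from schemas.input import (
    AssessmentInput, ProjectInfo, Room, Dimensions, QualitativeObservations,
)

project = ProjectInfo(
    project_name="Example Warehouse", address="123 Main St", city="Phoenix",
    state="AZ", zip_code="85001", client_name="Example Client",
    fire_date=date(2024, 1, 10), assessment_date=date(2024, 1, 15),
    facility_classification="non-operational", construction_era="pre-1980",
    assessor_name="J. Doe",
)
room = Room(
    id="bay-a", name="Warehouse Bay A",
    dimensions=Dimensions(length_ft=40, width_ft=30, ceiling_height_ft=12),
)
obs = QualitativeObservations(
    smoke_fire_odor=True, visible_soot_deposits=True, large_char_particles=False,
    ash_like_residue=False, surface_discoloration=True,
    dust_loading_interference=False, wildfire_indicators=False,
)
assessment = AssessmentInput(project=project, rooms=[room], observations=obs)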
schemas/output.py
ADDED
@@ -0,0 +1,238 @@

"""Pydantic output models for FDAM AI Pipeline.

Contains vision analysis results, calculation outputs, and final assessment output.
"""

from typing import Optional

from pydantic import BaseModel, Field

from .input import (
    AssessmentInput,
    BoundingBox,
    ConditionLevel,
    MaterialCategory,
    MaterialType,
    Priority,
    SampleType,
    ZoneType,
)


# --- Vision Analysis Output ---

class ZoneAnalysis(BaseModel):
    """Zone classification from vision analysis."""

    classification: ZoneType
    confidence: float = Field(..., ge=0, le=1)
    reasoning: str


class ConditionAnalysis(BaseModel):
    """Condition assessment from vision analysis."""

    level: ConditionLevel
    confidence: float = Field(..., ge=0, le=1)
    reasoning: str


class DetectedMaterial(BaseModel):
    """Material detected in image by vision model."""

    type: MaterialType
    category: MaterialCategory
    confidence: float = Field(..., ge=0, le=1)
    location_description: Optional[str] = None
    bounding_box: Optional[BoundingBox] = None


class CombustionIndicators(BaseModel):
    """Combustion particle indicators from vision analysis."""

    soot_visible: bool = False
    soot_pattern: Optional[str] = None
    char_visible: bool = False
    char_description: Optional[str] = None
    ash_visible: bool = False
    ash_description: Optional[str] = None


class SamplingRecommendation(BaseModel):
    """Recommended sampling location from vision analysis."""

    description: str
    sample_type: SampleType
    priority: Priority


class VisionAnalysisResult(BaseModel):
    """Complete vision analysis result for a single image.

    Matches the VISION_OUTPUT_SCHEMA from the technical spec.
    """

    zone: ZoneAnalysis
    condition: ConditionAnalysis
    materials: list[DetectedMaterial] = Field(default_factory=list)
    combustion_indicators: CombustionIndicators
    structural_concerns: list[str] = Field(default_factory=list)
    access_issues: list[str] = Field(default_factory=list)
    recommended_sampling_locations: list[SamplingRecommendation] = Field(default_factory=list)
    flags_for_review: list[str] = Field(default_factory=list)


# --- Calculation Results ---

class RoomAreaSummary(BaseModel):
    """Area summary for a single room."""

    floor_area: float
    surface_area: float
    volume: float


class SurfaceAreas(BaseModel):
    """Surface area calculations by various groupings."""

    by_type: dict[str, float] = Field(default_factory=dict)
    by_disposition: dict[str, float] = Field(default_factory=dict)
    by_zone: dict[str, float] = Field(default_factory=dict)
    by_room: dict[str, RoomAreaSummary] = Field(default_factory=dict)
    total_floor_sf: float = 0
    total_surface_sf: float = 0
    total_volume_cf: float = 0


class AirFiltration(BaseModel):
    """Air filtration calculation results per NADCA ACR 2021."""

    total_volume_cf: float
    required_ach: int = 4
    unit_cfm: int = 2000
    units_required: int
    calculation: str
    standard_reference: str = "NADCA ACR 2021, Section 3.6"


class SampleDensity(BaseModel):
    """Sample density recommendations per FDAM 2.3."""

    total_sf: float
    size_category: str
    surface_types_count: int
    surface_types: list[str] = Field(default_factory=list)
    tape_lifts_per_type: str
    surface_wipes_per_type: str
    recommended_tape_lifts: int
    recommended_surface_wipes: int
    ceiling_deck_note: Optional[str] = None
    control_samples_recommended: bool = True
    control_sample_note: str = "Control samples from unaffected areas recommended for baseline comparison"


class LaborEstimate(BaseModel):
    """Labor hour estimates by task."""

    hepa_vacuum: float = 0
    wet_wipe: float = 0
    dry_sponge: float = 0
    power_wash: float = 0
    scrubber: float = 0
    removal: float = 0
    hvac_cleaning: float = 0
    total_hours: float = 0


class EquipmentRequirements(BaseModel):
    """Equipment requirements for the project."""

    air_scrubbers: int = 0
    hepa_vacuums: int = 0
    negative_air_machines: int = 0
    dehumidifiers: int = 0
    notes: list[str] = Field(default_factory=list)


class RegulatoryFlag(BaseModel):
    """Regulatory flag for potential hazards."""

    flag_type: str
    description: str
    recommendation: str
    reference: str


class RegulatoryFlags(BaseModel):
    """Regulatory flags based on construction era and facility type."""

    lead_paint_flag: Optional[RegulatoryFlag] = None
    acm_flag: Optional[RegulatoryFlag] = None
    other_flags: list[RegulatoryFlag] = Field(default_factory=list)


class CalculationResults(BaseModel):
    """All calculation results from FDAM logic engine."""

    surface_areas: SurfaceAreas
    air_filtration: AirFiltration
    sample_density: SampleDensity
    labor_estimate: LaborEstimate
    equipment: EquipmentRequirements
    regulatory_flags: RegulatoryFlags


# --- Document Output ---

class GeneratedDocuments(BaseModel):
    """Generated document outputs."""

    cleaning_specification_md: str = Field(..., description="Cleaning Specification / SOW in Markdown")
    sampling_plan_md: Optional[str] = Field(None, description="Sampling plan recommendations in Markdown")
    confidence_report_md: Optional[str] = Field(None, description="Confidence report in Markdown")


# --- Confidence Report ---

class FlaggedItem(BaseModel):
    """Item flagged for professional review."""

    type: str
    room: Optional[str] = None
    surface: Optional[str] = None
    image_id: Optional[str] = None
    confidence: Optional[float] = None
    recommendation: str


class ConfidenceReport(BaseModel):
    """Confidence report for assessment."""

    flagged_items: list[FlaggedItem] = Field(default_factory=list)
    overall_confidence: float = Field(..., ge=0, le=1)
    review_required: bool = False


# --- Complete Assessment Output ---

class AssessmentOutput(BaseModel):
    """Complete output from FDAM AI assessment pipeline."""

    # Original input (with AI-enriched fields)
    input: AssessmentInput

    # Vision analysis results (by image ID)
    vision_results: dict[str, VisionAnalysisResult] = Field(default_factory=dict)

    # Calculation results
    calculations: CalculationResults

    # Generated documents
    documents: GeneratedDocuments

    # Confidence report
    confidence_report: ConfidenceReport

    # Processing metadata
    processing_time_seconds: Optional[float] = None
    model_versions: dict[str, str] = Field(default_factory=dict)
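Worked example (not part of the committed files): the AirFiltration model above stores its own units_required and calculation string; the arithmetic that typically fills those fields (4 air changes per hour, 2,000 CFM per unit) is sketched below as an assumption, since pipeline/calculations.py is not shown in this portion of the diff.

import math

total_volume_cf = 120_000          # illustrative: 10,000 sf at 12 ft ceilings
required_ach = 4                   # NADCA ACR 2021 default carried by the model
unit_cfm = 2_000

required_cfm = total_volume_cf * required_ach / 60   # cubic feet per hour -> CFM
units_required = math.ceil(required_cfm / unit_cfm)  # 8,000 CFM / 2,000 CFM = 4 units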
tests/__init__.py
ADDED
File without changes

tests/test_pdf_generator.py
ADDED
@@ -0,0 +1,246 @@

"""Tests for PDF generation module."""

import pytest
import tempfile
from pathlib import Path

from pipeline.pdf_generator import PDFGenerator, PDFResult, generate_sow_pdf, SOW_CSS


class TestPDFGenerator:
    """Test PDF generator functionality."""

    @pytest.fixture
    def generator(self):
        """Create PDF generator instance."""
        return PDFGenerator()

    @pytest.fixture
    def sample_markdown(self):
        """Sample markdown for testing."""
        return """# Test Document

## Section One

This is a test paragraph with **bold** and *italic* text.

| Column A | Column B |
|----------|----------|
| Value 1 | Value 2 |
| Value 3 | Value 4 |

## Section Two

- Bullet point one
- Bullet point two
- Bullet point three

---

*Generated by test*
"""

    def test_weasyprint_available(self, generator):
        """Test that WeasyPrint is detected as available."""
        assert generator.weasyprint_available is True

    def test_markdown_to_html(self, generator, sample_markdown):
        """Test markdown to HTML conversion."""
        html = generator.markdown_to_html(sample_markdown)

        assert "<!DOCTYPE html>" in html
        assert "<html>" in html
        assert "<style>" in html
        # Note: markdown library adds id attribute to headers (from TOC extension)
        assert "<h1" in html and "Test Document</h1>" in html
        assert "<table>" in html
        assert "<strong>bold</strong>" in html

    def test_markdown_to_html_includes_css(self, generator, sample_markdown):
        """Test that HTML includes CSS styling."""
        html = generator.markdown_to_html(sample_markdown)

        # Check key CSS rules are included
        assert "font-family" in html
        assert "border-collapse" in html
        assert "@page" in html

    def test_generate_pdf_success(self, generator, sample_markdown):
        """Test successful PDF generation."""
        result = generator.generate_pdf(sample_markdown)

        assert isinstance(result, PDFResult)
        assert result.success is True
        assert result.pdf_path is not None
        assert result.error_message is None
        assert result.file_size_bytes > 0

        # Verify file exists
        pdf_path = Path(result.pdf_path)
        assert pdf_path.exists()
        assert pdf_path.suffix == ".pdf"

        # Clean up
        pdf_path.unlink()

    def test_generate_pdf_with_custom_path(self, generator, sample_markdown):
        """Test PDF generation with custom output path."""
        with tempfile.NamedTemporaryFile(suffix=".pdf", delete=False) as f:
            output_path = f.name

        result = generator.generate_pdf(sample_markdown, output_path=output_path)

        assert result.success is True
        assert result.pdf_path == output_path

        # Clean up
        Path(output_path).unlink()

    def test_generate_pdf_empty_content(self, generator):
        """Test PDF generation with empty content."""
        result = generator.generate_pdf("")

        # Should still succeed with empty content
        assert result.success is True
        assert result.pdf_path is not None

        # Clean up
        Path(result.pdf_path).unlink()

    def test_generate_pdf_complex_tables(self, generator):
        """Test PDF with complex table content."""
        markdown = """# Thresholds

| Metal | Non-Operational | Operational | Unit |
|-------|-----------------|-------------|------|
| Lead | 22 | 500 | µg/100cm² |
| Cadmium | 3.3 | 50 | µg/100cm² |
| Arsenic | 6.7 | 100 | µg/100cm² |

## Notes

Special characters: µ, °, ², ™
"""
        result = generator.generate_pdf(markdown)

        assert result.success is True
        assert result.file_size_bytes > 0

        # Clean up
        Path(result.pdf_path).unlink()

    def test_generate_html_fallback(self, generator, sample_markdown):
        """Test HTML generation as fallback."""
        success, html_path, error = generator.generate_html(sample_markdown)

        assert success is True
        assert html_path is not None
        assert error is None

        # Verify file exists and contains HTML
        html_path = Path(html_path)
        assert html_path.exists()
        content = html_path.read_text()
        assert "<html>" in content

        # Clean up
        html_path.unlink()

    def test_custom_css(self):
        """Test PDF generator with custom CSS."""
        custom_css = """
        body { font-family: monospace; }
        h1 { color: red; }
        """
        generator = PDFGenerator(custom_css=custom_css)

        html = generator.markdown_to_html("# Test")
        assert "font-family: monospace" in html
        assert "color: red" in html


class TestGenerateSowPdf:
    """Test the convenience function."""

    def test_generate_sow_pdf(self):
        """Test generate_sow_pdf convenience function."""
        markdown = """# Scope of Work

## Project: Test Fire

| Field | Value |
|-------|-------|
| Client | ACME Corp |
| Date | 2024-01-15 |

## Recommendations

- Clean all surfaces
- HEPA vacuum required
"""
        result = generate_sow_pdf(
            markdown_content=markdown,
            project_name="Test Fire",
        )

        assert result.success is True
        assert result.pdf_path is not None

        # Clean up
        Path(result.pdf_path).unlink()


class TestSOWCSS:
    """Test CSS styling constants."""

    def test_sow_css_exists(self):
        """Test that SOW_CSS is defined."""
        assert SOW_CSS is not None
        assert len(SOW_CSS) > 0

    def test_sow_css_has_page_settings(self):
        """Test that CSS includes page settings."""
        assert "@page" in SOW_CSS
        assert "margin" in SOW_CSS

    def test_sow_css_has_table_styling(self):
        """Test that CSS includes table styling."""
        assert "table" in SOW_CSS
        assert "border-collapse" in SOW_CSS
        assert "th" in SOW_CSS
        assert "td" in SOW_CSS

    def test_sow_css_has_header_styling(self):
        """Test that CSS includes header styling."""
        assert "h1" in SOW_CSS
        assert "h2" in SOW_CSS


class TestPDFResultDataclass:
    """Test PDFResult dataclass."""

    def test_pdf_result_success(self):
        """Test PDFResult with success."""
        result = PDFResult(
            success=True,
            pdf_path="/tmp/test.pdf",
            file_size_bytes=1000,
        )

        assert result.success is True
        assert result.pdf_path == "/tmp/test.pdf"
        assert result.error_message is None
        assert result.file_size_bytes == 1000

    def test_pdf_result_failure(self):
        """Test PDFResult with failure."""
        result = PDFResult(
            success=False,
            pdf_path=None,
            error_message="Something went wrong",
        )

        assert result.success is False
        assert result.pdf_path is None
        assert result.error_message == "Something went wrong"
        assert result.file_size_bytes == 0
| 1 |
+
"""Tests for FDAM Pipeline components."""
|
| 2 |
+
|
| 3 |
+
import pytest
|
| 4 |
+
from PIL import Image
|
| 5 |
+
import io
|
| 6 |
+
|
| 7 |
+
from pipeline.calculations import (
|
| 8 |
+
FDAMCalculator,
|
| 9 |
+
AirFiltrationResult,
|
| 10 |
+
SampleDensityResult,
|
| 11 |
+
RegulatoryFlags,
|
| 12 |
+
MetalsThresholds,
|
| 13 |
+
METALS_THRESHOLDS,
|
| 14 |
+
PARTICULATE_THRESHOLDS,
|
| 15 |
+
)
|
| 16 |
+
from pipeline.dispositions import (
|
| 17 |
+
DispositionEngine,
|
| 18 |
+
DispositionResult,
|
| 19 |
+
SurfaceDisposition,
|
| 20 |
+
DISPOSITION_MATRIX,
|
| 21 |
+
CLEANING_PROTOCOLS,
|
| 22 |
+
)
|
| 23 |
+
from pipeline.generator import DocumentGenerator, GeneratedDocument
|
| 24 |
+
from pipeline.main import FDAMPipeline, PipelineResult, PipelineProgress
|
| 25 |
+
|
| 26 |
+
from ui.state import SessionState, RoomFormData, ImageFormData
|
| 27 |
+
from ui.components import image_store
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
class TestFDAMCalculator:
|
| 31 |
+
"""Test FDAM calculations."""
|
| 32 |
+
|
| 33 |
+
@pytest.fixture
|
| 34 |
+
def calculator(self):
|
| 35 |
+
return FDAMCalculator()
|
| 36 |
+
|
| 37 |
+
def test_air_filtration_basic(self, calculator):
|
| 38 |
+
"""Test basic air filtration calculation."""
|
| 39 |
+
result = calculator.calculate_air_filtration(
|
| 40 |
+
total_area_sf=10000,
|
| 41 |
+
avg_ceiling_height_ft=10,
|
| 42 |
+
)
|
| 43 |
+
|
| 44 |
+
assert isinstance(result, AirFiltrationResult)
|
| 45 |
+
assert result.total_volume_cf == 100000
|
| 46 |
+
assert result.required_ach == 4
|
| 47 |
+
assert result.unit_cfm == 2000
|
| 48 |
+
# (100000 * 4) / (2000 * 60) = 3.33 -> 4 units
|
| 49 |
+
assert result.units_required == 4
|
| 50 |
+
|
| 51 |
+
def test_air_filtration_large_space(self, calculator):
|
| 52 |
+
"""Test air filtration for large space."""
|
| 53 |
+
result = calculator.calculate_air_filtration(
|
| 54 |
+
total_area_sf=50000,
|
| 55 |
+
avg_ceiling_height_ft=30,
|
| 56 |
+
)
|
| 57 |
+
|
| 58 |
+
# 1,500,000 CF * 4 ACH / (2000 * 60) = 50 units
|
| 59 |
+
assert result.units_required == 50
|
| 60 |
+
assert result.total_volume_cf == 1500000
|
| 61 |
+
|
| 62 |
+
def test_air_filtration_minimum_one_unit(self, calculator):
|
| 63 |
+
"""Test minimum 1 unit is required."""
|
| 64 |
+
result = calculator.calculate_air_filtration(
|
| 65 |
+
total_area_sf=100,
|
| 66 |
+
avg_ceiling_height_ft=8,
|
| 67 |
+
)
|
| 68 |
+
|
| 69 |
+
assert result.units_required >= 1
|
| 70 |
+
|
| 71 |
+
def test_sample_density_small_area(self, calculator):
|
| 72 |
+
"""Test sample density for small area."""
|
| 73 |
+
result = calculator.calculate_sample_density(
|
| 74 |
+
total_area_sf=3000,
|
| 75 |
+
surface_types_count=3,
|
| 76 |
+
)
|
| 77 |
+
|
| 78 |
+
assert isinstance(result, SampleDensityResult)
|
| 79 |
+
assert result.tape_lifts_min == 9 # 3 * 3
|
| 80 |
+
assert result.tape_lifts_max == 15 # 5 * 3
|
| 81 |
+
|
| 82 |
+
def test_sample_density_medium_area(self, calculator):
|
| 83 |
+
"""Test sample density for medium area."""
|
| 84 |
+
result = calculator.calculate_sample_density(
|
| 85 |
+
total_area_sf=15000,
|
| 86 |
+
surface_types_count=3,
|
| 87 |
+
)
|
| 88 |
+
|
| 89 |
+
assert result.tape_lifts_min == 15 # 5 * 3
|
| 90 |
+
assert result.tape_lifts_max == 30 # 10 * 3
|
| 91 |
+
|
| 92 |
+
def test_sample_density_ceiling_deck(self, calculator):
|
| 93 |
+
"""Test ceiling deck enhanced sampling."""
|
| 94 |
+
result = calculator.calculate_sample_density(
|
| 95 |
+
total_area_sf=10000,
|
| 96 |
+
has_ceiling_deck=True,
|
| 97 |
+
)
|
| 98 |
+
|
| 99 |
+
# 1 per 2,500 SF = 4 samples
|
| 100 |
+
assert result.ceiling_deck_samples == 4
|
| 101 |
+
|
| 102 |
+
def test_regulatory_flags_pre_1980(self, calculator):
|
| 103 |
+
"""Test regulatory flags for pre-1980 construction."""
|
| 104 |
+
flags = calculator.get_regulatory_flags(
|
| 105 |
+
construction_era="pre-1980",
|
| 106 |
+
facility_classification="non-operational",
|
| 107 |
+
)
|
| 108 |
+
|
| 109 |
+
assert isinstance(flags, RegulatoryFlags)
|
| 110 |
+
assert flags.lbp_survey_required is True
|
| 111 |
+
assert flags.acm_survey_required is True
|
| 112 |
+
assert flags.acm_survey_recommended is False
|
| 113 |
+
|
| 114 |
+
def test_regulatory_flags_1980_2000(self, calculator):
|
| 115 |
+
"""Test regulatory flags for 1980-2000 construction."""
|
| 116 |
+
flags = calculator.get_regulatory_flags(
|
| 117 |
+
construction_era="1980-2000",
|
| 118 |
+
facility_classification="operational",
|
| 119 |
+
)
|
| 120 |
+
|
| 121 |
+
assert flags.lbp_survey_required is False
|
| 122 |
+
assert flags.acm_survey_required is False
|
| 123 |
+
assert flags.acm_survey_recommended is True
|
| 124 |
+
|
| 125 |
+
def test_regulatory_flags_childcare(self, calculator):
|
| 126 |
+
"""Test regulatory flags for public/childcare."""
|
| 127 |
+
flags = calculator.get_regulatory_flags(
|
| 128 |
+
construction_era="post-2000",
|
| 129 |
+
facility_classification="public-childcare",
|
| 130 |
+
)
|
| 131 |
+
|
| 132 |
+
assert flags.enhanced_childcare_thresholds is True
|
| 133 |
+
|
| 134 |
+
def test_metals_thresholds_non_operational(self, calculator):
|
| 135 |
+
"""Test metals thresholds for non-operational facility."""
|
| 136 |
+
thresholds = calculator.get_metals_thresholds("non-operational")
|
| 137 |
+
|
| 138 |
+
assert isinstance(thresholds, MetalsThresholds)
|
| 139 |
+
assert thresholds.lead_ug_100cm2 == 22.0
|
| 140 |
+
assert thresholds.cadmium_ug_100cm2 == 3.3
|
| 141 |
+
assert thresholds.arsenic_ug_100cm2 == 6.7
|
| 142 |
+
|
| 143 |
+
def test_metals_thresholds_operational(self, calculator):
|
| 144 |
+
"""Test metals thresholds for operational facility."""
|
| 145 |
+
thresholds = calculator.get_metals_thresholds("operational")
|
| 146 |
+
|
| 147 |
+
assert thresholds.lead_ug_100cm2 == 500.0
|
| 148 |
+
assert thresholds.cadmium_ug_100cm2 == 50.0
|
| 149 |
+
|
| 150 |
+
def test_metals_thresholds_childcare(self, calculator):
|
| 151 |
+
"""Test metals thresholds for childcare facility."""
|
| 152 |
+
thresholds = calculator.get_metals_thresholds("public-childcare")
|
| 153 |
+
|
| 154 |
+
# EPA/HUD October 2024 for floors
|
| 155 |
+
assert thresholds.lead_ug_100cm2 == 4.3
|
| 156 |
+
|
| 157 |
+
def test_particulate_thresholds_exist(self):
|
| 158 |
+
"""Test particulate thresholds are defined."""
|
| 159 |
+
assert "ash_char" in PARTICULATE_THRESHOLDS
|
| 160 |
+
assert "aciniform_soot" in PARTICULATE_THRESHOLDS
|
| 161 |
+
assert PARTICULATE_THRESHOLDS["ash_char"]["clearance"] == 150
|
| 162 |
+
assert PARTICULATE_THRESHOLDS["aciniform_soot"]["clearance"] == 500
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
class TestDispositionEngine:
|
| 166 |
+
"""Test disposition determination."""
|
| 167 |
+
|
| 168 |
+
@pytest.fixture
|
| 169 |
+
def engine(self):
|
| 170 |
+
return DispositionEngine()
|
| 171 |
+
|
| 172 |
+
def test_disposition_background(self, engine):
|
| 173 |
+
"""Test disposition for background condition."""
|
| 174 |
+
result = engine.determine_disposition(
|
| 175 |
+
zone="far-field",
|
| 176 |
+
condition="background",
|
| 177 |
+
use_rag=False,
|
| 178 |
+
)
|
| 179 |
+
|
| 180 |
+
assert isinstance(result, DispositionResult)
|
| 181 |
+
assert result.disposition == "no-action"
|
| 182 |
+
assert result.confidence == 1.0
|
| 183 |
+
|
| 184 |
+
def test_disposition_structural_damage(self, engine):
|
| 185 |
+
"""Test disposition for structural damage."""
|
| 186 |
+
result = engine.determine_disposition(
|
| 187 |
+
zone="burn-zone",
|
| 188 |
+
condition="structural-damage",
|
| 189 |
+
use_rag=False,
|
| 190 |
+
)
|
| 191 |
+
|
| 192 |
+
assert result.disposition == "remove-repair"
|
| 193 |
+
assert result.confidence == 1.0
|
| 194 |
+
|
| 195 |
+
def test_disposition_far_field_light(self, engine):
|
| 196 |
+
"""Test disposition for far-field light condition."""
|
| 197 |
+
result = engine.determine_disposition(
|
| 198 |
+
zone="far-field",
|
| 199 |
+
condition="light",
|
| 200 |
+
use_rag=False,
|
| 201 |
+
)
|
| 202 |
+
|
| 203 |
+
assert result.disposition == "clean"
|
| 204 |
+
assert "standard" in result.protocol.lower()
|
| 205 |
+
|
| 206 |
+
def test_disposition_near_field_heavy(self, engine):
|
| 207 |
+
"""Test disposition for near-field heavy condition."""
|
| 208 |
+
result = engine.determine_disposition(
|
| 209 |
+
zone="near-field",
|
| 210 |
+
condition="heavy",
|
| 211 |
+
use_rag=False,
|
| 212 |
+
)
|
| 213 |
+
|
| 214 |
+
assert result.disposition == "clean"
|
| 215 |
+
assert "aggressive" in result.protocol.lower()
|
| 216 |
+
|
| 217 |
+
def test_cleaning_method_drywall(self, engine):
|
| 218 |
+
"""Test cleaning method for drywall."""
|
| 219 |
+
method = engine.get_cleaning_method(
|
| 220 |
+
surface_type="drywall",
|
| 221 |
+
condition="moderate",
|
| 222 |
+
use_rag=False,
|
| 223 |
+
)
|
| 224 |
+
|
| 225 |
+
assert "HEPA" in method["method"]
|
| 226 |
+
assert method["surface_type"] == "drywall"
|
| 227 |
+
|
| 228 |
+
def test_cleaning_method_concrete(self, engine):
|
| 229 |
+
"""Test cleaning method for concrete."""
|
| 230 |
+
method = engine.get_cleaning_method(
|
| 231 |
+
surface_type="concrete-floor",
|
| 232 |
+
condition="heavy",
|
| 233 |
+
use_rag=False,
|
| 234 |
+
)
|
| 235 |
+
|
| 236 |
+
assert "scrubber" in method["method"].lower()
|
| 237 |
+
assert "multiple passes" in method["method"].lower()
|
| 238 |
+
|
| 239 |
+
def test_disposition_matrix_completeness(self):
|
| 240 |
+
"""Test disposition matrix covers expected combinations."""
|
| 241 |
+
# Key combinations should be in matrix
|
| 242 |
+
assert ("far-field", "light") in DISPOSITION_MATRIX
|
| 243 |
+
assert ("near-field", "moderate") in DISPOSITION_MATRIX
|
| 244 |
+
assert ("burn-zone", "heavy") in DISPOSITION_MATRIX
|
| 245 |
+
assert ("any", "background") in DISPOSITION_MATRIX
|
| 246 |
+
assert ("any", "structural-damage") in DISPOSITION_MATRIX
|
| 247 |
+
|
| 248 |
+
def test_cleaning_protocols_exist(self):
|
| 249 |
+
"""Test cleaning protocols are defined."""
|
| 250 |
+
assert "standard" in CLEANING_PROTOCOLS
|
| 251 |
+
assert "full" in CLEANING_PROTOCOLS
|
| 252 |
+
assert "aggressive" in CLEANING_PROTOCOLS
|
| 253 |
+
|
| 254 |
+
for protocol in CLEANING_PROTOCOLS.values():
|
| 255 |
+
assert "name" in protocol
|
| 256 |
+
assert "steps" in protocol
|
| 257 |
+
assert len(protocol["steps"]) > 0
|
| 258 |
+
|
| 259 |
+
|
| 260 |
+
class TestDocumentGenerator:
|
| 261 |
+
"""Test document generation."""
|
| 262 |
+
|
| 263 |
+
@pytest.fixture
|
| 264 |
+
def generator(self):
|
| 265 |
+
return DocumentGenerator()
|
| 266 |
+
|
| 267 |
+
@pytest.fixture
|
| 268 |
+
def sample_session(self):
|
| 269 |
+
session = SessionState()
|
| 270 |
+
session.project.project_name = "Test Fire Project"
|
| 271 |
+
session.project.address = "123 Main St"
|
| 272 |
+
session.project.city = "Springfield"
|
| 273 |
+
session.project.state = "IL"
|
| 274 |
+
session.project.zip_code = "62701"
|
| 275 |
+
session.project.client_name = "Test Client"
|
| 276 |
+
session.project.fire_date = "2024-01-01"
|
| 277 |
+
session.project.assessment_date = "2024-01-15"
|
| 278 |
+
session.project.facility_classification = "non-operational"
|
| 279 |
+
session.project.construction_era = "pre-1980"
|
| 280 |
+
session.project.assessor_name = "John Doe"
|
| 281 |
+
session.project.assessor_credentials = "CIH"
|
| 282 |
+
|
| 283 |
+
session.rooms.append(
|
| 284 |
+
RoomFormData(
|
| 285 |
+
id="room-001",
|
| 286 |
+
name="Main Hall",
|
| 287 |
+
length_ft=50,
|
| 288 |
+
width_ft=30,
|
| 289 |
+
ceiling_height_ft=12,
|
| 290 |
+
)
|
| 291 |
+
)
|
| 292 |
+
return session
|
| 293 |
+
|
| 294 |
+
@pytest.fixture
|
| 295 |
+
def sample_calculations(self):
|
| 296 |
+
calc = FDAMCalculator()
|
| 297 |
+
return {
|
| 298 |
+
"total_area_sf": 1500,
|
| 299 |
+
"total_volume_cf": 18000,
|
| 300 |
+
"avg_ceiling_height_ft": 12,
|
| 301 |
+
"air_filtration": calc.calculate_air_filtration(1500, 12),
|
| 302 |
+
"sample_density": calc.calculate_sample_density(1500),
|
| 303 |
+
"regulatory_flags": calc.get_regulatory_flags("pre-1980", "non-operational"),
|
| 304 |
+
"metals_thresholds": calc.get_metals_thresholds("non-operational"),
|
| 305 |
+
"particulate_thresholds": PARTICULATE_THRESHOLDS,
|
| 306 |
+
}
|
| 307 |
+
|
| 308 |
+
def test_generate_sow_basic(self, generator, sample_session, sample_calculations):
|
| 309 |
+
"""Test basic SOW generation."""
|
| 310 |
+
doc = generator.generate_sow(
|
| 311 |
+
session=sample_session,
|
| 312 |
+
vision_results={},
|
| 313 |
+
surface_dispositions=[],
|
| 314 |
+
calculations=sample_calculations,
|
| 315 |
+
)
|
| 316 |
+
|
| 317 |
+
assert isinstance(doc, GeneratedDocument)
|
| 318 |
+
assert "Test Fire Project" in doc.markdown
|
| 319 |
+
assert "Cleaning Specification" in doc.markdown
|
| 320 |
+
assert doc.word_count > 0
|
| 321 |
+
|
| 322 |
+
def test_generate_sow_sections(self, generator, sample_session, sample_calculations):
|
| 323 |
+
"""Test SOW contains required sections."""
|
| 324 |
+
doc = generator.generate_sow(
|
| 325 |
+
session=sample_session,
|
| 326 |
+
vision_results={},
|
| 327 |
+
surface_dispositions=[],
|
| 328 |
+
calculations=sample_calculations,
|
| 329 |
+
)
|
| 330 |
+
|
| 331 |
+
# Check for key sections
|
| 332 |
+
assert "## Project Information" in doc.markdown
|
| 333 |
+
assert "## Scope Summary" in doc.markdown
|
| 334 |
+
assert "## Room Inventory" in doc.markdown
|
| 335 |
+
assert "## Air Filtration Requirements" in doc.markdown
|
| 336 |
+
assert "## Regulatory Requirements" in doc.markdown
|
| 337 |
+
assert "## Clearance Thresholds" in doc.markdown
|
| 338 |
+
|
| 339 |
+
def test_generate_sow_with_dispositions(self, generator, sample_session, sample_calculations):
|
| 340 |
+
"""Test SOW generation with dispositions."""
|
| 341 |
+
dispositions = [
|
| 342 |
+
SurfaceDisposition(
|
| 343 |
+
surface_type="drywall",
|
| 344 |
+
room_name="Main Hall",
|
| 345 |
+
zone="near-field",
|
| 346 |
+
condition="moderate",
|
| 347 |
+
disposition="clean",
|
| 348 |
+
cleaning_method="HEPA vacuum → Wet wipe",
|
| 349 |
+
)
|
| 350 |
+
]
|
| 351 |
+
|
| 352 |
+
doc = generator.generate_sow(
|
| 353 |
+
session=sample_session,
|
| 354 |
+
vision_results={},
|
| 355 |
+
surface_dispositions=dispositions,
|
| 356 |
+
calculations=sample_calculations,
|
| 357 |
+
)
|
| 358 |
+
|
| 359 |
+
assert "drywall" in doc.markdown.lower()
|
| 360 |
+
assert "CLEAN" in doc.markdown
|
| 361 |
+
|
| 362 |
+
|
| 363 |
+
class TestFDAMPipeline:
|
| 364 |
+
"""Test full pipeline execution."""
|
| 365 |
+
|
| 366 |
+
@pytest.fixture
|
| 367 |
+
def pipeline(self):
|
| 368 |
+
return FDAMPipeline()
|
| 369 |
+
|
| 370 |
+
@pytest.fixture
|
| 371 |
+
def valid_session(self):
|
| 372 |
+
"""Create a valid session for pipeline testing."""
|
| 373 |
+
session = SessionState()
|
| 374 |
+
session.project.project_name = "Pipeline Test"
|
| 375 |
+
session.project.address = "456 Oak Ave"
|
| 376 |
+
session.project.city = "Chicago"
|
| 377 |
+
session.project.state = "IL"
|
| 378 |
+
session.project.zip_code = "60601"
|
| 379 |
+
session.project.client_name = "Test Corp"
|
| 380 |
+
session.project.fire_date = "2024-06-01"
|
| 381 |
+
session.project.assessment_date = "2024-06-15"
|
| 382 |
+
session.project.facility_classification = "operational"
|
| 383 |
+
session.project.construction_era = "post-2000"
|
| 384 |
+
session.project.assessor_name = "Jane Smith"
|
| 385 |
+
|
| 386 |
+
session.rooms.append(
|
| 387 |
+
RoomFormData(
|
| 388 |
+
id="room-001",
|
| 389 |
+
name="Office A",
|
| 390 |
+
length_ft=20,
|
| 391 |
+
width_ft=15,
|
| 392 |
+
ceiling_height_ft=10,
|
| 393 |
+
)
|
| 394 |
+
)
|
| 395 |
+
|
| 396 |
+
# Add image metadata
|
| 397 |
+
img_id = "test-img-001"
|
| 398 |
+
session.images.append(
|
| 399 |
+
ImageFormData(
|
| 400 |
+
id=img_id,
|
| 401 |
+
filename="test.jpg",
|
| 402 |
+
room_id="room-001",
|
| 403 |
+
)
|
| 404 |
+
)
|
| 405 |
+
|
| 406 |
+
# Store actual image bytes
|
| 407 |
+
test_image = Image.new("RGB", (100, 100), color="red")
|
| 408 |
+
img_bytes = io.BytesIO()
|
| 409 |
+
test_image.save(img_bytes, format="PNG")
|
| 410 |
+
image_store.store(img_id, img_bytes.getvalue())
|
| 411 |
+
|
| 412 |
+
yield session
|
| 413 |
+
|
| 414 |
+
# Cleanup
|
| 415 |
+
image_store.clear()
|
| 416 |
+
|
| 417 |
+
def test_pipeline_execute_success(self, pipeline, valid_session):
|
| 418 |
+
"""Test successful pipeline execution."""
|
| 419 |
+
progress_updates = []
|
| 420 |
+
|
| 421 |
+
def progress_callback(prog):
|
| 422 |
+
progress_updates.append(prog)
|
| 423 |
+
|
| 424 |
+
result = pipeline.execute(
|
| 425 |
+
session=valid_session,
|
| 426 |
+
progress_callback=progress_callback,
|
| 427 |
+
)
|
| 428 |
+
|
| 429 |
+
assert isinstance(result, PipelineResult)
|
| 430 |
+
assert result.success is True
|
| 431 |
+
assert result.document is not None
|
| 432 |
+
assert len(result.annotated_images) > 0
|
| 433 |
+
assert result.execution_time_seconds > 0
|
| 434 |
+
assert len(progress_updates) > 0
|
| 435 |
+
|
| 436 |
+
def test_pipeline_execute_missing_project_name(self, pipeline):
|
| 437 |
+
"""Test pipeline fails with missing project name."""
|
| 438 |
+
session = SessionState()
|
| 439 |
+
# No project name set
|
| 440 |
+
|
| 441 |
+
result = pipeline.execute(session=session)
|
| 442 |
+
|
| 443 |
+
assert result.success is False
|
| 444 |
+
assert len(result.errors) > 0
|
| 445 |
+
assert any("project" in e.lower() for e in result.errors)
|
| 446 |
+
|
| 447 |
+
def test_pipeline_execute_missing_images(self, pipeline):
|
| 448 |
+
"""Test pipeline fails with missing image bytes."""
|
| 449 |
+
session = SessionState()
|
| 450 |
+
session.project.project_name = "Test"
|
| 451 |
+
session.project.address = "123 Main"
|
| 452 |
+
session.project.city = "City"
|
| 453 |
+
session.project.state = "ST"
|
| 454 |
+
session.project.zip_code = "12345"
|
| 455 |
+
session.project.client_name = "Client"
|
| 456 |
+
session.project.fire_date = "2024-01-01"
|
| 457 |
+
session.project.assessment_date = "2024-01-02"
|
| 458 |
+
session.project.assessor_name = "Assessor"
|
| 459 |
+
|
| 460 |
+
session.rooms.append(
|
| 461 |
+
RoomFormData(id="r1", name="Room", length_ft=10, width_ft=10, ceiling_height_ft=10)
|
| 462 |
+
)
|
| 463 |
+
session.images.append(
|
| 464 |
+
ImageFormData(id="missing-img", filename="missing.jpg", room_id="r1")
|
| 465 |
+
)
|
| 466 |
+
# Don't store image bytes
|
| 467 |
+
|
| 468 |
+
result = pipeline.execute(session=session)
|
| 469 |
+
|
| 470 |
+
assert result.success is False
|
| 471 |
+
assert any("image" in e.lower() or "upload" in e.lower() for e in result.errors)
|
| 472 |
+
|
| 473 |
+
def test_pipeline_generates_stats(self, pipeline, valid_session):
|
| 474 |
+
"""Test pipeline generates stats dictionary."""
|
| 475 |
+
result = pipeline.execute(session=valid_session)
|
| 476 |
+
|
| 477 |
+
stats = pipeline.generate_stats_dict(result)
|
| 478 |
+
|
| 479 |
+
assert "project_name" in stats
|
| 480 |
+
assert "total_rooms" in stats
|
| 481 |
+
assert "air_scrubbers_required" in stats
|
| 482 |
+
assert "execution_time" in stats
|
| 483 |
+
|
| 484 |
+
def test_pipeline_progress_stages(self, pipeline, valid_session):
|
| 485 |
+
"""Test pipeline reports all 6 stages."""
|
| 486 |
+
stages_seen = set()
|
| 487 |
+
|
| 488 |
+
def progress_callback(prog):
|
| 489 |
+
stages_seen.add(prog.stage)
|
| 490 |
+
|
| 491 |
+
pipeline.execute(
|
| 492 |
+
session=valid_session,
|
| 493 |
+
progress_callback=progress_callback,
|
| 494 |
+
)
|
| 495 |
+
|
| 496 |
+
# Should see stages 1-6
|
| 497 |
+
assert len(stages_seen) >= 5 # At least most stages
|
| 498 |
+
|
| 499 |
+
|
| 500 |
+
class TestIntegration:
|
| 501 |
+
"""Integration tests for pipeline with RAG."""
|
| 502 |
+
|
| 503 |
+
def test_calculator_with_session(self):
|
| 504 |
+
"""Test calculator with real session data."""
|
| 505 |
+
session = SessionState()
|
| 506 |
+
session.project.facility_classification = "non-operational"
|
| 507 |
+
session.project.construction_era = "pre-1980"
|
| 508 |
+
session.rooms.append(
|
| 509 |
+
RoomFormData(
|
| 510 |
+
id="r1",
|
| 511 |
+
name="Room 1",
|
| 512 |
+
length_ft=100,
|
| 513 |
+
width_ft=50,
|
| 514 |
+
ceiling_height_ft=15,
|
| 515 |
+
)
|
| 516 |
+
)
|
| 517 |
+
|
| 518 |
+
calc = FDAMCalculator()
|
| 519 |
+
results = calc.calculate_from_session(session)
|
| 520 |
+
|
| 521 |
+
assert results["total_area_sf"] == 5000
|
| 522 |
+
assert results["total_volume_cf"] == 75000
|
| 523 |
+
assert results["air_filtration"].units_required > 0
|
| 524 |
+
assert results["regulatory_flags"].lbp_survey_required is True
|
| 525 |
+
assert results["metals_thresholds"].lead_ug_100cm2 == 22.0
|
tests/test_rag.py
ADDED
|
@@ -0,0 +1,536 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tests for RAG (Retrieval Augmented Generation) components."""
|
| 2 |
+
|
| 3 |
+
import pytest
|
| 4 |
+
from pathlib import Path
|
| 5 |
+
import tempfile
|
| 6 |
+
import shutil
|
| 7 |
+
|
| 8 |
+
from rag.chunker import SemanticChunker, Chunk, chunk_file
|
| 9 |
+
from rag.vectorstore import ChromaVectorStore, MockEmbeddingFunction
|
| 10 |
+
from rag.retriever import FDAMRetriever, MockReranker, RetrievalResult
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class TestSemanticChunker:
|
| 14 |
+
"""Test semantic chunker with table preservation."""
|
| 15 |
+
|
| 16 |
+
def test_chunk_simple_document(self):
|
| 17 |
+
"""Test chunking a simple markdown document."""
|
| 18 |
+
text = """## Introduction
|
| 19 |
+
|
| 20 |
+
This is the introduction paragraph with some content.
|
| 21 |
+
|
| 22 |
+
## Section One
|
| 23 |
+
|
| 24 |
+
This section contains important information about the topic.
|
| 25 |
+
It has multiple sentences to form a proper paragraph.
|
| 26 |
+
|
| 27 |
+
## Section Two
|
| 28 |
+
|
| 29 |
+
Another section with different content here.
|
| 30 |
+
"""
|
| 31 |
+
chunker = SemanticChunker()
|
| 32 |
+
chunks = chunker.chunk_document(
|
| 33 |
+
text=text,
|
| 34 |
+
source="test.md",
|
| 35 |
+
category="methodology",
|
| 36 |
+
priority="primary",
|
| 37 |
+
)
|
| 38 |
+
|
| 39 |
+
assert len(chunks) >= 1
|
| 40 |
+
assert all(isinstance(c, Chunk) for c in chunks)
|
| 41 |
+
assert all(c.source == "test.md" for c in chunks)
|
| 42 |
+
assert all(c.category == "methodology" for c in chunks)
|
| 43 |
+
assert all(c.priority == "primary" for c in chunks)
|
| 44 |
+
|
| 45 |
+
def test_preserve_tables(self):
|
| 46 |
+
"""Test that tables are kept intact and not split."""
|
| 47 |
+
text = """## Thresholds
|
| 48 |
+
|
| 49 |
+
| Material | Threshold | Unit |
|
| 50 |
+
|----------|-----------|------|
|
| 51 |
+
| Lead | 22 | µg/100cm² |
|
| 52 |
+
| Cadmium | 3.3 | µg/100cm² |
|
| 53 |
+
| Arsenic | 6.7 | µg/100cm² |
|
| 54 |
+
|
| 55 |
+
## Next Section
|
| 56 |
+
|
| 57 |
+
Some content after the table.
|
| 58 |
+
"""
|
| 59 |
+
chunker = SemanticChunker()
|
| 60 |
+
chunks = chunker.chunk_document(
|
| 61 |
+
text=text,
|
| 62 |
+
source="thresholds.md",
|
| 63 |
+
category="thresholds",
|
| 64 |
+
priority="reference-threshold",
|
| 65 |
+
)
|
| 66 |
+
|
| 67 |
+
# Find the table chunk
|
| 68 |
+
table_chunks = [c for c in chunks if c.content_type == "table"]
|
| 69 |
+
assert len(table_chunks) >= 1
|
| 70 |
+
|
| 71 |
+
# Table should be complete
|
| 72 |
+
table_chunk = table_chunks[0]
|
| 73 |
+
assert "Lead" in table_chunk.text
|
| 74 |
+
assert "Cadmium" in table_chunk.text
|
| 75 |
+
assert "Arsenic" in table_chunk.text
|
| 76 |
+
assert "|" in table_chunk.text
|
| 77 |
+
|
| 78 |
+
def test_extract_keywords(self):
|
| 79 |
+
"""Test keyword extraction from text."""
|
| 80 |
+
text = """## Zone Classification
|
| 81 |
+
|
| 82 |
+
The burn zone shows heavy soot deposits and structural damage.
|
| 83 |
+
Lead contamination requires HEPA vacuum cleaning per OSHA standards.
|
| 84 |
+
"""
|
| 85 |
+
chunker = SemanticChunker()
|
| 86 |
+
chunks = chunker.chunk_document(
|
| 87 |
+
text=text,
|
| 88 |
+
source="zones.md",
|
| 89 |
+
category="methodology",
|
| 90 |
+
priority="primary",
|
| 91 |
+
)
|
| 92 |
+
|
| 93 |
+
# Should extract relevant keywords
|
| 94 |
+
all_keywords = []
|
| 95 |
+
for chunk in chunks:
|
| 96 |
+
all_keywords.extend(chunk.keywords)
|
| 97 |
+
|
| 98 |
+
# Check for expected domain keywords
|
| 99 |
+
keyword_set = set(all_keywords)
|
| 100 |
+
assert "burn zone" in keyword_set or "heavy" in keyword_set
|
| 101 |
+
assert "soot" in keyword_set or "structural damage" in keyword_set
|
| 102 |
+
|
| 103 |
+
def test_chunk_metadata(self):
|
| 104 |
+
"""Test chunk metadata conversion."""
|
| 105 |
+
chunk = Chunk(
|
| 106 |
+
id="test_001",
|
| 107 |
+
text="Test content",
|
| 108 |
+
source="test.md",
|
| 109 |
+
category="methodology",
|
| 110 |
+
section="## Section 1",
|
| 111 |
+
priority="primary",
|
| 112 |
+
content_type="narrative",
|
| 113 |
+
keywords=["lead", "soot"],
|
| 114 |
+
)
|
| 115 |
+
|
| 116 |
+
metadata = chunk.to_metadata()
|
| 117 |
+
|
| 118 |
+
assert metadata["source"] == "test.md"
|
| 119 |
+
assert metadata["category"] == "methodology"
|
| 120 |
+
assert metadata["priority"] == "primary"
|
| 121 |
+
assert metadata["content_type"] == "narrative"
|
| 122 |
+
assert "lead" in metadata["keywords"]
|
| 123 |
+
assert "soot" in metadata["keywords"]
|
| 124 |
+
|
| 125 |
+
def test_split_by_headers(self):
|
| 126 |
+
"""Test section splitting by markdown headers."""
|
| 127 |
+
text = """## Section One
|
| 128 |
+
|
| 129 |
+
Content one.
|
| 130 |
+
|
| 131 |
+
### Subsection A
|
| 132 |
+
|
| 133 |
+
Content A.
|
| 134 |
+
|
| 135 |
+
## Section Two
|
| 136 |
+
|
| 137 |
+
Content two.
|
| 138 |
+
"""
|
| 139 |
+
chunker = SemanticChunker()
|
| 140 |
+
sections = chunker._split_by_headers(text)
|
| 141 |
+
|
| 142 |
+
# Should have at least 3 sections (Introduction + 2 main + 1 sub)
|
| 143 |
+
assert len(sections) >= 2
|
| 144 |
+
headers = [s[0] for s in sections]
|
| 145 |
+
assert any("Section One" in h for h in headers)
|
| 146 |
+
assert any("Section Two" in h for h in headers)
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
class TestMockEmbeddingFunction:
|
| 150 |
+
"""Test mock embedding function."""
|
| 151 |
+
|
| 152 |
+
def test_embedding_dimension(self):
|
| 153 |
+
"""Test that embeddings have correct dimension."""
|
| 154 |
+
mock = MockEmbeddingFunction()
|
| 155 |
+
embeddings = mock(["test text"])
|
| 156 |
+
|
| 157 |
+
assert len(embeddings) == 1
|
| 158 |
+
assert len(embeddings[0]) == mock.EMBEDDING_DIM
|
| 159 |
+
|
| 160 |
+
def test_deterministic_embeddings(self):
|
| 161 |
+
"""Test that same text produces same embedding."""
|
| 162 |
+
mock = MockEmbeddingFunction()
|
| 163 |
+
text = "This is a test sentence."
|
| 164 |
+
|
| 165 |
+
emb1 = mock([text])[0]
|
| 166 |
+
emb2 = mock([text])[0]
|
| 167 |
+
|
| 168 |
+
assert emb1 == emb2
|
| 169 |
+
|
| 170 |
+
def test_different_texts_different_embeddings(self):
|
| 171 |
+
"""Test that different texts produce different embeddings."""
|
| 172 |
+
mock = MockEmbeddingFunction()
|
| 173 |
+
|
| 174 |
+
emb1 = mock(["First text"])[0]
|
| 175 |
+
emb2 = mock(["Second text"])[0]
|
| 176 |
+
|
| 177 |
+
assert emb1 != emb2
|
| 178 |
+
|
| 179 |
+
def test_batch_embeddings(self):
|
| 180 |
+
"""Test embedding multiple texts at once."""
|
| 181 |
+
mock = MockEmbeddingFunction()
|
| 182 |
+
texts = ["Text one", "Text two", "Text three"]
|
| 183 |
+
|
| 184 |
+
embeddings = mock(texts)
|
| 185 |
+
|
| 186 |
+
assert len(embeddings) == 3
|
| 187 |
+
assert all(len(e) == mock.EMBEDDING_DIM for e in embeddings)
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
class TestChromaVectorStore:
|
| 191 |
+
"""Test ChromaDB vector store."""
|
| 192 |
+
|
| 193 |
+
@pytest.fixture
|
| 194 |
+
def temp_dir(self):
|
| 195 |
+
"""Create a temporary directory for ChromaDB."""
|
| 196 |
+
temp = tempfile.mkdtemp()
|
| 197 |
+
yield temp
|
| 198 |
+
shutil.rmtree(temp)
|
| 199 |
+
|
| 200 |
+
@pytest.fixture
|
| 201 |
+
def vectorstore(self, temp_dir):
|
| 202 |
+
"""Create a test vector store."""
|
| 203 |
+
return ChromaVectorStore(
|
| 204 |
+
persist_directory=temp_dir,
|
| 205 |
+
embedding_function=MockEmbeddingFunction(),
|
| 206 |
+
)
|
| 207 |
+
|
| 208 |
+
@pytest.fixture
|
| 209 |
+
def sample_chunks(self):
|
| 210 |
+
"""Create sample chunks for testing."""
|
| 211 |
+
return [
|
| 212 |
+
Chunk(
|
| 213 |
+
id="chunk_001",
|
| 214 |
+
text="Lead threshold for non-operational facilities is 22 µg/100cm².",
|
| 215 |
+
source="fdam.md",
|
| 216 |
+
category="thresholds",
|
| 217 |
+
section="## 1.4 Thresholds",
|
| 218 |
+
priority="primary",
|
| 219 |
+
content_type="narrative",
|
| 220 |
+
keywords=["lead", "non-operational"],
|
| 221 |
+
),
|
| 222 |
+
Chunk(
|
| 223 |
+
id="chunk_002",
|
| 224 |
+
text="Burn zone requires structural assessment before cleaning.",
|
| 225 |
+
source="fdam.md",
|
| 226 |
+
category="methodology",
|
| 227 |
+
section="## 4.1 Zone Classification",
|
| 228 |
+
priority="primary",
|
| 229 |
+
content_type="narrative",
|
| 230 |
+
keywords=["burn zone", "structural damage"],
|
| 231 |
+
),
|
| 232 |
+
Chunk(
|
| 233 |
+
id="chunk_003",
|
| 234 |
+
text="HEPA vacuum is required for soot removal.",
|
| 235 |
+
source="cleaning.md",
|
| 236 |
+
category="cleaning-procedures",
|
| 237 |
+
section="## 3.2 Methods",
|
| 238 |
+
priority="reference-narrative",
|
| 239 |
+
content_type="narrative",
|
| 240 |
+
keywords=["hepa", "vacuum", "soot"],
|
| 241 |
+
),
|
| 242 |
+
]
|
| 243 |
+
|
| 244 |
+
def test_add_chunks(self, vectorstore, sample_chunks):
|
| 245 |
+
"""Test adding chunks to vector store."""
|
| 246 |
+
count = vectorstore.add_chunks(sample_chunks)
|
| 247 |
+
assert count == 3
|
| 248 |
+
|
| 249 |
+
stats = vectorstore.get_stats()
|
| 250 |
+
assert stats["total_chunks"] == 3
|
| 251 |
+
|
| 252 |
+
def test_query_returns_results(self, vectorstore, sample_chunks):
|
| 253 |
+
"""Test querying the vector store."""
|
| 254 |
+
vectorstore.add_chunks(sample_chunks)
|
| 255 |
+
|
| 256 |
+
results = vectorstore.query("lead threshold", n_results=2)
|
| 257 |
+
|
| 258 |
+
assert len(results) <= 2
|
| 259 |
+
assert all("id" in r for r in results)
|
| 260 |
+
assert all("document" in r for r in results)
|
| 261 |
+
assert all("metadata" in r for r in results)
|
| 262 |
+
assert all("distance" in r for r in results)
|
| 263 |
+
|
| 264 |
+
def test_query_with_metadata_filter(self, vectorstore, sample_chunks):
|
| 265 |
+
"""Test querying with metadata filter."""
|
| 266 |
+
vectorstore.add_chunks(sample_chunks)
|
| 267 |
+
|
| 268 |
+
results = vectorstore.query(
|
| 269 |
+
"cleaning method",
|
| 270 |
+
n_results=5,
|
| 271 |
+
where={"priority": "primary"},
|
| 272 |
+
)
|
| 273 |
+
|
| 274 |
+
# All results should have primary priority
|
| 275 |
+
for r in results:
|
| 276 |
+
assert r["metadata"]["priority"] == "primary"
|
| 277 |
+
|
| 278 |
+
def test_clear_collection(self, vectorstore, sample_chunks):
|
| 279 |
+
"""Test clearing the collection."""
|
| 280 |
+
vectorstore.add_chunks(sample_chunks)
|
| 281 |
+
assert vectorstore.get_stats()["total_chunks"] == 3
|
| 282 |
+
|
| 283 |
+
vectorstore.clear()
|
| 284 |
+
assert vectorstore.get_stats()["total_chunks"] == 0
|
| 285 |
+
|
| 286 |
+
def test_delete_by_source(self, vectorstore, sample_chunks):
|
| 287 |
+
"""Test deleting chunks by source."""
|
| 288 |
+
vectorstore.add_chunks(sample_chunks)
|
| 289 |
+
|
| 290 |
+
deleted = vectorstore.delete_by_source("fdam.md")
|
| 291 |
+
assert deleted == 2 # Two chunks from fdam.md
|
| 292 |
+
|
| 293 |
+
stats = vectorstore.get_stats()
|
| 294 |
+
assert stats["total_chunks"] == 1
|
| 295 |
+
|
| 296 |
+
def test_get_stats(self, vectorstore, sample_chunks):
|
| 297 |
+
"""Test getting collection statistics."""
|
| 298 |
+
vectorstore.add_chunks(sample_chunks)
|
| 299 |
+
|
| 300 |
+
stats = vectorstore.get_stats()
|
| 301 |
+
|
| 302 |
+
assert stats["total_chunks"] == 3
|
| 303 |
+
assert "thresholds" in stats["categories"]
|
| 304 |
+
assert "methodology" in stats["categories"]
|
| 305 |
+
assert "primary" in stats["priorities"]
|
| 306 |
+
assert "reference-narrative" in stats["priorities"]
|
| 307 |
+
|
| 308 |
+
|
| 309 |
+
class TestMockReranker:
|
| 310 |
+
"""Test mock reranker."""
|
| 311 |
+
|
| 312 |
+
def test_rerank_returns_scores(self):
|
| 313 |
+
"""Test that reranker returns scores."""
|
| 314 |
+
reranker = MockReranker()
|
| 315 |
+
query = "lead threshold contamination"
|
| 316 |
+
documents = [
|
| 317 |
+
"Lead threshold for facilities is 22 µg/100cm².",
|
| 318 |
+
"The weather is nice today.",
|
| 319 |
+
"Contamination levels require assessment.",
|
| 320 |
+
]
|
| 321 |
+
|
| 322 |
+
scores = reranker.rerank(query, documents)
|
| 323 |
+
|
| 324 |
+
assert len(scores) == 3
|
| 325 |
+
assert all(0 <= s <= 1 for s in scores)
|
| 326 |
+
|
| 327 |
+
def test_relevant_doc_higher_score(self):
|
| 328 |
+
"""Test that more relevant docs get higher scores."""
|
| 329 |
+
reranker = MockReranker()
|
| 330 |
+
query = "lead threshold"
|
| 331 |
+
documents = [
|
| 332 |
+
"Lead threshold is 22 µg.", # Very relevant
|
| 333 |
+
"Weather forecast for tomorrow.", # Not relevant
|
| 334 |
+
]
|
| 335 |
+
|
| 336 |
+
scores = reranker.rerank(query, documents)
|
| 337 |
+
|
| 338 |
+
# First doc should have higher score (shares more words)
|
| 339 |
+
assert scores[0] > scores[1]
|
| 340 |
+
|
| 341 |
+
|
| 342 |
+
class TestFDAMRetriever:
|
| 343 |
+
"""Test FDAM retriever with priority weighting."""
|
| 344 |
+
|
| 345 |
+
@pytest.fixture
|
| 346 |
+
def temp_dir(self):
|
| 347 |
+
"""Create a temporary directory."""
|
| 348 |
+
temp = tempfile.mkdtemp()
|
| 349 |
+
yield temp
|
| 350 |
+
shutil.rmtree(temp)
|
| 351 |
+
|
| 352 |
+
@pytest.fixture
|
| 353 |
+
def retriever(self, temp_dir):
|
| 354 |
+
"""Create a test retriever with sample data."""
|
| 355 |
+
vectorstore = ChromaVectorStore(
|
| 356 |
+
persist_directory=temp_dir,
|
| 357 |
+
embedding_function=MockEmbeddingFunction(),
|
| 358 |
+
)
|
| 359 |
+
|
| 360 |
+
# Add sample chunks
|
| 361 |
+
chunks = [
|
| 362 |
+
Chunk(
|
| 363 |
+
id="primary_001",
|
| 364 |
+
text="Lead threshold for non-operational is 22 µg/100cm² per FDAM.",
|
| 365 |
+
source="fdam.md",
|
| 366 |
+
category="thresholds",
|
| 367 |
+
section="## Thresholds",
|
| 368 |
+
priority="primary",
|
| 369 |
+
content_type="narrative",
|
| 370 |
+
keywords=["lead", "threshold", "non-operational"],
|
| 371 |
+
),
|
| 372 |
+
Chunk(
|
| 373 |
+
id="ref_001",
|
| 374 |
+
text="Lead clearance levels from BNL SOP.",
|
| 375 |
+
source="bnl.md",
|
| 376 |
+
category="thresholds",
|
| 377 |
+
section="## Attachment 9.3",
|
| 378 |
+
priority="reference-threshold",
|
| 379 |
+
content_type="table",
|
| 380 |
+
keywords=["lead", "clearance"],
|
| 381 |
+
),
|
| 382 |
+
Chunk(
|
| 383 |
+
id="ref_002",
|
| 384 |
+
text="General cleaning procedures for soot removal.",
|
| 385 |
+
source="cleaning.md",
|
| 386 |
+
category="cleaning-procedures",
|
| 387 |
+
section="## Methods",
|
| 388 |
+
priority="reference-narrative",
|
| 389 |
+
content_type="narrative",
|
| 390 |
+
keywords=["cleaning", "soot"],
|
| 391 |
+
),
|
| 392 |
+
]
|
| 393 |
+
vectorstore.add_chunks(chunks)
|
| 394 |
+
|
| 395 |
+
return FDAMRetriever(
|
| 396 |
+
vectorstore=vectorstore,
|
| 397 |
+
reranker=MockReranker(),
|
| 398 |
+
use_reranking=True,
|
| 399 |
+
)
|
| 400 |
+
|
| 401 |
+
def test_retrieve_returns_results(self, retriever):
|
| 402 |
+
"""Test basic retrieval."""
|
| 403 |
+
results = retriever.retrieve("lead threshold", top_k=3)
|
| 404 |
+
|
| 405 |
+
assert len(results) <= 3
|
| 406 |
+
assert all(isinstance(r, RetrievalResult) for r in results)
|
| 407 |
+
|
| 408 |
+
def test_priority_weighting(self, retriever):
|
| 409 |
+
"""Test that primary sources get higher weight."""
|
| 410 |
+
results = retriever.retrieve("lead threshold", top_k=3)
|
| 411 |
+
|
| 412 |
+
# Find primary and reference results
|
| 413 |
+
primary_results = [r for r in results if r.priority == "primary"]
|
| 414 |
+
ref_results = [r for r in results if r.priority != "primary"]
|
| 415 |
+
|
| 416 |
+
if primary_results and ref_results:
|
| 417 |
+
# Primary should have higher weighted score (before reranking)
|
| 418 |
+
# Note: final_score includes reranking which may change order
|
| 419 |
+
primary = primary_results[0]
|
| 420 |
+
ref = ref_results[0]
|
| 421 |
+
|
| 422 |
+
# With similar similarity, primary weight (1.0) > ref weight (0.8-0.9)
|
| 423 |
+
# This test validates the weighting is applied
|
| 424 |
+
assert primary.weighted_score > 0
|
| 425 |
+
|
| 426 |
+
def test_category_filter(self, retriever):
|
| 427 |
+
"""Test filtering by category."""
|
| 428 |
+
results = retriever.retrieve(
|
| 429 |
+
"cleaning method",
|
| 430 |
+
top_k=5,
|
| 431 |
+
category_filter="cleaning-procedures",
|
| 432 |
+
)
|
| 433 |
+
|
| 434 |
+
for r in results:
|
| 435 |
+
assert r.category == "cleaning-procedures"
|
| 436 |
+
|
| 437 |
+
def test_priority_filter(self, retriever):
|
| 438 |
+
"""Test filtering by priority."""
|
| 439 |
+
results = retriever.retrieve(
|
| 440 |
+
"threshold",
|
| 441 |
+
top_k=5,
|
| 442 |
+
priority_filter="primary",
|
| 443 |
+
)
|
| 444 |
+
|
| 445 |
+
for r in results:
|
| 446 |
+
assert r.priority == "primary"
|
| 447 |
+
|
| 448 |
+
def test_retrieve_for_context(self, retriever):
|
| 449 |
+
"""Test context string generation."""
|
| 450 |
+
context = retriever.retrieve_for_context("lead threshold", top_k=2)
|
| 451 |
+
|
| 452 |
+
assert isinstance(context, str)
|
| 453 |
+
assert "Source:" in context or "No relevant context" in context
|
| 454 |
+
|
| 455 |
+
def test_retrieve_thresholds(self, retriever):
|
| 456 |
+
"""Test threshold-specific retrieval."""
|
| 457 |
+
results = retriever.retrieve_thresholds(
|
| 458 |
+
material_type="lead",
|
| 459 |
+
facility_type="non-operational",
|
| 460 |
+
)
|
| 461 |
+
|
| 462 |
+
assert len(results) <= 3
|
| 463 |
+
# Should filter to thresholds category
|
| 464 |
+
for r in results:
|
| 465 |
+
assert r.category == "thresholds"
|
| 466 |
+
|
| 467 |
+
def test_retrieve_disposition(self, retriever):
|
| 468 |
+
"""Test disposition-specific retrieval."""
|
| 469 |
+
results = retriever.retrieve_disposition(
|
| 470 |
+
zone="burn-zone",
|
| 471 |
+
condition="heavy",
|
| 472 |
+
)
|
| 473 |
+
|
| 474 |
+
# Should prefer primary sources
|
| 475 |
+
if results:
|
| 476 |
+
assert results[0].priority == "primary"
|
| 477 |
+
|
| 478 |
+
def test_result_to_dict(self, retriever):
|
| 479 |
+
"""Test RetrievalResult to_dict method."""
|
| 480 |
+
results = retriever.retrieve("test", top_k=1)
|
| 481 |
+
|
| 482 |
+
if results:
|
| 483 |
+
result_dict = results[0].to_dict()
|
| 484 |
+
assert "chunk_id" in result_dict
|
| 485 |
+
assert "text" in result_dict
|
| 486 |
+
assert "source" in result_dict
|
| 487 |
+
assert "similarity_score" in result_dict
|
| 488 |
+
assert "final_score" in result_dict
|
| 489 |
+
|
| 490 |
+
def test_empty_query_handling(self, retriever):
|
| 491 |
+
"""Test handling of query with no good matches."""
|
| 492 |
+
results = retriever.retrieve(
|
| 493 |
+
"completely unrelated xyz123",
|
| 494 |
+
top_k=5,
|
| 495 |
+
category_filter="thresholds",
|
| 496 |
+
)
|
| 497 |
+
|
| 498 |
+
# Should still return results (just lower scores)
|
| 499 |
+
assert isinstance(results, list)
|
| 500 |
+
|
| 501 |
+
|
| 502 |
+
class TestChunkFile:
|
| 503 |
+
"""Test the chunk_file convenience function."""
|
| 504 |
+
|
| 505 |
+
@pytest.fixture
|
| 506 |
+
def temp_md_file(self):
|
| 507 |
+
"""Create a temporary markdown file."""
|
| 508 |
+
temp = tempfile.NamedTemporaryFile(
|
| 509 |
+
mode="w",
|
| 510 |
+
suffix=".md",
|
| 511 |
+
delete=False,
|
| 512 |
+
encoding="utf-8",
|
| 513 |
+
)
|
| 514 |
+
temp.write("""## Test Document
|
| 515 |
+
|
| 516 |
+
This is test content for chunking.
|
| 517 |
+
|
| 518 |
+
| Column A | Column B |
|
| 519 |
+
|----------|----------|
|
| 520 |
+
| Value 1 | Value 2 |
|
| 521 |
+
""")
|
| 522 |
+
temp.close()
|
| 523 |
+
yield Path(temp.name)
|
| 524 |
+
Path(temp.name).unlink()
|
| 525 |
+
|
| 526 |
+
def test_chunk_file(self, temp_md_file):
|
| 527 |
+
"""Test chunking a file directly."""
|
| 528 |
+
chunks = chunk_file(
|
| 529 |
+
filepath=temp_md_file,
|
| 530 |
+
category="methodology",
|
| 531 |
+
priority="primary",
|
| 532 |
+
)
|
| 533 |
+
|
| 534 |
+
assert len(chunks) >= 1
|
| 535 |
+
assert all(c.source == temp_md_file.name for c in chunks)
|
| 536 |
+
assert all(c.category == "methodology" for c in chunks)
|
tests/test_schemas.py
ADDED
|
@@ -0,0 +1,459 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tests for FDAM AI Pipeline Pydantic schemas."""
|
| 2 |
+
|
| 3 |
+
from datetime import date
|
| 4 |
+
|
| 5 |
+
import pytest
|
| 6 |
+
from pydantic import ValidationError
|
| 7 |
+
|
| 8 |
+
from schemas import (
|
| 9 |
+
# Input models
|
| 10 |
+
AssessmentInput,
|
| 11 |
+
Dimensions,
|
| 12 |
+
ImageMetadata,
|
| 13 |
+
ProjectInfo,
|
| 14 |
+
QualitativeObservations,
|
| 15 |
+
Room,
|
| 16 |
+
Surface,
|
| 17 |
+
get_material_category,
|
| 18 |
+
# Output models
|
| 19 |
+
AirFiltration,
|
| 20 |
+
CalculationResults,
|
| 21 |
+
CombustionIndicators,
|
| 22 |
+
ConditionAnalysis,
|
| 23 |
+
ConfidenceReport,
|
| 24 |
+
DetectedMaterial,
|
| 25 |
+
EquipmentRequirements,
|
| 26 |
+
GeneratedDocuments,
|
| 27 |
+
LaborEstimate,
|
| 28 |
+
RegulatoryFlags,
|
| 29 |
+
SampleDensity,
|
| 30 |
+
SamplingRecommendation,
|
| 31 |
+
SurfaceAreas,
|
| 32 |
+
VisionAnalysisResult,
|
| 33 |
+
ZoneAnalysis,
|
| 34 |
+
)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
# --- Input Schema Tests ---
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
class TestMaterialCategory:
|
| 41 |
+
"""Test material category helper function."""
|
| 42 |
+
|
| 43 |
+
def test_non_porous_materials(self):
|
| 44 |
+
assert get_material_category("steel") == "non-porous"
|
| 45 |
+
assert get_material_category("concrete") == "non-porous"
|
| 46 |
+
assert get_material_category("glass") == "non-porous"
|
| 47 |
+
assert get_material_category("metal") == "non-porous"
|
| 48 |
+
assert get_material_category("cmu") == "non-porous"
|
| 49 |
+
|
| 50 |
+
def test_semi_porous_materials(self):
|
| 51 |
+
assert get_material_category("drywall-painted") == "semi-porous"
|
| 52 |
+
assert get_material_category("drywall-unpainted") == "semi-porous"
|
| 53 |
+
assert get_material_category("wood-sealed") == "semi-porous"
|
| 54 |
+
assert get_material_category("wood-unsealed") == "semi-porous"
|
| 55 |
+
|
| 56 |
+
def test_porous_materials(self):
|
| 57 |
+
assert get_material_category("carpet") == "porous"
|
| 58 |
+
assert get_material_category("carpet-pad") == "porous"
|
| 59 |
+
assert get_material_category("insulation-fiberglass") == "porous"
|
| 60 |
+
assert get_material_category("acoustic-tile") == "porous"
|
| 61 |
+
assert get_material_category("upholstery") == "porous"
|
| 62 |
+
|
| 63 |
+
def test_hvac_materials(self):
|
| 64 |
+
assert get_material_category("ductwork-rigid") == "hvac"
|
| 65 |
+
assert get_material_category("ductwork-flexible") == "hvac"
|
| 66 |
+
assert get_material_category("hvac-interior-insulation") == "hvac"
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
class TestDimensions:
|
| 70 |
+
"""Test Dimensions model."""
|
| 71 |
+
|
| 72 |
+
def test_valid_dimensions(self):
|
| 73 |
+
dims = Dimensions(length_ft=100, width_ft=50, ceiling_height_ft=20)
|
| 74 |
+
assert dims.area_sf == 5000
|
| 75 |
+
assert dims.volume_cf == 100000
|
| 76 |
+
|
| 77 |
+
def test_invalid_zero_dimension(self):
|
| 78 |
+
with pytest.raises(ValidationError):
|
| 79 |
+
Dimensions(length_ft=0, width_ft=50, ceiling_height_ft=20)
|
| 80 |
+
|
| 81 |
+
def test_invalid_negative_dimension(self):
|
| 82 |
+
with pytest.raises(ValidationError):
|
| 83 |
+
Dimensions(length_ft=-10, width_ft=50, ceiling_height_ft=20)
|
| 84 |
+
|
| 85 |
+
def test_dimension_exceeds_max(self):
|
| 86 |
+
with pytest.raises(ValidationError):
|
| 87 |
+
Dimensions(length_ft=20000, width_ft=50, ceiling_height_ft=20)
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
class TestSurface:
|
| 91 |
+
"""Test Surface model."""
|
| 92 |
+
|
| 93 |
+
def test_valid_surface(self):
|
| 94 |
+
surface = Surface(
|
| 95 |
+
id="surf-001",
|
| 96 |
+
material="steel",
|
| 97 |
+
description="North wall steel panel",
|
| 98 |
+
area_sf=500,
|
| 99 |
+
)
|
| 100 |
+
assert surface.category == "non-porous"
|
| 101 |
+
assert surface.zone is None
|
| 102 |
+
assert surface.ai_detected is False
|
| 103 |
+
|
| 104 |
+
def test_surface_with_zone_and_condition(self):
|
| 105 |
+
surface = Surface(
|
| 106 |
+
id="surf-002",
|
| 107 |
+
material="carpet",
|
| 108 |
+
description="Main floor carpet",
|
| 109 |
+
area_sf=2000,
|
| 110 |
+
zone="near-field",
|
| 111 |
+
condition="moderate",
|
| 112 |
+
disposition="remove",
|
| 113 |
+
)
|
| 114 |
+
assert surface.category == "porous"
|
| 115 |
+
assert surface.zone == "near-field"
|
| 116 |
+
assert surface.condition == "moderate"
|
| 117 |
+
assert surface.disposition == "remove"
|
| 118 |
+
|
| 119 |
+
def test_invalid_material(self):
|
| 120 |
+
with pytest.raises(ValidationError):
|
| 121 |
+
Surface(
|
| 122 |
+
id="surf-003",
|
| 123 |
+
material="invalid-material",
|
| 124 |
+
description="Test surface",
|
| 125 |
+
area_sf=100,
|
| 126 |
+
)
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
class TestRoom:
|
| 130 |
+
"""Test Room model."""
|
| 131 |
+
|
| 132 |
+
def test_valid_room(self):
|
| 133 |
+
room = Room(
|
| 134 |
+
id="room-001",
|
| 135 |
+
name="Warehouse Bay A",
|
| 136 |
+
dimensions=Dimensions(length_ft=100, width_ft=50, ceiling_height_ft=20),
|
| 137 |
+
)
|
| 138 |
+
assert room.zone_classification is None
|
| 139 |
+
assert len(room.surfaces) == 0
|
| 140 |
+
assert len(room.image_ids) == 0
|
| 141 |
+
|
| 142 |
+
def test_room_with_surfaces(self):
|
| 143 |
+
room = Room(
|
| 144 |
+
id="room-002",
|
| 145 |
+
name="Office Space",
|
| 146 |
+
floor="Ground Floor",
|
| 147 |
+
dimensions=Dimensions(length_ft=30, width_ft=20, ceiling_height_ft=10),
|
| 148 |
+
zone_classification="far-field",
|
| 149 |
+
zone_confidence=0.85,
|
| 150 |
+
surfaces=[
|
| 151 |
+
Surface(
|
| 152 |
+
id="surf-001",
|
| 153 |
+
material="drywall-painted",
|
| 154 |
+
description="North wall",
|
| 155 |
+
area_sf=300,
|
| 156 |
+
),
|
| 157 |
+
Surface(
|
| 158 |
+
id="surf-002",
|
| 159 |
+
material="carpet",
|
| 160 |
+
description="Floor carpet",
|
| 161 |
+
area_sf=600,
|
| 162 |
+
),
|
| 163 |
+
],
|
| 164 |
+
)
|
| 165 |
+
assert len(room.surfaces) == 2
|
| 166 |
+
assert room.zone_classification == "far-field"
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
class TestProjectInfo:
|
| 170 |
+
"""Test ProjectInfo model."""
|
| 171 |
+
|
| 172 |
+
def test_valid_project_info(self):
|
| 173 |
+
project = ProjectInfo(
|
| 174 |
+
project_name="ABC Warehouse Fire",
|
| 175 |
+
address="123 Main Street",
|
| 176 |
+
city="Springfield",
|
| 177 |
+
state="IL",
|
| 178 |
+
zip_code="62701",
|
| 179 |
+
client_name="ABC Industries",
|
| 180 |
+
fire_date=date(2024, 12, 15),
|
| 181 |
+
assessment_date=date(2024, 12, 20),
|
| 182 |
+
facility_classification="non-operational",
|
| 183 |
+
construction_era="post-2000",
|
| 184 |
+
assessor_name="John Smith",
|
| 185 |
+
assessor_credentials="CIH",
|
| 186 |
+
)
|
| 187 |
+
assert project.project_name == "ABC Warehouse Fire"
|
| 188 |
+
assert project.facility_classification == "non-operational"
|
| 189 |
+
|
| 190 |
+
def test_missing_required_field(self):
|
| 191 |
+
with pytest.raises(ValidationError):
|
| 192 |
+
ProjectInfo(
|
| 193 |
+
project_name="Test Project",
|
| 194 |
+
# Missing address and other required fields
|
| 195 |
+
)
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
class TestQualitativeObservations:
|
| 199 |
+
"""Test QualitativeObservations model."""
|
| 200 |
+
|
| 201 |
+
def test_minimal_observations(self):
|
| 202 |
+
obs = QualitativeObservations(
|
| 203 |
+
smoke_fire_odor=True,
|
| 204 |
+
visible_soot_deposits=True,
|
| 205 |
+
large_char_particles=False,
|
| 206 |
+
ash_like_residue=False,
|
| 207 |
+
surface_discoloration=True,
|
| 208 |
+
dust_loading_interference=False,
|
| 209 |
+
wildfire_indicators=False,
|
| 210 |
+
)
|
| 211 |
+
assert obs.smoke_fire_odor is True
|
| 212 |
+
assert obs.odor_intensity is None
|
| 213 |
+
|
| 214 |
+
def test_full_observations(self):
|
| 215 |
+
obs = QualitativeObservations(
|
| 216 |
+
smoke_fire_odor=True,
|
| 217 |
+
odor_intensity="strong",
|
| 218 |
+
visible_soot_deposits=True,
|
| 219 |
+
soot_pattern_description="Heavy deposits on ceiling",
|
| 220 |
+
large_char_particles=True,
|
| 221 |
+
char_density_estimate="moderate",
|
| 222 |
+
ash_like_residue=True,
|
| 223 |
+
ash_color_texture="Gray powdery residue",
|
| 224 |
+
surface_discoloration=True,
|
| 225 |
+
discoloration_description="Yellowing on walls",
|
| 226 |
+
dust_loading_interference=False,
|
| 227 |
+
wildfire_indicators=False,
|
| 228 |
+
additional_notes="Structural engineer review recommended",
|
| 229 |
+
)
|
| 230 |
+
assert obs.odor_intensity == "strong"
|
| 231 |
+
assert obs.char_density_estimate == "moderate"
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
class TestAssessmentInput:
|
| 235 |
+
"""Test complete AssessmentInput model."""
|
| 236 |
+
|
| 237 |
+
@pytest.fixture
|
| 238 |
+
def sample_project(self):
|
| 239 |
+
return ProjectInfo(
|
| 240 |
+
project_name="Test Project",
|
| 241 |
+
address="123 Test St",
|
| 242 |
+
city="TestCity",
|
| 243 |
+
state="TX",
|
| 244 |
+
zip_code="12345",
|
| 245 |
+
client_name="Test Client",
|
| 246 |
+
fire_date=date(2024, 12, 1),
|
| 247 |
+
assessment_date=date(2024, 12, 15),
|
| 248 |
+
facility_classification="operational",
|
| 249 |
+
construction_era="1980-2000",
|
| 250 |
+
assessor_name="Test Assessor",
|
| 251 |
+
)
|
| 252 |
+
|
| 253 |
+
@pytest.fixture
|
| 254 |
+
def sample_room(self):
|
| 255 |
+
return Room(
|
| 256 |
+
id="room-001",
|
| 257 |
+
name="Test Room",
|
| 258 |
+
dimensions=Dimensions(length_ft=50, width_ft=30, ceiling_height_ft=12),
|
| 259 |
+
)
|
| 260 |
+
|
| 261 |
+
@pytest.fixture
|
| 262 |
+
def sample_observations(self):
|
| 263 |
+
return QualitativeObservations(
|
| 264 |
+
smoke_fire_odor=True,
|
| 265 |
+
visible_soot_deposits=True,
|
| 266 |
+
large_char_particles=False,
|
| 267 |
+
ash_like_residue=False,
|
| 268 |
+
surface_discoloration=False,
|
| 269 |
+
dust_loading_interference=False,
|
| 270 |
+
wildfire_indicators=False,
|
| 271 |
+
)
|
| 272 |
+
|
| 273 |
+
def test_valid_assessment_input(self, sample_project, sample_room, sample_observations):
|
| 274 |
+
assessment = AssessmentInput(
|
| 275 |
+
project=sample_project,
|
| 276 |
+
rooms=[sample_room],
|
| 277 |
+
observations=sample_observations,
|
| 278 |
+
)
|
| 279 |
+
assert len(assessment.rooms) == 1
|
| 280 |
+
assert len(assessment.images) == 0
|
| 281 |
+
|
| 282 |
+
def test_duplicate_room_ids(self, sample_project, sample_room, sample_observations):
|
| 283 |
+
room2 = Room(
|
| 284 |
+
id="room-001", # Same ID as sample_room
|
| 285 |
+
name="Duplicate Room",
|
| 286 |
+
dimensions=Dimensions(length_ft=20, width_ft=20, ceiling_height_ft=10),
|
| 287 |
+
)
|
| 288 |
+
with pytest.raises(ValidationError) as exc_info:
|
| 289 |
+
AssessmentInput(
|
| 290 |
+
project=sample_project,
|
| 291 |
+
rooms=[sample_room, room2],
|
| 292 |
+
observations=sample_observations,
|
| 293 |
+
)
|
| 294 |
+
assert "Room IDs must be unique" in str(exc_info.value)
|
| 295 |
+
|
| 296 |
+
def test_image_references_invalid_room(self, sample_project, sample_room, sample_observations):
|
| 297 |
+
image = ImageMetadata(
|
| 298 |
+
id="img-001",
|
| 299 |
+
filename="test.jpg",
|
| 300 |
+
room_id="nonexistent-room",
|
| 301 |
+
)
|
| 302 |
+
with pytest.raises(ValidationError) as exc_info:
|
| 303 |
+
AssessmentInput(
|
| 304 |
+
project=sample_project,
|
| 305 |
+
rooms=[sample_room],
|
| 306 |
+
images=[image],
|
| 307 |
+
observations=sample_observations,
|
| 308 |
+
)
|
| 309 |
+
assert "references unknown room" in str(exc_info.value)
|
| 310 |
+
|
| 311 |
+
def test_valid_image_reference(self, sample_project, sample_room, sample_observations):
|
| 312 |
+
image = ImageMetadata(
|
| 313 |
+
id="img-001",
|
| 314 |
+
filename="test.jpg",
|
| 315 |
+
room_id="room-001",
|
| 316 |
+
)
|
| 317 |
+
assessment = AssessmentInput(
|
| 318 |
+
project=sample_project,
|
| 319 |
+
rooms=[sample_room],
|
| 320 |
+
images=[image],
|
| 321 |
+
observations=sample_observations,
|
| 322 |
+
)
|
| 323 |
+
assert len(assessment.images) == 1
|
| 324 |
+
|
| 325 |
+
|
| 326 |
+
# --- Output Schema Tests ---
|
| 327 |
+
|
| 328 |
+
|
| 329 |
+
class TestVisionAnalysisResult:
|
| 330 |
+
"""Test VisionAnalysisResult output model."""
|
| 331 |
+
|
| 332 |
+
def test_valid_vision_result(self):
|
| 333 |
+
result = VisionAnalysisResult(
|
| 334 |
+
zone=ZoneAnalysis(
|
| 335 |
+
classification="near-field",
|
| 336 |
+
confidence=0.85,
|
| 337 |
+
reasoning="Heavy soot deposits visible on surfaces",
|
| 338 |
+
),
|
| 339 |
+
condition=ConditionAnalysis(
|
| 340 |
+
level="moderate",
|
| 341 |
+
confidence=0.80,
|
| 342 |
+
reasoning="Visible film on surfaces",
|
| 343 |
+
),
|
| 344 |
+
materials=[
|
| 345 |
+
DetectedMaterial(
|
| 346 |
+
type="steel",
|
| 347 |
+
category="non-porous",
|
| 348 |
+
confidence=0.90,
|
| 349 |
+
location_description="Ceiling structure",
|
| 350 |
+
),
|
| 351 |
+
],
|
| 352 |
+
combustion_indicators=CombustionIndicators(
|
| 353 |
+
soot_visible=True,
|
| 354 |
+
soot_pattern="Heavy deposits on horizontal surfaces",
|
| 355 |
+
char_visible=False,
|
| 356 |
+
ash_visible=True,
|
| 357 |
+
ash_description="Gray powdery residue",
|
| 358 |
+
),
|
| 359 |
+
structural_concerns=["Beam deflection observed"],
|
| 360 |
+
access_issues=["High ceiling requires lift access"],
|
| 361 |
+
recommended_sampling_locations=[
|
| 362 |
+
SamplingRecommendation(
|
| 363 |
+
description="Center of contamination",
|
| 364 |
+
sample_type="tape_lift",
|
| 365 |
+
priority="high",
|
| 366 |
+
),
|
| 367 |
+
],
|
| 368 |
+
flags_for_review=["Zone boundary unclear"],
|
| 369 |
+
)
|
| 370 |
+
assert result.zone.classification == "near-field"
|
| 371 |
+
assert len(result.materials) == 1
|
| 372 |
+
assert result.combustion_indicators.soot_visible is True
|
| 373 |
+
|
| 374 |
+
|
| 375 |
+
class TestCalculationResults:
|
| 376 |
+
"""Test CalculationResults output model."""
|
| 377 |
+
|
| 378 |
+
def test_valid_calculation_results(self):
|
| 379 |
+
results = CalculationResults(
|
| 380 |
+
surface_areas=SurfaceAreas(
|
| 381 |
+
by_type={"steel": 5000, "carpet": 3000},
|
| 382 |
+
by_disposition={"clean": 5000, "remove": 3000},
|
| 383 |
+
total_floor_sf=8000,
|
| 384 |
+
total_surface_sf=8000,
|
| 385 |
+
total_volume_cf=160000,
|
| 386 |
+
),
|
| 387 |
+
air_filtration=AirFiltration(
|
| 388 |
+
total_volume_cf=160000,
|
| 389 |
+
required_ach=4,
|
| 390 |
+
unit_cfm=2000,
|
| 391 |
+
units_required=6,
|
| 392 |
+
calculation="(160,000 CF x 4 ACH) / (2000 CFM x 60) = 6 units",
|
| 393 |
+
),
|
| 394 |
+
sample_density=SampleDensity(
|
| 395 |
+
total_sf=8000,
|
| 396 |
+
size_category="5,000 - 25,000 SF",
|
| 397 |
+
surface_types_count=2,
|
| 398 |
+
surface_types=["steel", "carpet"],
|
| 399 |
+
tape_lifts_per_type="5-10",
|
| 400 |
+
surface_wipes_per_type="5-10",
|
| 401 |
+
recommended_tape_lifts=20,
|
| 402 |
+
recommended_surface_wipes=20,
|
| 403 |
+
),
|
| 404 |
+
labor_estimate=LaborEstimate(
|
| 405 |
+
hepa_vacuum=10,
|
| 406 |
+
wet_wipe=25,
|
| 407 |
+
removal=15,
|
| 408 |
+
total_hours=50,
|
| 409 |
+
),
|
| 410 |
+
equipment=EquipmentRequirements(
|
| 411 |
+
air_scrubbers=6,
|
| 412 |
+
hepa_vacuums=2,
|
| 413 |
+
),
|
| 414 |
+
regulatory_flags=RegulatoryFlags(),
|
| 415 |
+
)
|
| 416 |
+
assert results.air_filtration.units_required == 6
|
| 417 |
+
assert results.labor_estimate.total_hours == 50
|
| 418 |
+
|
| 419 |
+
|
| 420 |
+
class TestConfidenceReport:
|
| 421 |
+
"""Test ConfidenceReport output model."""
|
| 422 |
+
|
| 423 |
+
def test_high_confidence_report(self):
|
| 424 |
+
report = ConfidenceReport(
|
| 425 |
+
flagged_items=[],
|
| 426 |
+
overall_confidence=0.92,
|
| 427 |
+
review_required=False,
|
| 428 |
+
)
|
| 429 |
+
assert report.review_required is False
|
| 430 |
+
|
| 431 |
+
def test_low_confidence_report(self):
|
| 432 |
+
from schemas import FlaggedItem
|
| 433 |
+
|
| 434 |
+
report = ConfidenceReport(
|
| 435 |
+
flagged_items=[
|
| 436 |
+
FlaggedItem(
|
| 437 |
+
type="zone_classification",
|
| 438 |
+
room="Warehouse Bay A",
|
| 439 |
+
confidence=0.55,
|
| 440 |
+
recommendation="Professional review recommended",
|
| 441 |
+
),
|
| 442 |
+
],
|
| 443 |
+
overall_confidence=0.55,
|
| 444 |
+
review_required=True,
|
| 445 |
+
)
|
| 446 |
+
assert report.review_required is True
|
| 447 |
+
assert len(report.flagged_items) == 1
|
| 448 |
+
|
| 449 |
+
|
| 450 |
+
class TestGeneratedDocuments:
|
| 451 |
+
"""Test GeneratedDocuments output model."""
|
| 452 |
+
|
| 453 |
+
def test_valid_documents(self):
|
| 454 |
+
docs = GeneratedDocuments(
|
| 455 |
+
cleaning_specification_md="# Cleaning Specification\n\n## Scope of Work...",
|
| 456 |
+
sampling_plan_md="# Sampling Plan\n\n## Recommendations...",
|
| 457 |
+
confidence_report_md="# Confidence Report\n\n## Summary...",
|
| 458 |
+
)
|
| 459 |
+
assert "Cleaning Specification" in docs.cleaning_specification_md
|
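The schema tests above double as a usage reference for the input models. Below is a minimal, illustrative sketch of assembling a valid assessment outside the test suite; it assumes the models are importable from the schemas package the same way FlaggedItem is imported in TestConfidenceReport, and it reuses only field values the tests show to be valid.

from datetime import date
from schemas import (
    AssessmentInput,
    Dimensions,
    ProjectInfo,
    QualitativeObservations,
    Room,
)

# Project metadata: the same required fields exercised by TestProjectInfo.
project = ProjectInfo(
    project_name="Example Warehouse Fire",
    address="123 Main Street",
    city="Springfield",
    state="IL",
    zip_code="62701",
    client_name="Example Client",
    fire_date=date(2024, 12, 1),
    assessment_date=date(2024, 12, 15),
    facility_classification="operational",
    construction_era="1980-2000",
    assessor_name="Jane Doe",
)

# One room; surfaces and image references can be attached later.
room = Room(
    id="room-001",
    name="Warehouse Bay A",
    dimensions=Dimensions(length_ft=100, width_ft=50, ceiling_height_ft=20),
)

# The seven boolean observations are all required, per TestQualitativeObservations.
observations = QualitativeObservations(
    smoke_fire_odor=True,
    visible_soot_deposits=True,
    large_char_particles=False,
    ash_like_residue=False,
    surface_discoloration=False,
    dust_loading_interference=False,
    wildfire_indicators=False,
)

assessment = AssessmentInput(project=project, rooms=[room], observations=observations)
assert len(assessment.rooms) == 1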
tests/test_tabs.py
ADDED
|
@@ -0,0 +1,381 @@
| 1 |
+
"""Tests for tab UI modules."""
|
| 2 |
+
|
| 3 |
+
import pytest
|
| 4 |
+
from PIL import Image
|
| 5 |
+
import io
|
| 6 |
+
|
| 7 |
+
from ui.state import SessionState, RoomFormData, ImageFormData
|
| 8 |
+
from ui.tabs import project, rooms, images, observations, results
|
| 9 |
+
from ui.components import image_store
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class TestProjectTab:
|
| 13 |
+
"""Test Tab 1: Project Info."""
|
| 14 |
+
|
| 15 |
+
def test_update_session_from_form(self):
|
| 16 |
+
session = SessionState()
|
| 17 |
+
session = project.update_session_from_form(
|
| 18 |
+
session,
|
| 19 |
+
project_name="Test Project",
|
| 20 |
+
address="123 Main St",
|
| 21 |
+
city="Springfield",
|
| 22 |
+
state="IL",
|
| 23 |
+
zip_code="62701",
|
| 24 |
+
client_name="Test Client",
|
| 25 |
+
fire_date="2024-12-01",
|
| 26 |
+
assessment_date="2024-12-15",
|
| 27 |
+
facility_classification="Operational",
|
| 28 |
+
construction_era="Pre-1980",
|
| 29 |
+
assessor_name="John Smith",
|
| 30 |
+
assessor_credentials="CIH",
|
| 31 |
+
)
|
| 32 |
+
|
| 33 |
+
assert session.project.project_name == "Test Project"
|
| 34 |
+
assert session.project.facility_classification == "operational"
|
| 35 |
+
assert session.project.construction_era == "pre-1980"
|
| 36 |
+
|
| 37 |
+
def test_validate_and_continue_incomplete(self):
|
| 38 |
+
session = SessionState()
|
| 39 |
+
session, html, tab_index = project.validate_and_continue(
|
| 40 |
+
session,
|
| 41 |
+
project_name="", # Missing
|
| 42 |
+
address="123 Main",
|
| 43 |
+
city="City",
|
| 44 |
+
state="IL",
|
| 45 |
+
zip_code="12345",
|
| 46 |
+
client_name="Client",
|
| 47 |
+
fire_date="2024-01-01",
|
| 48 |
+
assessment_date="2024-01-02",
|
| 49 |
+
facility_classification="Non-Operational",
|
| 50 |
+
construction_era="Post-2000",
|
| 51 |
+
assessor_name="Name",
|
| 52 |
+
assessor_credentials="",
|
| 53 |
+
)
|
| 54 |
+
|
| 55 |
+
assert tab_index == 0 # Stay on tab
|
| 56 |
+
assert "Project name is required" in html
|
| 57 |
+
assert session.tab1_complete is False
|
| 58 |
+
|
| 59 |
+
def test_validate_and_continue_complete(self):
|
| 60 |
+
session = SessionState()
|
| 61 |
+
session, html, tab_index = project.validate_and_continue(
|
| 62 |
+
session,
|
| 63 |
+
project_name="Test",
|
| 64 |
+
address="123 Main",
|
| 65 |
+
city="City",
|
| 66 |
+
state="IL",
|
| 67 |
+
zip_code="12345",
|
| 68 |
+
client_name="Client",
|
| 69 |
+
fire_date="2024-01-01",
|
| 70 |
+
assessment_date="2024-01-02",
|
| 71 |
+
facility_classification="Non-Operational",
|
| 72 |
+
construction_era="Post-2000",
|
| 73 |
+
assessor_name="Name",
|
| 74 |
+
assessor_credentials="",
|
| 75 |
+
)
|
| 76 |
+
|
| 77 |
+
assert tab_index == 1 # Go to next tab
|
| 78 |
+
assert "✓" in html
|
| 79 |
+
assert session.tab1_complete is True
|
| 80 |
+
|
| 81 |
+
def test_load_form_from_session(self):
|
| 82 |
+
session = SessionState()
|
| 83 |
+
session.project.project_name = "Loaded Project"
|
| 84 |
+
session.project.facility_classification = "public-childcare"
|
| 85 |
+
session.project.construction_era = "1980-2000"
|
| 86 |
+
|
| 87 |
+
values = project.load_form_from_session(session)
|
| 88 |
+
|
| 89 |
+
assert values[0] == "Loaded Project" # project_name
|
| 90 |
+
assert values[8] == "Public/Childcare" # facility_classification (UI value)
|
| 91 |
+
assert values[9] == "1980-2000" # construction_era (UI value)
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
class TestRoomsTab:
|
| 95 |
+
"""Test Tab 2: Building/Rooms."""
|
| 96 |
+
|
| 97 |
+
def test_add_room_valid(self):
|
| 98 |
+
session = SessionState()
|
| 99 |
+
|
| 100 |
+
result = rooms.add_room(
|
| 101 |
+
session,
|
| 102 |
+
name="Room 1",
|
| 103 |
+
floor="Ground",
|
| 104 |
+
length=100.0,
|
| 105 |
+
width=50.0,
|
| 106 |
+
height=20.0,
|
| 107 |
+
)
|
| 108 |
+
|
| 109 |
+
session = result[0]
|
| 110 |
+
table_data = result[1]
|
| 111 |
+
validation_html = result[2]
|
| 112 |
+
|
| 113 |
+
assert len(session.rooms) == 1
|
| 114 |
+
assert session.rooms[0].name == "Room 1"
|
| 115 |
+
assert "✓" in validation_html
|
| 116 |
+
assert len(table_data) == 1
|
| 117 |
+
|
| 118 |
+
def test_add_room_invalid(self):
|
| 119 |
+
session = SessionState()
|
| 120 |
+
|
| 121 |
+
result = rooms.add_room(
|
| 122 |
+
session,
|
| 123 |
+
name="", # Missing
|
| 124 |
+
floor="",
|
| 125 |
+
length=0, # Invalid
|
| 126 |
+
width=50.0,
|
| 127 |
+
height=20.0,
|
| 128 |
+
)
|
| 129 |
+
|
| 130 |
+
session = result[0]
|
| 131 |
+
validation_html = result[2]
|
| 132 |
+
|
| 133 |
+
assert len(session.rooms) == 0
|
| 134 |
+
assert "Room name is required" in validation_html
|
| 135 |
+
assert "Length must be greater than 0" in validation_html
|
| 136 |
+
|
| 137 |
+
def test_remove_last_room(self):
|
| 138 |
+
session = SessionState()
|
| 139 |
+
session.rooms.append(RoomFormData(name="Room 1", length_ft=100, width_ft=50, ceiling_height_ft=20))
|
| 140 |
+
session.rooms.append(RoomFormData(name="Room 2", length_ft=75, width_ft=40, ceiling_height_ft=15))
|
| 141 |
+
|
| 142 |
+
session, table_data, html, count, area, volume = rooms.remove_last_room(session)
|
| 143 |
+
|
| 144 |
+
assert len(session.rooms) == 1
|
| 145 |
+
assert session.rooms[0].name == "Room 1"
|
| 146 |
+
assert "Room 2" in html
|
| 147 |
+
|
| 148 |
+
def test_validate_and_continue(self):
|
| 149 |
+
session = SessionState()
|
| 150 |
+
session.rooms.append(RoomFormData(name="Room 1", length_ft=100, width_ft=50, ceiling_height_ft=20))
|
| 151 |
+
|
| 152 |
+
session, html, tab_index = rooms.validate_and_continue(session)
|
| 153 |
+
|
| 154 |
+
assert tab_index == 2 # Go to Images tab
|
| 155 |
+
assert session.tab2_complete is True
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
class TestImagesTab:
|
| 159 |
+
"""Test Tab 3: Images."""
|
| 160 |
+
|
| 161 |
+
def test_add_image_valid(self):
|
| 162 |
+
session = SessionState()
|
| 163 |
+
session.rooms.append(RoomFormData(id="room-001", name="Room 1", length_ft=100, width_ft=50, ceiling_height_ft=20))
|
| 164 |
+
|
| 165 |
+
# Create a test image
|
| 166 |
+
test_image = Image.new("RGB", (100, 100), color="red")
|
| 167 |
+
|
| 168 |
+
result = images.add_image(
|
| 169 |
+
session,
|
| 170 |
+
image=test_image,
|
| 171 |
+
room_id="room-001",
|
| 172 |
+
description="Test image",
|
| 173 |
+
)
|
| 174 |
+
|
| 175 |
+
session = result[0]
|
| 176 |
+
gallery_data = result[1]
|
| 177 |
+
validation_html = result[2]
|
| 178 |
+
|
| 179 |
+
assert len(session.images) == 1
|
| 180 |
+
assert session.images[0].room_id == "room-001"
|
| 181 |
+
assert "✓" in validation_html
|
| 182 |
+
# Image should be in store
|
| 183 |
+
assert image_store.get(session.images[0].id) is not None
|
| 184 |
+
|
| 185 |
+
# Cleanup
|
| 186 |
+
image_store.clear()
|
| 187 |
+
|
| 188 |
+
def test_add_image_no_room(self):
|
| 189 |
+
session = SessionState()
|
| 190 |
+
test_image = Image.new("RGB", (100, 100), color="red")
|
| 191 |
+
|
| 192 |
+
result = images.add_image(
|
| 193 |
+
session,
|
| 194 |
+
image=test_image,
|
| 195 |
+
room_id="", # No room selected
|
| 196 |
+
description="",
|
| 197 |
+
)
|
| 198 |
+
|
| 199 |
+
session = result[0]
|
| 200 |
+
validation_html = result[2]
|
| 201 |
+
|
| 202 |
+
assert len(session.images) == 0
|
| 203 |
+
assert "select a room" in validation_html
|
| 204 |
+
|
| 205 |
+
def test_validate_missing_images(self):
|
| 206 |
+
session = SessionState()
|
| 207 |
+
session.rooms.append(RoomFormData(id="room-001", name="Room 1"))
|
| 208 |
+
# Add image metadata but don't store the actual image
|
| 209 |
+
session.images.append(ImageFormData(id="img-missing", filename="test.jpg", room_id="room-001"))
|
| 210 |
+
|
| 211 |
+
session, html, tab_index = images.validate_and_continue(session)
|
| 212 |
+
|
| 213 |
+
assert tab_index == 2 # Stay on Images tab
|
| 214 |
+
assert "re-uploaded" in html
|
| 215 |
+
|
| 216 |
+
def test_update_room_choices(self):
|
| 217 |
+
session = SessionState()
|
| 218 |
+
session.rooms.append(RoomFormData(id="room-001", name="Room 1"))
|
| 219 |
+
session.rooms.append(RoomFormData(id="room-002", name="Room 2"))
|
| 220 |
+
|
| 221 |
+
update = images.update_room_choices(session)
|
| 222 |
+
|
| 223 |
+
assert "choices" in update
|
| 224 |
+
assert len(update["choices"]) == 2
|
| 225 |
+
|
| 226 |
+
|
| 227 |
+
class TestObservationsTab:
|
| 228 |
+
"""Test Tab 4: Observations."""
|
| 229 |
+
|
| 230 |
+
def test_update_session_from_form(self):
|
| 231 |
+
session = SessionState()
|
| 232 |
+
session = observations.update_session_from_form(
|
| 233 |
+
session,
|
| 234 |
+
smoke_odor=True,
|
| 235 |
+
odor_intensity="Strong",
|
| 236 |
+
visible_soot=True,
|
| 237 |
+
soot_description="Heavy on ceiling",
|
| 238 |
+
large_char=True,
|
| 239 |
+
char_density="Moderate",
|
| 240 |
+
ash_residue=False,
|
| 241 |
+
ash_description="",
|
| 242 |
+
surface_discoloration=True,
|
| 243 |
+
discoloration_description="Yellowing",
|
| 244 |
+
dust_interference=False,
|
| 245 |
+
dust_notes="",
|
| 246 |
+
wildfire_indicators=False,
|
| 247 |
+
wildfire_notes="",
|
| 248 |
+
additional_notes="Test notes",
|
| 249 |
+
)
|
| 250 |
+
|
| 251 |
+
assert session.observations.smoke_fire_odor is True
|
| 252 |
+
assert session.observations.odor_intensity == "strong"
|
| 253 |
+
assert session.observations.char_density_estimate == "moderate"
|
| 254 |
+
assert session.observations.additional_notes == "Test notes"
|
| 255 |
+
|
| 256 |
+
def test_validate_and_continue(self):
|
| 257 |
+
session = SessionState()
|
| 258 |
+
|
| 259 |
+
session, html, tab_index = observations.validate_and_continue(
|
| 260 |
+
session,
|
| 261 |
+
smoke_odor=True,
|
| 262 |
+
odor_intensity="Moderate",
|
| 263 |
+
visible_soot=True,
|
| 264 |
+
soot_description="",
|
| 265 |
+
large_char=False,
|
| 266 |
+
char_density="None",
|
| 267 |
+
ash_residue=False,
|
| 268 |
+
ash_description="",
|
| 269 |
+
surface_discoloration=False,
|
| 270 |
+
discoloration_description="",
|
| 271 |
+
dust_interference=False,
|
| 272 |
+
dust_notes="",
|
| 273 |
+
wildfire_indicators=False,
|
| 274 |
+
wildfire_notes="",
|
| 275 |
+
additional_notes="",
|
| 276 |
+
)
|
| 277 |
+
|
| 278 |
+
assert tab_index == 4 # Go to Results tab
|
| 279 |
+
assert session.tab4_complete is True
|
| 280 |
+
|
| 281 |
+
def test_load_form_from_session(self):
|
| 282 |
+
session = SessionState()
|
| 283 |
+
session.observations.smoke_fire_odor = True
|
| 284 |
+
session.observations.odor_intensity = "strong"
|
| 285 |
+
session.observations.char_density_estimate = "dense"
|
| 286 |
+
|
| 287 |
+
values = observations.load_form_from_session(session)
|
| 288 |
+
|
| 289 |
+
assert values[0] is True # smoke_odor
|
| 290 |
+
assert values[1] == "Strong" # odor_intensity (UI value)
|
| 291 |
+
assert values[5] == "Dense" # char_density (UI value)
|
| 292 |
+
|
| 293 |
+
|
| 294 |
+
class TestResultsTab:
|
| 295 |
+
"""Test Tab 5: Generate Results."""
|
| 296 |
+
|
| 297 |
+
def test_check_preflight_incomplete(self):
|
| 298 |
+
session = SessionState()
|
| 299 |
+
# No data added
|
| 300 |
+
|
| 301 |
+
html = results.check_preflight(session)
|
| 302 |
+
|
| 303 |
+
assert "Cannot Generate" in html
|
| 304 |
+
assert "Project name is required" in html
|
| 305 |
+
|
| 306 |
+
def test_check_preflight_complete(self):
|
| 307 |
+
session = SessionState()
|
| 308 |
+
session.project.project_name = "Test"
|
| 309 |
+
session.project.address = "123 Main"
|
| 310 |
+
session.project.city = "City"
|
| 311 |
+
session.project.state = "IL"
|
| 312 |
+
session.project.zip_code = "12345"
|
| 313 |
+
session.project.client_name = "Client"
|
| 314 |
+
session.project.fire_date = "2024-01-01"
|
| 315 |
+
session.project.assessment_date = "2024-01-02"
|
| 316 |
+
session.project.assessor_name = "Assessor"
|
| 317 |
+
|
| 318 |
+
session.rooms.append(RoomFormData(
|
| 319 |
+
id="room-001",
|
| 320 |
+
name="Room 1",
|
| 321 |
+
length_ft=100,
|
| 322 |
+
width_ft=50,
|
| 323 |
+
ceiling_height_ft=20,
|
| 324 |
+
))
|
| 325 |
+
|
| 326 |
+
# Add image with actual bytes in store
|
| 327 |
+
img_id = "img-001"
|
| 328 |
+
session.images.append(ImageFormData(id=img_id, filename="test.jpg", room_id="room-001"))
|
| 329 |
+
test_image = Image.new("RGB", (100, 100), color="red")
|
| 330 |
+
img_bytes = io.BytesIO()
|
| 331 |
+
test_image.save(img_bytes, format="PNG")
|
| 332 |
+
image_store.store(img_id, img_bytes.getvalue())
|
| 333 |
+
|
| 334 |
+
html = results.check_preflight(session)
|
| 335 |
+
|
| 336 |
+
assert "Ready to Generate" in html
|
| 337 |
+
assert "Test" in html # Project name
|
| 338 |
+
|
| 339 |
+
# Cleanup
|
| 340 |
+
image_store.clear()
|
| 341 |
+
|
| 342 |
+
def test_generate_assessment_incomplete(self):
|
| 343 |
+
session = SessionState()
|
| 344 |
+
# Missing required data
|
| 345 |
+
|
| 346 |
+
result = results.generate_assessment(session)
|
| 347 |
+
|
| 348 |
+
session = result[0]
|
| 349 |
+
status = result[1]
|
| 350 |
+
sow = result[5]
|
| 351 |
+
|
| 352 |
+
assert "Error" in status
|
| 353 |
+
assert "Error" in sow
|
| 354 |
+
|
| 355 |
+
|
| 356 |
+
class TestMapConversions:
|
| 357 |
+
"""Test UI-to-schema value mappings."""
|
| 358 |
+
|
| 359 |
+
def test_facility_map(self):
|
| 360 |
+
assert project.FACILITY_MAP["Non-Operational"] == "non-operational"
|
| 361 |
+
assert project.FACILITY_MAP["Operational"] == "operational"
|
| 362 |
+
assert project.FACILITY_MAP["Public/Childcare"] == "public-childcare"
|
| 363 |
+
|
| 364 |
+
def test_facility_map_reverse(self):
|
| 365 |
+
assert project.FACILITY_MAP_REVERSE["non-operational"] == "Non-Operational"
|
| 366 |
+
assert project.FACILITY_MAP_REVERSE["operational"] == "Operational"
|
| 367 |
+
assert project.FACILITY_MAP_REVERSE["public-childcare"] == "Public/Childcare"
|
| 368 |
+
|
| 369 |
+
def test_era_map(self):
|
| 370 |
+
assert project.ERA_MAP["Pre-1980"] == "pre-1980"
|
| 371 |
+
assert project.ERA_MAP["1980-2000"] == "1980-2000"
|
| 372 |
+
assert project.ERA_MAP["Post-2000"] == "post-2000"
|
| 373 |
+
|
| 374 |
+
def test_odor_map(self):
|
| 375 |
+
assert observations.ODOR_MAP["None"] == "none"
|
| 376 |
+
assert observations.ODOR_MAP["Strong"] == "strong"
|
| 377 |
+
|
| 378 |
+
def test_char_density_map(self):
|
| 379 |
+
assert observations.CHAR_DENSITY_MAP["None"] is None
|
| 380 |
+
assert observations.CHAR_DENSITY_MAP["Sparse"] == "sparse"
|
| 381 |
+
assert observations.CHAR_DENSITY_MAP["Dense"] == "dense"
|
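TestMapConversions pins down the label-to-literal convention used between the Gradio dropdowns and the schema enums. The snippet below is an illustrative pattern only (the real dictionaries live in ui/tabs/project.py and ui/tabs/observations.py, and the helper functions here are hypothetical): deriving the reverse map from the forward map keeps the two from drifting apart.

# Illustrative: forward map from UI dropdown labels to schema literals,
# with the reverse map derived rather than hand-written.
FACILITY_MAP = {
    "Non-Operational": "non-operational",
    "Operational": "operational",
    "Public/Childcare": "public-childcare",
}
FACILITY_MAP_REVERSE = {schema: label for label, schema in FACILITY_MAP.items()}


def to_schema_value(ui_label: str) -> str:
    """Translate a dropdown label into the value stored on the schema."""
    return FACILITY_MAP[ui_label]


def to_ui_label(schema_value: str) -> str:
    """Translate a stored schema value back into the dropdown label."""
    return FACILITY_MAP_REVERSE[schema_value]


assert to_ui_label(to_schema_value("Public/Childcare")) == "Public/Childcare"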
tests/test_ui_state.py
ADDED
|
@@ -0,0 +1,360 @@
| 1 |
+
"""Tests for UI state management."""
|
| 2 |
+
|
| 3 |
+
import json
|
| 4 |
+
import pytest
|
| 5 |
+
|
| 6 |
+
from ui.state import (
|
| 7 |
+
SessionState,
|
| 8 |
+
AssessmentHistory,
|
| 9 |
+
ProjectFormData,
|
| 10 |
+
RoomFormData,
|
| 11 |
+
ImageFormData,
|
| 12 |
+
ObservationsFormData,
|
| 13 |
+
create_new_session,
|
| 14 |
+
session_to_json,
|
| 15 |
+
session_from_json,
|
| 16 |
+
history_to_json,
|
| 17 |
+
history_from_json,
|
| 18 |
+
)
|
| 19 |
+
from ui.components import (
|
| 20 |
+
create_validation_message,
|
| 21 |
+
create_room_table_data,
|
| 22 |
+
create_history_dropdown_choices,
|
| 23 |
+
create_stats_dict,
|
| 24 |
+
ImageStore,
|
| 25 |
+
)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
class TestSessionState:
|
| 29 |
+
"""Test SessionState model."""
|
| 30 |
+
|
| 31 |
+
def test_create_new_session(self):
|
| 32 |
+
session = create_new_session()
|
| 33 |
+
assert session.session_id is not None
|
| 34 |
+
assert len(session.session_id) == 32 # UUID hex
|
| 35 |
+
assert session.tab1_complete is False
|
| 36 |
+
assert len(session.rooms) == 0
|
| 37 |
+
|
| 38 |
+
def test_session_serialization(self):
|
| 39 |
+
session = SessionState()
|
| 40 |
+
session.project.project_name = "Test Project"
|
| 41 |
+
session.rooms.append(RoomFormData(
|
| 42 |
+
name="Room 1",
|
| 43 |
+
length_ft=100,
|
| 44 |
+
width_ft=50,
|
| 45 |
+
ceiling_height_ft=20,
|
| 46 |
+
))
|
| 47 |
+
|
| 48 |
+
# Serialize
|
| 49 |
+
json_str = session_to_json(session)
|
| 50 |
+
assert "Test Project" in json_str
|
| 51 |
+
assert "Room 1" in json_str
|
| 52 |
+
|
| 53 |
+
# Deserialize
|
| 54 |
+
loaded = session_from_json(json_str)
|
| 55 |
+
assert loaded.project.project_name == "Test Project"
|
| 56 |
+
assert len(loaded.rooms) == 1
|
| 57 |
+
assert loaded.rooms[0].name == "Room 1"
|
| 58 |
+
|
| 59 |
+
def test_session_validation_tab1_incomplete(self):
|
| 60 |
+
session = SessionState()
|
| 61 |
+
is_valid, errors = session.validate_tab1()
|
| 62 |
+
assert is_valid is False
|
| 63 |
+
assert "Project name is required" in errors
|
| 64 |
+
assert "Address is required" in errors
|
| 65 |
+
|
| 66 |
+
def test_session_validation_tab1_complete(self):
|
| 67 |
+
session = SessionState()
|
| 68 |
+
session.project = ProjectFormData(
|
| 69 |
+
project_name="Test Project",
|
| 70 |
+
address="123 Main St",
|
| 71 |
+
city="Springfield",
|
| 72 |
+
state="IL",
|
| 73 |
+
zip_code="62701",
|
| 74 |
+
client_name="Test Client",
|
| 75 |
+
fire_date="2024-12-01",
|
| 76 |
+
assessment_date="2024-12-15",
|
| 77 |
+
assessor_name="John Smith",
|
| 78 |
+
)
|
| 79 |
+
is_valid, errors = session.validate_tab1()
|
| 80 |
+
assert is_valid is True
|
| 81 |
+
assert len(errors) == 0
|
| 82 |
+
|
| 83 |
+
def test_session_validation_tab2_no_rooms(self):
|
| 84 |
+
session = SessionState()
|
| 85 |
+
is_valid, errors = session.validate_tab2()
|
| 86 |
+
assert is_valid is False
|
| 87 |
+
assert "At least one room is required" in errors
|
| 88 |
+
|
| 89 |
+
def test_session_validation_tab2_invalid_dimensions(self):
|
| 90 |
+
session = SessionState()
|
| 91 |
+
session.rooms.append(RoomFormData(
|
| 92 |
+
name="Room 1",
|
| 93 |
+
length_ft=0, # Invalid
|
| 94 |
+
width_ft=50,
|
| 95 |
+
ceiling_height_ft=20,
|
| 96 |
+
))
|
| 97 |
+
is_valid, errors = session.validate_tab2()
|
| 98 |
+
assert is_valid is False
|
| 99 |
+
assert any("Length must be greater than 0" in e for e in errors)
|
| 100 |
+
|
| 101 |
+
def test_session_validation_tab2_complete(self):
|
| 102 |
+
session = SessionState()
|
| 103 |
+
session.rooms.append(RoomFormData(
|
| 104 |
+
name="Room 1",
|
| 105 |
+
length_ft=100,
|
| 106 |
+
width_ft=50,
|
| 107 |
+
ceiling_height_ft=20,
|
| 108 |
+
))
|
| 109 |
+
is_valid, errors = session.validate_tab2()
|
| 110 |
+
assert is_valid is True
|
| 111 |
+
|
| 112 |
+
def test_session_validation_tab3_no_images(self):
|
| 113 |
+
session = SessionState()
|
| 114 |
+
is_valid, errors = session.validate_tab3()
|
| 115 |
+
assert is_valid is False
|
| 116 |
+
assert "At least one image is required" in errors
|
| 117 |
+
|
| 118 |
+
def test_session_validation_tab3_complete(self):
|
| 119 |
+
session = SessionState()
|
| 120 |
+
session.rooms.append(RoomFormData(id="room-001", name="Room 1"))
|
| 121 |
+
session.images.append(ImageFormData(
|
| 122 |
+
filename="test.jpg",
|
| 123 |
+
room_id="room-001",
|
| 124 |
+
))
|
| 125 |
+
is_valid, errors = session.validate_tab3()
|
| 126 |
+
assert is_valid is True
|
| 127 |
+
|
| 128 |
+
def test_session_can_generate(self):
|
| 129 |
+
session = SessionState()
|
| 130 |
+
# Fill all required fields
|
| 131 |
+
session.project = ProjectFormData(
|
| 132 |
+
project_name="Test",
|
| 133 |
+
address="123 Main",
|
| 134 |
+
city="City",
|
| 135 |
+
state="IL",
|
| 136 |
+
zip_code="12345",
|
| 137 |
+
client_name="Client",
|
| 138 |
+
fire_date="2024-01-01",
|
| 139 |
+
assessment_date="2024-01-02",
|
| 140 |
+
assessor_name="Assessor",
|
| 141 |
+
)
|
| 142 |
+
session.rooms.append(RoomFormData(
|
| 143 |
+
id="room-001",
|
| 144 |
+
name="Room 1",
|
| 145 |
+
length_ft=100,
|
| 146 |
+
width_ft=50,
|
| 147 |
+
ceiling_height_ft=20,
|
| 148 |
+
))
|
| 149 |
+
session.images.append(ImageFormData(
|
| 150 |
+
filename="test.jpg",
|
| 151 |
+
room_id="room-001",
|
| 152 |
+
))
|
| 153 |
+
|
| 154 |
+
can_gen, errors = session.can_generate()
|
| 155 |
+
assert can_gen is True
|
| 156 |
+
assert len(errors) == 0
|
| 157 |
+
|
| 158 |
+
def test_session_display_name(self):
|
| 159 |
+
session = SessionState()
|
| 160 |
+
# Default name from session ID
|
| 161 |
+
assert session.session_id[:8] in session.get_display_name()
|
| 162 |
+
|
| 163 |
+
# Name from project
|
| 164 |
+
session.project.project_name = "My Project"
|
| 165 |
+
assert session.get_display_name() == "My Project"
|
| 166 |
+
|
| 167 |
+
# Explicit name takes priority
|
| 168 |
+
session.name = "Custom Name"
|
| 169 |
+
assert session.get_display_name() == "Custom Name"
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
class TestAssessmentHistory:
|
| 173 |
+
"""Test AssessmentHistory model."""
|
| 174 |
+
|
| 175 |
+
def test_empty_history(self):
|
| 176 |
+
history = AssessmentHistory()
|
| 177 |
+
assert len(history.assessments) == 0
|
| 178 |
+
assert history.current_session_id is None
|
| 179 |
+
|
| 180 |
+
def test_add_assessment(self):
|
| 181 |
+
history = AssessmentHistory()
|
| 182 |
+
session = SessionState()
|
| 183 |
+
session.project.project_name = "Test"
|
| 184 |
+
|
| 185 |
+
history.add_assessment(session)
|
| 186 |
+
assert len(history.assessments) == 1
|
| 187 |
+
assert history.assessments[0].session_id == session.session_id
|
| 188 |
+
|
| 189 |
+
def test_add_assessment_updates_existing(self):
|
| 190 |
+
history = AssessmentHistory()
|
| 191 |
+
session = SessionState()
|
| 192 |
+
session.project.project_name = "Original"
|
| 193 |
+
|
| 194 |
+
history.add_assessment(session)
|
| 195 |
+
|
| 196 |
+
# Update and re-add
|
| 197 |
+
session.project.project_name = "Updated"
|
| 198 |
+
history.add_assessment(session)
|
| 199 |
+
|
| 200 |
+
# Should still have only 1 entry
|
| 201 |
+
assert len(history.assessments) == 1
|
| 202 |
+
assert history.assessments[0].project.project_name == "Updated"
|
| 203 |
+
|
| 204 |
+
def test_history_limit(self):
|
| 205 |
+
history = AssessmentHistory()
|
| 206 |
+
|
| 207 |
+
# Add 25 assessments
|
| 208 |
+
for i in range(25):
|
| 209 |
+
session = SessionState()
|
| 210 |
+
session.project.project_name = f"Project {i}"
|
| 211 |
+
history.add_assessment(session)
|
| 212 |
+
|
| 213 |
+
# Should only keep 20
|
| 214 |
+
assert len(history.assessments) == 20
|
| 215 |
+
# Most recent should be first
|
| 216 |
+
assert history.assessments[0].project.project_name == "Project 24"
|
| 217 |
+
|
| 218 |
+
def test_get_assessment(self):
|
| 219 |
+
history = AssessmentHistory()
|
| 220 |
+
session = SessionState()
|
| 221 |
+
history.add_assessment(session)
|
| 222 |
+
|
| 223 |
+
retrieved = history.get_assessment(session.session_id)
|
| 224 |
+
assert retrieved is not None
|
| 225 |
+
assert retrieved.session_id == session.session_id
|
| 226 |
+
|
| 227 |
+
# Non-existent
|
| 228 |
+
assert history.get_assessment("nonexistent") is None
|
| 229 |
+
|
| 230 |
+
def test_remove_assessment(self):
|
| 231 |
+
history = AssessmentHistory()
|
| 232 |
+
session = SessionState()
|
| 233 |
+
history.add_assessment(session)
|
| 234 |
+
|
| 235 |
+
history.remove_assessment(session.session_id)
|
| 236 |
+
assert len(history.assessments) == 0
|
| 237 |
+
|
| 238 |
+
def test_history_serialization(self):
|
| 239 |
+
history = AssessmentHistory()
|
| 240 |
+
session = SessionState()
|
| 241 |
+
session.project.project_name = "Test Project"
|
| 242 |
+
history.add_assessment(session)
|
| 243 |
+
|
| 244 |
+
json_str = history_to_json(history)
|
| 245 |
+
loaded = history_from_json(json_str)
|
| 246 |
+
|
| 247 |
+
assert len(loaded.assessments) == 1
|
| 248 |
+
assert loaded.assessments[0].project.project_name == "Test Project"
|
| 249 |
+
|
| 250 |
+
def test_history_items(self):
|
| 251 |
+
history = AssessmentHistory()
|
| 252 |
+
session = SessionState()
|
| 253 |
+
session.project.project_name = "Test Project"
|
| 254 |
+
session.has_results = True
|
| 255 |
+
history.add_assessment(session)
|
| 256 |
+
|
| 257 |
+
items = history.get_history_items()
|
| 258 |
+
assert len(items) == 1
|
| 259 |
+
assert items[0]["name"] == "Test Project"
|
| 260 |
+
assert items[0]["has_results"] is True
|
| 261 |
+
|
| 262 |
+
|
| 263 |
+
class TestUIComponents:
|
| 264 |
+
"""Test UI component helpers."""
|
| 265 |
+
|
| 266 |
+
def test_validation_message_success(self):
|
| 267 |
+
msg = create_validation_message(True, [], "All good!")
|
| 268 |
+
assert "✓" in msg
|
| 269 |
+
assert "All good!" in msg
|
| 270 |
+
|
| 271 |
+
def test_validation_message_failure(self):
|
| 272 |
+
msg = create_validation_message(False, ["Error 1", "Error 2"])
|
| 273 |
+
assert "⚠" in msg
|
| 274 |
+
assert "Error 1" in msg
|
| 275 |
+
assert "Error 2" in msg
|
| 276 |
+
|
| 277 |
+
def test_room_table_data(self):
|
| 278 |
+
session = SessionState()
|
| 279 |
+
session.rooms.append(RoomFormData(
|
| 280 |
+
name="Room 1",
|
| 281 |
+
length_ft=100,
|
| 282 |
+
width_ft=50,
|
| 283 |
+
ceiling_height_ft=20,
|
| 284 |
+
))
|
| 285 |
+
|
| 286 |
+
data = create_room_table_data(session)
|
| 287 |
+
assert len(data) == 1
|
| 288 |
+
assert data[0][0] == "Room 1"
|
| 289 |
+
assert "100 x 50 x 20" in data[0][1]
|
| 290 |
+
assert "5,000" in data[0][2] # Area
|
| 291 |
+
assert "100,000" in data[0][3] # Volume
|
| 292 |
+
|
| 293 |
+
def test_history_dropdown_choices(self):
|
| 294 |
+
history = AssessmentHistory()
|
| 295 |
+
session = SessionState()
|
| 296 |
+
session.project.project_name = "Test Project"
|
| 297 |
+
history.add_assessment(session)
|
| 298 |
+
|
| 299 |
+
choices = create_history_dropdown_choices(history)
|
| 300 |
+
assert len(choices) == 2 # "New Assessment" + 1 saved
|
| 301 |
+
assert choices[0][0] == "-- New Assessment --"
|
| 302 |
+
assert "Test Project" in choices[1][0]
|
| 303 |
+
|
| 304 |
+
def test_stats_dict(self):
|
| 305 |
+
session = SessionState()
|
| 306 |
+
session.rooms.append(RoomFormData(
|
| 307 |
+
name="Room 1",
|
| 308 |
+
length_ft=100,
|
| 309 |
+
width_ft=50,
|
| 310 |
+
ceiling_height_ft=20,
|
| 311 |
+
))
|
| 312 |
+
session.images.append(ImageFormData(filename="test.jpg", room_id="room-001"))
|
| 313 |
+
|
| 314 |
+
stats = create_stats_dict(session)
|
| 315 |
+
assert stats["rooms"] == 1
|
| 316 |
+
assert stats["images"] == 1
|
| 317 |
+
assert stats["total_floor_area_sf"] == "5,000"
|
| 318 |
+
assert stats["total_volume_cf"] == "100,000"
|
| 319 |
+
|
| 320 |
+
|
| 321 |
+
class TestImageStore:
|
| 322 |
+
"""Test ImageStore for in-memory image storage."""
|
| 323 |
+
|
| 324 |
+
def test_store_and_get(self):
|
| 325 |
+
store = ImageStore()
|
| 326 |
+
store.store("img-001", b"test image bytes")
|
| 327 |
+
|
| 328 |
+
assert store.get("img-001") == b"test image bytes"
|
| 329 |
+
assert store.get("nonexistent") is None
|
| 330 |
+
|
| 331 |
+
def test_remove(self):
|
| 332 |
+
store = ImageStore()
|
| 333 |
+
store.store("img-001", b"test")
|
| 334 |
+
store.remove("img-001")
|
| 335 |
+
|
| 336 |
+
assert store.get("img-001") is None
|
| 337 |
+
|
| 338 |
+
def test_clear(self):
|
| 339 |
+
store = ImageStore()
|
| 340 |
+
store.store("img-001", b"test1")
|
| 341 |
+
store.store("img-002", b"test2")
|
| 342 |
+
store.clear()
|
| 343 |
+
|
| 344 |
+
assert store.get("img-001") is None
|
| 345 |
+
assert store.get("img-002") is None
|
| 346 |
+
|
| 347 |
+
def test_missing_ids(self):
|
| 348 |
+
store = ImageStore()
|
| 349 |
+
store.store("img-001", b"test")
|
| 350 |
+
|
| 351 |
+
missing = store.get_missing_ids(["img-001", "img-002", "img-003"])
|
| 352 |
+
assert missing == ["img-002", "img-003"]
|
| 353 |
+
|
| 354 |
+
def test_has_all(self):
|
| 355 |
+
store = ImageStore()
|
| 356 |
+
store.store("img-001", b"test1")
|
| 357 |
+
store.store("img-002", b"test2")
|
| 358 |
+
|
| 359 |
+
assert store.has_all(["img-001", "img-002"]) is True
|
| 360 |
+
assert store.has_all(["img-001", "img-003"]) is False
|
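The serialization tests above describe the whole persistence story: a session is a pydantic model serialized to JSON for localStorage and rebuilt on reload. A minimal round-trip sketch, assuming the helpers behave exactly as the tests assert:

from ui.state import RoomFormData, SessionState, session_from_json, session_to_json

session = SessionState()
session.project.project_name = "Demo Project"
session.rooms.append(
    RoomFormData(name="Bay A", length_ft=100, width_ft=50, ceiling_height_ft=20)
)

# What the browser-side JS writes to localStorage...
payload = session_to_json(session)

# ...and what the app reconstructs when the user returns.
restored = session_from_json(payload)
assert restored.project.project_name == "Demo Project"
assert restored.rooms[0].name == "Bay A"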
ui/__init__.py
ADDED
|
@@ -0,0 +1,86 @@
| 1 |
+
"""UI components for FDAM AI Pipeline."""
|
| 2 |
+
|
| 3 |
+
from .state import (
|
| 4 |
+
# Form data models
|
| 5 |
+
ProjectFormData,
|
| 6 |
+
RoomFormData,
|
| 7 |
+
ImageFormData,
|
| 8 |
+
ObservationsFormData,
|
| 9 |
+
# Session management
|
| 10 |
+
SessionState,
|
| 11 |
+
AssessmentHistory,
|
| 12 |
+
# Helpers
|
| 13 |
+
create_new_session,
|
| 14 |
+
session_to_json,
|
| 15 |
+
session_from_json,
|
| 16 |
+
history_to_json,
|
| 17 |
+
history_from_json,
|
| 18 |
+
)
|
| 19 |
+
|
| 20 |
+
from .storage import (
|
| 21 |
+
STORAGE_KEY_SESSION,
|
| 22 |
+
STORAGE_KEY_HISTORY,
|
| 23 |
+
LOCALSTORAGE_JS,
|
| 24 |
+
JS_SAVE_SESSION,
|
| 25 |
+
JS_LOAD_SESSION,
|
| 26 |
+
JS_SAVE_HISTORY,
|
| 27 |
+
JS_LOAD_HISTORY,
|
| 28 |
+
JS_AUTO_LOAD,
|
| 29 |
+
get_head_html,
|
| 30 |
+
create_save_trigger_js,
|
| 31 |
+
)
|
| 32 |
+
|
| 33 |
+
from .components import (
|
| 34 |
+
create_validation_message,
|
| 35 |
+
create_progress_html,
|
| 36 |
+
create_history_dropdown_choices,
|
| 37 |
+
create_room_table_data,
|
| 38 |
+
create_tab_status_indicator,
|
| 39 |
+
create_stats_dict,
|
| 40 |
+
format_validation_errors_html,
|
| 41 |
+
format_success_html,
|
| 42 |
+
format_warning_html,
|
| 43 |
+
format_info_html,
|
| 44 |
+
ImageStore,
|
| 45 |
+
image_store,
|
| 46 |
+
)
|
| 47 |
+
|
| 48 |
+
__all__ = [
|
| 49 |
+
# Form data models
|
| 50 |
+
"ProjectFormData",
|
| 51 |
+
"RoomFormData",
|
| 52 |
+
"ImageFormData",
|
| 53 |
+
"ObservationsFormData",
|
| 54 |
+
# Session management
|
| 55 |
+
"SessionState",
|
| 56 |
+
"AssessmentHistory",
|
| 57 |
+
"create_new_session",
|
| 58 |
+
"session_to_json",
|
| 59 |
+
"session_from_json",
|
| 60 |
+
"history_to_json",
|
| 61 |
+
"history_from_json",
|
| 62 |
+
# Storage
|
| 63 |
+
"STORAGE_KEY_SESSION",
|
| 64 |
+
"STORAGE_KEY_HISTORY",
|
| 65 |
+
"LOCALSTORAGE_JS",
|
| 66 |
+
"JS_SAVE_SESSION",
|
| 67 |
+
"JS_LOAD_SESSION",
|
| 68 |
+
"JS_SAVE_HISTORY",
|
| 69 |
+
"JS_LOAD_HISTORY",
|
| 70 |
+
"JS_AUTO_LOAD",
|
| 71 |
+
"get_head_html",
|
| 72 |
+
"create_save_trigger_js",
|
| 73 |
+
# Components
|
| 74 |
+
"create_validation_message",
|
| 75 |
+
"create_progress_html",
|
| 76 |
+
"create_history_dropdown_choices",
|
| 77 |
+
"create_room_table_data",
|
| 78 |
+
"create_tab_status_indicator",
|
| 79 |
+
"create_stats_dict",
|
| 80 |
+
"format_validation_errors_html",
|
| 81 |
+
"format_success_html",
|
| 82 |
+
"format_warning_html",
|
| 83 |
+
"format_info_html",
|
| 84 |
+
"ImageStore",
|
| 85 |
+
"image_store",
|
| 86 |
+
]
|
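ui/__init__.py flattens the state, storage, and component helpers into a single import surface. A short illustrative consumer, using only names re-exported above:

from ui import create_new_session, create_stats_dict, image_store

# A fresh session starts empty; the stats dictionary reflects that immediately.
session = create_new_session()
stats = create_stats_dict(session)
assert stats["rooms"] == 0 and stats["images"] == 0

# The shared in-memory image store is importable from the same package.
image_store.clear()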
ui/components.py
ADDED
|
@@ -0,0 +1,272 @@
| 1 |
+
"""Reusable UI components for FDAM AI Pipeline.
|
| 2 |
+
|
| 3 |
+
Provides helper functions for common Gradio UI patterns.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import gradio as gr
|
| 7 |
+
from typing import Callable, Optional
|
| 8 |
+
|
| 9 |
+
from .state import SessionState, AssessmentHistory
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def create_validation_message(
|
| 13 |
+
is_valid: bool,
|
| 14 |
+
errors: list[str],
|
| 15 |
+
success_msg: str = "All required fields are complete."
|
| 16 |
+
) -> str:
|
| 17 |
+
"""Create a formatted validation message.
|
| 18 |
+
|
| 19 |
+
Args:
|
| 20 |
+
is_valid: Whether validation passed
|
| 21 |
+
errors: List of validation errors
|
| 22 |
+
success_msg: Message to show on success
|
| 23 |
+
|
| 24 |
+
Returns:
|
| 25 |
+
Formatted message string
|
| 26 |
+
"""
|
| 27 |
+
if is_valid:
|
| 28 |
+
return f"✓ {success_msg}"
|
| 29 |
+
else:
|
| 30 |
+
error_list = "\n".join(f"• {e}" for e in errors)
|
| 31 |
+
return f"⚠ Please fix the following:\n{error_list}"
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def create_progress_html(
|
| 35 |
+
current_stage: int,
|
| 36 |
+
total_stages: int,
|
| 37 |
+
stage_name: str,
|
| 38 |
+
percentage: Optional[float] = None
|
| 39 |
+
) -> str:
|
| 40 |
+
"""Create HTML for progress display during processing.
|
| 41 |
+
|
| 42 |
+
Args:
|
| 43 |
+
current_stage: Current stage number (1-indexed)
|
| 44 |
+
total_stages: Total number of stages
|
| 45 |
+
stage_name: Name of current stage
|
| 46 |
+
percentage: Optional percentage override
|
| 47 |
+
|
| 48 |
+
Returns:
|
| 49 |
+
HTML string for progress display
|
| 50 |
+
"""
|
| 51 |
+
if percentage is None:
|
| 52 |
+
percentage = (current_stage / total_stages) * 100
|
| 53 |
+
|
| 54 |
+
return f"""
|
| 55 |
+
<div style="margin: 10px 0;">
|
| 56 |
+
<div style="display: flex; justify-content: space-between; margin-bottom: 5px;">
|
| 57 |
+
<span><strong>Stage {current_stage}/{total_stages}:</strong> {stage_name}</span>
|
| 58 |
+
<span>{percentage:.0f}%</span>
|
| 59 |
+
</div>
|
| 60 |
+
<div style="background: #e0e0e0; border-radius: 4px; height: 20px; overflow: hidden;">
|
| 61 |
+
<div style="background: #4CAF50; height: 100%; width: {percentage}%; transition: width 0.3s;"></div>
|
| 62 |
+
</div>
|
| 63 |
+
</div>
|
| 64 |
+
"""
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def create_history_dropdown_choices(history: AssessmentHistory) -> list[tuple[str, str]]:
|
| 68 |
+
"""Create choices for history dropdown.
|
| 69 |
+
|
| 70 |
+
Args:
|
| 71 |
+
history: Assessment history object
|
| 72 |
+
|
| 73 |
+
Returns:
|
| 74 |
+
List of (label, value) tuples for dropdown
|
| 75 |
+
"""
|
| 76 |
+
choices = [("-- New Assessment --", "new")]
|
| 77 |
+
for item in history.get_history_items():
|
| 78 |
+
label = item["name"]
|
| 79 |
+
if item["has_results"]:
|
| 80 |
+
label += " ✓"
|
| 81 |
+
# Format date nicely
|
| 82 |
+
try:
|
| 83 |
+
from datetime import datetime
|
| 84 |
+
dt = datetime.fromisoformat(item["updated"])
|
| 85 |
+
date_str = dt.strftime("%m/%d %H:%M")
|
| 86 |
+
label += f" ({date_str})"
|
| 87 |
+
except Exception:
|
| 88 |
+
pass
|
| 89 |
+
choices.append((label, item["id"]))
|
| 90 |
+
return choices
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
def create_room_table_data(session: SessionState) -> list[list]:
|
| 94 |
+
"""Create data for rooms table display.
|
| 95 |
+
|
| 96 |
+
Args:
|
| 97 |
+
session: Current session state
|
| 98 |
+
|
| 99 |
+
Returns:
|
| 100 |
+
List of rows for dataframe
|
| 101 |
+
"""
|
| 102 |
+
rows = []
|
| 103 |
+
for room in session.rooms:
|
| 104 |
+
area = room.length_ft * room.width_ft
|
| 105 |
+
volume = area * room.ceiling_height_ft
|
| 106 |
+
rows.append([
|
| 107 |
+
room.name,
|
| 108 |
+
f"{room.length_ft:.0f} x {room.width_ft:.0f} x {room.ceiling_height_ft:.0f}",
|
| 109 |
+
f"{area:,.0f}",
|
| 110 |
+
f"{volume:,.0f}",
|
| 111 |
+
])
|
| 112 |
+
return rows
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
def create_tab_status_indicator(
|
| 116 |
+
tab_number: int,
|
| 117 |
+
is_complete: bool,
|
| 118 |
+
is_current: bool = False
|
| 119 |
+
) -> str:
|
| 120 |
+
"""Create a status indicator for tab navigation.
|
| 121 |
+
|
| 122 |
+
Args:
|
| 123 |
+
tab_number: Tab number (1-5)
|
| 124 |
+
is_complete: Whether tab is complete
|
| 125 |
+
is_current: Whether this is the current tab
|
| 126 |
+
|
| 127 |
+
Returns:
|
| 128 |
+
Status indicator string
|
| 129 |
+
"""
|
| 130 |
+
if is_complete:
|
| 131 |
+
return f"✓ Tab {tab_number}"
|
| 132 |
+
elif is_current:
|
| 133 |
+
return f"● Tab {tab_number}"
|
| 134 |
+
else:
|
| 135 |
+
return f"○ Tab {tab_number}"
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
def create_stats_dict(session: SessionState) -> dict:
|
| 139 |
+
"""Create statistics dictionary for display.
|
| 140 |
+
|
| 141 |
+
Args:
|
| 142 |
+
session: Current session state
|
| 143 |
+
|
| 144 |
+
Returns:
|
| 145 |
+
Dictionary of statistics
|
| 146 |
+
"""
|
| 147 |
+
total_area = sum(r.length_ft * r.width_ft for r in session.rooms)
|
| 148 |
+
total_volume = sum(
|
| 149 |
+
r.length_ft * r.width_ft * r.ceiling_height_ft
|
| 150 |
+
for r in session.rooms
|
| 151 |
+
)
|
| 152 |
+
|
| 153 |
+
return {
|
| 154 |
+
"rooms": len(session.rooms),
|
| 155 |
+
"images": len(session.images),
|
| 156 |
+
"total_floor_area_sf": f"{total_area:,.0f}",
|
| 157 |
+
"total_volume_cf": f"{total_volume:,.0f}",
|
| 158 |
+
"facility_classification": session.project.facility_classification or "Not set",
|
| 159 |
+
"construction_era": session.project.construction_era or "Not set",
|
| 160 |
+
}
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
def format_validation_errors_html(errors: list[str]) -> str:
|
| 164 |
+
"""Format validation errors as HTML list.
|
| 165 |
+
|
| 166 |
+
Args:
|
| 167 |
+
errors: List of error messages
|
| 168 |
+
|
| 169 |
+
Returns:
|
| 170 |
+
HTML string
|
| 171 |
+
"""
|
| 172 |
+
if not errors:
|
| 173 |
+
return ""
|
| 174 |
+
|
| 175 |
+
items = "".join(f"<li>{e}</li>" for e in errors)
|
| 176 |
+
return f"""
|
| 177 |
+
<div style="background: #ffebee; border: 1px solid #ef5350; border-radius: 4px; padding: 10px; margin: 10px 0;">
|
| 178 |
+
<strong style="color: #c62828;">Please fix the following issues:</strong>
|
| 179 |
+
<ul style="margin: 5px 0 0 0; padding-left: 20px; color: #c62828;">
|
| 180 |
+
{items}
|
| 181 |
+
</ul>
|
| 182 |
+
</div>
|
| 183 |
+
"""
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
def format_success_html(message: str) -> str:
|
| 187 |
+
"""Format success message as HTML.
|
| 188 |
+
|
| 189 |
+
Args:
|
| 190 |
+
message: Success message
|
| 191 |
+
|
| 192 |
+
Returns:
|
| 193 |
+
HTML string
|
| 194 |
+
"""
|
| 195 |
+
return f"""
|
| 196 |
+
<div style="background: #e8f5e9; border: 1px solid #66bb6a; border-radius: 4px; padding: 10px; margin: 10px 0;">
|
| 197 |
+
<span style="color: #2e7d32;">✓ {message}</span>
|
| 198 |
+
</div>
|
| 199 |
+
"""
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
def format_warning_html(message: str) -> str:
|
| 203 |
+
"""Format warning message as HTML.
|
| 204 |
+
|
| 205 |
+
Args:
|
| 206 |
+
message: Warning message
|
| 207 |
+
|
| 208 |
+
Returns:
|
| 209 |
+
HTML string
|
| 210 |
+
"""
|
| 211 |
+
return f"""
|
| 212 |
+
<div style="background: #fff3e0; border: 1px solid #ffb74d; border-radius: 4px; padding: 10px; margin: 10px 0;">
|
| 213 |
+
<span style="color: #e65100;">⚠ {message}</span>
|
| 214 |
+
</div>
|
| 215 |
+
"""
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
def format_info_html(message: str) -> str:
|
| 219 |
+
"""Format info message as HTML.
|
| 220 |
+
|
| 221 |
+
Args:
|
| 222 |
+
message: Info message
|
| 223 |
+
|
| 224 |
+
Returns:
|
| 225 |
+
HTML string
|
| 226 |
+
"""
|
| 227 |
+
return f"""
|
| 228 |
+
<div style="background: #e3f2fd; border: 1px solid #64b5f6; border-radius: 4px; padding: 10px; margin: 10px 0;">
|
| 229 |
+
<span style="color: #1565c0;">ℹ {message}</span>
|
| 230 |
+
</div>
|
| 231 |
+
"""
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
# Image handling helpers (images stored separately from localStorage)
|
| 235 |
+
|
| 236 |
+
class ImageStore:
|
| 237 |
+
"""In-memory store for uploaded images.
|
| 238 |
+
|
| 239 |
+
Images are too large for localStorage, so they're kept in memory
|
| 240 |
+
and referenced by ID. Users are prompted to re-upload when resuming.
|
| 241 |
+
"""
|
| 242 |
+
|
| 243 |
+
def __init__(self):
|
| 244 |
+
self._images: dict[str, bytes] = {}
|
| 245 |
+
|
| 246 |
+
def store(self, image_id: str, image_bytes: bytes) -> None:
|
| 247 |
+
"""Store image bytes by ID."""
|
| 248 |
+
self._images[image_id] = image_bytes
|
| 249 |
+
|
| 250 |
+
def get(self, image_id: str) -> Optional[bytes]:
|
| 251 |
+
"""Get image bytes by ID."""
|
| 252 |
+
return self._images.get(image_id)
|
| 253 |
+
|
| 254 |
+
def remove(self, image_id: str) -> None:
|
| 255 |
+
"""Remove image by ID."""
|
| 256 |
+
self._images.pop(image_id, None)
|
| 257 |
+
|
| 258 |
+
def clear(self) -> None:
|
| 259 |
+
"""Clear all stored images."""
|
| 260 |
+
self._images.clear()
|
| 261 |
+
|
| 262 |
+
def get_missing_ids(self, expected_ids: list[str]) -> list[str]:
|
| 263 |
+
"""Get list of expected image IDs that are missing."""
|
| 264 |
+
return [image_id for image_id in expected_ids if image_id not in self._images]
|
| 265 |
+
|
| 266 |
+
def has_all(self, expected_ids: list[str]) -> bool:
|
| 267 |
+
"""Check if all expected images are present."""
|
| 268 |
+
return all(image_id in self._images for image_id in expected_ids)
|
| 269 |
+
|
| 270 |
+
|
| 271 |
+
# Global image store instance
|
| 272 |
+
image_store = ImageStore()
|
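Because image bytes never reach localStorage, resuming a saved session means re-checking the in-memory ImageStore against the image IDs the session still references. An illustrative resume check, using only the methods defined above:

from ui.components import ImageStore

store = ImageStore()
store.store("img-001", b"\x89PNG fake bytes")  # re-uploaded during this visit

# IDs the restored session still references (img-002 was not re-uploaded).
expected_ids = ["img-001", "img-002"]

if not store.has_all(expected_ids):
    missing = store.get_missing_ids(expected_ids)
    print(f"Please re-upload {len(missing)} image(s): {', '.join(missing)}")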
ui/state.py
ADDED
|
@@ -0,0 +1,273 @@
"""Session state management for FDAM AI Pipeline.

Provides Pydantic models for session state and localStorage persistence.
Images are stored separately (not in localStorage due to size limits).
"""

import json
import uuid
from datetime import datetime
from typing import Optional

from pydantic import BaseModel, Field

from schemas.input import (
    ConstructionEra,
    FacilityClassification,
    OdorIntensity,
    CharDensity,
)


# --- Form Data Models (for localStorage) ---


class ProjectFormData(BaseModel):
    """Form data for Tab 1: Project Info."""

    project_name: str = ""
    address: str = ""
    city: str = ""
    state: str = ""
    zip_code: str = ""
    client_name: str = ""
    fire_date: str = ""  # ISO format string for form compatibility
    assessment_date: str = ""
    facility_classification: FacilityClassification = "non-operational"
    construction_era: ConstructionEra = "post-2000"
    assessor_name: str = ""
    assessor_credentials: str = ""


class RoomFormData(BaseModel):
    """Form data for a single room."""

    id: str = Field(default_factory=lambda: f"room-{uuid.uuid4().hex[:8]}")
    name: str = ""
    floor: str = ""
    length_ft: float = 0
    width_ft: float = 0
    ceiling_height_ft: float = 0


class ImageFormData(BaseModel):
    """Form data for a single image (metadata only, not bytes)."""

    id: str = Field(default_factory=lambda: f"img-{uuid.uuid4().hex[:8]}")
    filename: str = ""
    room_id: str = ""
    description: str = ""
    # Image bytes stored separately, referenced by id


class ObservationsFormData(BaseModel):
    """Form data for Tab 4: Observations."""

    smoke_fire_odor: bool = False
    odor_intensity: OdorIntensity = "none"
    visible_soot_deposits: bool = False
    soot_pattern_description: str = ""
    large_char_particles: bool = False
    char_density_estimate: Optional[CharDensity] = None
    ash_like_residue: bool = False
    ash_color_texture: str = ""
    surface_discoloration: bool = False
    discoloration_description: str = ""
    dust_loading_interference: bool = False
    dust_notes: str = ""
    wildfire_indicators: bool = False
    wildfire_notes: str = ""
    additional_notes: str = ""


class SessionState(BaseModel):
    """Complete session state for an assessment.

    This model is serialized to localStorage for persistence.
    Images are stored separately and referenced by ID.
    """

    # Session metadata
    session_id: str = Field(default_factory=lambda: uuid.uuid4().hex)
    created_at: str = Field(default_factory=lambda: datetime.now().isoformat())
    updated_at: str = Field(default_factory=lambda: datetime.now().isoformat())
    name: str = ""  # Display name for history list

    # Tab completion status
    tab1_complete: bool = False
    tab2_complete: bool = False
    tab3_complete: bool = False
    tab4_complete: bool = False

    # Form data by tab
    project: ProjectFormData = Field(default_factory=ProjectFormData)
    rooms: list[RoomFormData] = Field(default_factory=list)
    images: list[ImageFormData] = Field(default_factory=list)
    observations: ObservationsFormData = Field(default_factory=ObservationsFormData)

    # Results (after generation)
    has_results: bool = False
    results_generated_at: Optional[str] = None

    def update_timestamp(self) -> None:
        """Update the updated_at timestamp."""
        self.updated_at = datetime.now().isoformat()

    def get_display_name(self) -> str:
        """Get a display name for the history list."""
        if self.name:
            return self.name
        if self.project.project_name:
            return self.project.project_name
        return f"Assessment {self.session_id[:8]}"

    def validate_tab1(self) -> tuple[bool, list[str]]:
        """Validate Tab 1 (Project Info) is complete."""
        errors = []
        p = self.project
        if not p.project_name:
            errors.append("Project name is required")
        if not p.address:
            errors.append("Address is required")
        if not p.city:
            errors.append("City is required")
        if not p.state:
            errors.append("State is required")
        if not p.zip_code:
            errors.append("ZIP code is required")
        if not p.client_name:
            errors.append("Client name is required")
        if not p.fire_date:
            errors.append("Fire date is required")
        if not p.assessment_date:
            errors.append("Assessment date is required")
        if not p.assessor_name:
            errors.append("Assessor name is required")
        return len(errors) == 0, errors

    def validate_tab2(self) -> tuple[bool, list[str]]:
        """Validate Tab 2 (Building/Rooms) is complete."""
        errors = []
        if not self.rooms:
            errors.append("At least one room is required")
        for room in self.rooms:
            if not room.name:
                errors.append(f"Room name is required")
            if room.length_ft <= 0:
                errors.append(f"Room '{room.name}': Length must be greater than 0")
            if room.width_ft <= 0:
                errors.append(f"Room '{room.name}': Width must be greater than 0")
            if room.ceiling_height_ft <= 0:
                errors.append(f"Room '{room.name}': Ceiling height must be greater than 0")
        return len(errors) == 0, errors

    def validate_tab3(self) -> tuple[bool, list[str]]:
        """Validate Tab 3 (Images) is complete."""
        errors = []
        if not self.images:
            errors.append("At least one image is required")
        for img in self.images:
            if not img.room_id:
                errors.append(f"Image '{img.filename}': Must be associated with a room")
        return len(errors) == 0, errors

    def validate_tab4(self) -> tuple[bool, list[str]]:
        """Validate Tab 4 (Observations) is complete."""
        # Tab 4 has no required fields - all checkboxes default to False
        return True, []

    def can_generate(self) -> tuple[bool, list[str]]:
        """Check if assessment can be generated."""
        all_errors = []

        valid1, errors1 = self.validate_tab1()
        if not valid1:
            all_errors.extend(errors1)

        valid2, errors2 = self.validate_tab2()
        if not valid2:
            all_errors.extend(errors2)

        valid3, errors3 = self.validate_tab3()
        if not valid3:
            all_errors.extend(errors3)

        valid4, errors4 = self.validate_tab4()
        if not valid4:
            all_errors.extend(errors4)

        return len(all_errors) == 0, all_errors


class AssessmentHistory(BaseModel):
    """Collection of saved assessments for history list."""

    assessments: list[SessionState] = Field(default_factory=list)
    current_session_id: Optional[str] = None

    def add_assessment(self, session: SessionState) -> None:
        """Add or update an assessment in history."""
        session.update_timestamp()
        # Remove existing if present
        self.assessments = [a for a in self.assessments if a.session_id != session.session_id]
        # Add to front of list
        self.assessments.insert(0, session)
        # Keep only last 20 assessments
        self.assessments = self.assessments[:20]

    def get_assessment(self, session_id: str) -> Optional[SessionState]:
        """Get an assessment by ID."""
        for a in self.assessments:
            if a.session_id == session_id:
                return a
        return None

    def remove_assessment(self, session_id: str) -> None:
        """Remove an assessment from history."""
        self.assessments = [a for a in self.assessments if a.session_id != session_id]

    def get_history_items(self) -> list[dict]:
        """Get history items for display in dropdown."""
        return [
            {
                "id": a.session_id,
                "name": a.get_display_name(),
                "updated": a.updated_at,
                "has_results": a.has_results,
            }
            for a in self.assessments
        ]


# --- Gradio State Helpers ---


def create_new_session() -> SessionState:
    """Create a new empty session."""
    return SessionState()


def session_to_json(session: SessionState) -> str:
    """Serialize session to JSON for localStorage."""
    return session.model_dump_json()


def session_from_json(json_str: str) -> SessionState:
    """Deserialize session from JSON."""
    try:
        return SessionState.model_validate_json(json_str)
    except Exception:
        return create_new_session()


def history_to_json(history: AssessmentHistory) -> str:
    """Serialize history to JSON for localStorage."""
    return history.model_dump_json()


def history_from_json(json_str: str) -> AssessmentHistory:
    """Deserialize history from JSON."""
    try:
        return AssessmentHistory.model_validate_json(json_str)
    except Exception:
        return AssessmentHistory()
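A minimal sketch (illustrative values, not part of the commit) of the serialization round trip these helpers provide; the JSON string is what the JS layer in `ui/storage.py` hands to localStorage:

```python
from ui.state import create_new_session, session_to_json, session_from_json

session = create_new_session()
session.project.project_name = "ABC Warehouse"

payload = session_to_json(session)       # string persisted to localStorage
restored = session_from_json(payload)    # falls back to a fresh session on bad JSON

ok, errors = restored.validate_tab1()
print(ok)           # False - address, dates, assessor, etc. are still blank
print(errors[:2])   # ['Address is required', 'City is required']
```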
ui/storage.py
ADDED
@@ -0,0 +1,205 @@
"""localStorage integration for Gradio via JavaScript injection.

Provides JavaScript code that syncs session state with browser localStorage.
"""

# localStorage keys
STORAGE_KEY_SESSION = "fdam_current_session"
STORAGE_KEY_HISTORY = "fdam_assessment_history"
STORAGE_KEY_IMAGES = "fdam_image_refs"  # References only, not actual bytes

# JavaScript code for localStorage operations
LOCALSTORAGE_JS = """
<script>
(function() {
    // FDAM localStorage utilities
    window.fdamStorage = {
        KEYS: {
            SESSION: 'fdam_current_session',
            HISTORY: 'fdam_assessment_history',
            IMAGE_REFS: 'fdam_image_refs'
        },

        // Save current session
        saveSession: function(sessionJson) {
            try {
                localStorage.setItem(this.KEYS.SESSION, sessionJson);
                console.log('[FDAM] Session saved to localStorage');
                return true;
            } catch (e) {
                console.error('[FDAM] Failed to save session:', e);
                return false;
            }
        },

        // Load current session
        loadSession: function() {
            try {
                const data = localStorage.getItem(this.KEYS.SESSION);
                if (data) {
                    console.log('[FDAM] Session loaded from localStorage');
                    return data;
                }
            } catch (e) {
                console.error('[FDAM] Failed to load session:', e);
            }
            return null;
        },

        // Save assessment history
        saveHistory: function(historyJson) {
            try {
                localStorage.setItem(this.KEYS.HISTORY, historyJson);
                console.log('[FDAM] History saved to localStorage');
                return true;
            } catch (e) {
                console.error('[FDAM] Failed to save history:', e);
                return false;
            }
        },

        // Load assessment history
        loadHistory: function() {
            try {
                const data = localStorage.getItem(this.KEYS.HISTORY);
                if (data) {
                    console.log('[FDAM] History loaded from localStorage');
                    return data;
                }
            } catch (e) {
                console.error('[FDAM] Failed to load history:', e);
            }
            return null;
        },

        // Clear all FDAM data
        clearAll: function() {
            try {
                localStorage.removeItem(this.KEYS.SESSION);
                localStorage.removeItem(this.KEYS.HISTORY);
                localStorage.removeItem(this.KEYS.IMAGE_REFS);
                console.log('[FDAM] All localStorage data cleared');
                return true;
            } catch (e) {
                console.error('[FDAM] Failed to clear storage:', e);
                return false;
            }
        },

        // Get storage usage info
        getStorageInfo: function() {
            try {
                let total = 0;
                for (let key in localStorage) {
                    if (key.startsWith('fdam_')) {
                        total += localStorage.getItem(key).length;
                    }
                }
                return {
                    used_bytes: total,
                    used_kb: (total / 1024).toFixed(2),
                    limit_kb: 5120 // ~5MB typical limit
                };
            } catch (e) {
                return { error: e.message };
            }
        }
    };

    // Expose to global scope for Gradio callbacks
    window.saveSession = window.fdamStorage.saveSession.bind(window.fdamStorage);
    window.loadSession = window.fdamStorage.loadSession.bind(window.fdamStorage);
    window.saveHistory = window.fdamStorage.saveHistory.bind(window.fdamStorage);
    window.loadHistory = window.fdamStorage.loadHistory.bind(window.fdamStorage);

    console.log('[FDAM] localStorage utilities loaded');
})();
</script>
"""

# JavaScript functions for Gradio event handlers
JS_SAVE_SESSION = """
async (sessionJson) => {
    if (window.fdamStorage) {
        window.fdamStorage.saveSession(sessionJson);
    }
    return sessionJson;
}
"""

JS_LOAD_SESSION = """
async () => {
    if (window.fdamStorage) {
        return window.fdamStorage.loadSession() || '{}';
    }
    return '{}';
}
"""

JS_SAVE_HISTORY = """
async (historyJson) => {
    if (window.fdamStorage) {
        window.fdamStorage.saveHistory(historyJson);
    }
    return historyJson;
}
"""

JS_LOAD_HISTORY = """
async () => {
    if (window.fdamStorage) {
        return window.fdamStorage.loadHistory() || '{"assessments":[],"current_session_id":null}';
    }
    return '{"assessments":[],"current_session_id":null}';
}
"""

# JavaScript to auto-load session on page load
JS_AUTO_LOAD = """
async () => {
    // Small delay to ensure Gradio is fully loaded
    await new Promise(resolve => setTimeout(resolve, 500));

    if (window.fdamStorage) {
        const session = window.fdamStorage.loadSession();
        const history = window.fdamStorage.loadHistory();
        return [session || '{}', history || '{"assessments":[],"current_session_id":null}'];
    }
    return ['{}', '{"assessments":[],"current_session_id":null}'];
}
"""


def get_head_html() -> str:
    """Get HTML to inject into Gradio head for localStorage support."""
    return LOCALSTORAGE_JS


def create_save_trigger_js(field_updates: dict[str, str]) -> str:
    """Create JavaScript that triggers save after field updates.

    Args:
        field_updates: Mapping of field name to value expression

    Returns:
        JavaScript code string
    """
    updates = ", ".join(f'"{k}": {v}' for k, v in field_updates.items())
    return f"""
    async (currentSession, ...values) => {{
        try {{
            const session = JSON.parse(currentSession || '{{}}');
            const updates = {{ {updates} }};
            Object.assign(session, updates);
            session.updated_at = new Date().toISOString();
            const newSession = JSON.stringify(session);
            if (window.fdamStorage) {{
                window.fdamStorage.saveSession(newSession);
            }}
            return newSession;
        }} catch (e) {{
            console.error('[FDAM] Save trigger error:', e);
            return currentSession;
        }}
    }}
    """
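Hypothetical wiring, shown for orientation only: the real hookup lives in `app.py` / `ui/__init__.py` (not in this excerpt), and this sketch assumes a Gradio version that accepts a `head=` argument on `Blocks` and `js=` hooks on event listeners, which is how the snippets above are intended to run purely client-side:

```python
import gradio as gr
from ui.storage import get_head_html, JS_SAVE_SESSION, JS_AUTO_LOAD

with gr.Blocks(head=get_head_html()) as demo:    # injects the <script> utilities
    session_json = gr.Textbox(visible=False)     # server-side copy of the session JSON
    history_json = gr.Textbox(visible=False)

    # Persist whenever the serialized session changes (client-side only, fn=None).
    session_json.change(fn=None, inputs=session_json, outputs=session_json, js=JS_SAVE_SESSION)

    # Restore both stores once the page has loaded.
    demo.load(fn=None, inputs=None, outputs=[session_json, history_json], js=JS_AUTO_LOAD)
```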
ui/tabs/__init__.py
ADDED
@@ -0,0 +1,15 @@
"""Tab modules for FDAM AI Pipeline UI."""

from . import project
from . import rooms
from . import images
from . import observations
from . import results

__all__ = [
    "project",
    "rooms",
    "images",
    "observations",
    "results",
]
ui/tabs/images.py
ADDED
@@ -0,0 +1,328 @@
"""Tab 3: Images.

Upload and manage fire damage images for AI analysis.
"""

import uuid
import gradio as gr
from typing import Any, Optional
from PIL import Image
import io

from ui.state import SessionState, ImageFormData
from ui.components import image_store
from config.settings import settings


def create_tab() -> dict[str, Any]:
    """Create Tab 3 UI components.

    Returns:
        Dictionary of component references for event wiring.
    """
    gr.Markdown("### Fire Damage Images")
    gr.Markdown(
        f"*Upload up to {settings.max_images_per_assessment} images for AI analysis. "
        f"Each image must be associated with a room.*"
    )

    with gr.Row():
        with gr.Column(scale=2):
            image_upload = gr.Image(
                label="Upload Image",
                type="pil",
                sources=["upload"],
                elem_id="image_upload",
            )
            room_select = gr.Dropdown(
                label="Associate with Room *",
                choices=[],
                value=None,
                elem_id="room_select",
            )
            image_description = gr.Textbox(
                label="Description (optional)",
                placeholder="e.g., View of ceiling deck from center aisle",
                elem_id="image_description",
            )

            with gr.Row():
                add_image_btn = gr.Button("Add Image", variant="primary")
                clear_upload_btn = gr.Button("Clear", variant="secondary")

        with gr.Column(scale=3):
            images_gallery = gr.Gallery(
                label="Images Added",
                columns=3,
                height="auto",
                elem_id="images_gallery",
            )
            with gr.Row():
                remove_last_btn = gr.Button("Remove Last Image", variant="secondary")
                clear_all_btn = gr.Button("Clear All Images", variant="stop")

    # Image count and status
    with gr.Row():
        image_count = gr.Textbox(
            label="Images Added",
            value="0 / 20",
            interactive=False,
        )

    # Validation status
    with gr.Row():
        validation_status = gr.HTML(
            value="",
            elem_id="tab3_validation",
        )

    # Resume warning (shown when images need re-upload)
    with gr.Row():
        resume_warning = gr.HTML(
            value="",
            elem_id="resume_warning",
            visible=False,
        )

    with gr.Row():
        back_btn = gr.Button("← Back to Rooms")
        validate_btn = gr.Button(
            "Validate & Continue to Observations →",
            variant="primary",
        )

    return {
        "image_upload": image_upload,
        "room_select": room_select,
        "image_description": image_description,
        "add_image_btn": add_image_btn,
        "clear_upload_btn": clear_upload_btn,
        "images_gallery": images_gallery,
        "remove_last_btn": remove_last_btn,
        "clear_all_btn": clear_all_btn,
        "image_count": image_count,
        "validation_status": validation_status,
        "resume_warning": resume_warning,
        "back_btn": back_btn,
        "validate_btn": validate_btn,
    }


def add_image(
    session: SessionState,
    image: Optional[Image.Image],
    room_id: str,
    description: str,
) -> tuple[SessionState, list[tuple], str, str, None, None, str]:
    """Add an image to the session.

    Returns:
        Tuple of (session, gallery_data, validation_html, image_count,
        cleared_image, cleared_description, room_id).
    """
    validation_html = ""

    # Validate input
    errors = []
    if image is None:
        errors.append("Please upload an image")
    if not room_id:
        errors.append("Please select a room for this image")
    if len(session.images) >= settings.max_images_per_assessment:
        errors.append(f"Maximum of {settings.max_images_per_assessment} images allowed")

    if errors:
        error_items = "".join(f"<li>{e}</li>" for e in errors)
        validation_html = f"""
        <div style="background: #ffebee; border: 1px solid #ef5350; border-radius: 4px; padding: 10px;">
            <ul style="margin: 0; padding-left: 20px; color: #c62828;">
                {error_items}
            </ul>
        </div>
        """
        gallery_data = _get_gallery_data(session)
        count_str = f"{len(session.images)} / {settings.max_images_per_assessment}"
        return session, gallery_data, validation_html, count_str, image, description, room_id

    # Generate image ID
    image_id = f"img-{uuid.uuid4().hex[:8]}"

    # Store image bytes in memory
    img_bytes = io.BytesIO()
    image.save(img_bytes, format="PNG")
    image_store.store(image_id, img_bytes.getvalue())

    # Get room name for filename
    room_name = "unknown"
    for room in session.rooms:
        if room.id == room_id:
            room_name = room.name.replace(" ", "_")[:20]
            break

    # Add image metadata to session
    img_meta = ImageFormData(
        id=image_id,
        filename=f"{room_name}_{image_id}.png",
        room_id=room_id,
        description=description.strip() if description else "",
    )
    session.images.append(img_meta)
    session.update_timestamp()

    # Success message
    validation_html = f"""
    <div style="background: #e8f5e9; border: 1px solid #66bb6a; border-radius: 4px; padding: 10px;">
        <span style="color: #2e7d32;">✓ Image added for room: {room_name}</span>
    </div>
    """

    gallery_data = _get_gallery_data(session)
    count_str = f"{len(session.images)} / {settings.max_images_per_assessment}"

    # Clear form
    return session, gallery_data, validation_html, count_str, None, "", room_id


def remove_last_image(session: SessionState) -> tuple[SessionState, list[tuple], str, str]:
    """Remove the last image from the session."""
    validation_html = ""

    if session.images:
        removed = session.images.pop()
        image_store.remove(removed.id)
        session.update_timestamp()
        validation_html = f"""
        <div style="background: #fff3e0; border: 1px solid #ffb74d; border-radius: 4px; padding: 10px;">
            <span style="color: #e65100;">Removed image: {removed.filename}</span>
        </div>
        """

    gallery_data = _get_gallery_data(session)
    count_str = f"{len(session.images)} / {settings.max_images_per_assessment}"
    return session, gallery_data, validation_html, count_str


def clear_all_images(session: SessionState) -> tuple[SessionState, list[tuple], str, str]:
    """Clear all images from the session."""
    count = len(session.images)

    # Clear from store
    for img in session.images:
        image_store.remove(img.id)

    session.images = []
    session.update_timestamp()

    validation_html = ""
    if count > 0:
        validation_html = f"""
        <div style="background: #fff3e0; border: 1px solid #ffb74d; border-radius: 4px; padding: 10px;">
            <span style="color: #e65100;">Cleared {count} image(s)</span>
        </div>
        """

    count_str = f"0 / {settings.max_images_per_assessment}"
    return session, [], validation_html, count_str


def validate_and_continue(session: SessionState) -> tuple[SessionState, str, int]:
    """Validate Tab 3 and proceed to Tab 4.

    Returns:
        Tuple of (session, validation_html, next_tab_index).
    """
    # Check if images need re-upload (session restored but images not in memory)
    expected_ids = [img.id for img in session.images]
    missing_ids = image_store.get_missing_ids(expected_ids)

    if missing_ids:
        missing_count = len(missing_ids)
        html = f"""
        <div style="background: #fff3e0; border: 1px solid #ffb74d; border-radius: 4px; padding: 10px;">
            <strong style="color: #e65100;">⚠ {missing_count} image(s) need to be re-uploaded</strong>
            <p style="color: #e65100; margin: 5px 0 0 0;">
                Images are not stored in browser storage. Please re-upload the missing images
                or clear the image list and start fresh.
            </p>
        </div>
        """
        return session, html, 2  # Stay on Images tab

    is_valid, errors = session.validate_tab3()

    if is_valid:
        session.tab3_complete = True
        session.update_timestamp()
        html = """
        <div style="background: #e8f5e9; border: 1px solid #66bb6a; border-radius: 4px; padding: 10px;">
            <span style="color: #2e7d32;">✓ Images complete. Proceeding to Observations tab...</span>
        </div>
        """
        return session, html, 3  # Go to tab index 3 (Observations)
    else:
        session.tab3_complete = False
        error_items = "".join(f"<li>{e}</li>" for e in errors)
        html = f"""
        <div style="background: #ffebee; border: 1px solid #ef5350; border-radius: 4px; padding: 10px;">
            <strong style="color: #c62828;">Please fix the following:</strong>
            <ul style="margin: 5px 0 0 0; padding-left: 20px; color: #c62828;">
                {error_items}
            </ul>
        </div>
        """
        return session, html, 2  # Stay on current tab


def update_room_choices(session: SessionState) -> dict:
    """Update room dropdown choices.

    Returns:
        Gradio update dict for Dropdown component.
    """
    choices = [(r.name, r.id) for r in session.rooms]
    # Don't reset value - let user keep their selection when adding multiple images
    return gr.update(choices=choices)


def load_from_session(session: SessionState) -> tuple[list[tuple], str, str]:
    """Load gallery data and count from session.

    Returns:
        Tuple of (gallery_data, image_count, resume_warning_html).
    """
    gallery_data = _get_gallery_data(session)
    count_str = f"{len(session.images)} / {settings.max_images_per_assessment}"

    # Check for missing images
    expected_ids = [img.id for img in session.images]
    missing_ids = image_store.get_missing_ids(expected_ids)

    resume_html = ""
    if missing_ids and session.images:
        resume_html = f"""
        <div style="background: #fff3e0; border: 1px solid #ffb74d; border-radius: 4px; padding: 10px;">
            <strong style="color: #e65100;">⚠ {len(missing_ids)} image(s) need to be re-uploaded</strong>
            <p style="color: #e65100; margin: 5px 0 0 0;">
                Session restored, but images must be re-uploaded as they are not stored in browser storage.
            </p>
        </div>
        """

    return gallery_data, count_str, resume_html


def _get_gallery_data(session: SessionState) -> list[tuple]:
    """Get gallery data from session images.

    Returns:
        List of (image, caption) tuples for gallery.
    """
    gallery_data = []
    for img_meta in session.images:
        img_bytes = image_store.get(img_meta.id)
        if img_bytes:
            # Convert bytes to PIL Image for gallery
            pil_image = Image.open(io.BytesIO(img_bytes))
            caption = img_meta.description or img_meta.filename
            gallery_data.append((pil_image, caption))
    return gallery_data
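Illustrative only: the actual event wiring is done elsewhere in the app (e.g. `ui/__init__.py` / `app.py`, not in this excerpt). Component names come from the `create_tab()` dict above; holding the session in a `gr.State` is an assumption of this sketch. The output list mirrors the tuple documented in `add_image`.

```python
import gradio as gr
from ui.state import create_new_session
from ui.tabs import images

with gr.Blocks() as demo:
    session = gr.State(create_new_session())
    with gr.Tab("Images"):
        c = images.create_tab()

    c["add_image_btn"].click(
        images.add_image,
        inputs=[session, c["image_upload"], c["room_select"], c["image_description"]],
        outputs=[session, c["images_gallery"], c["validation_status"], c["image_count"],
                 c["image_upload"], c["image_description"], c["room_select"]],
    )
    c["clear_all_btn"].click(
        images.clear_all_images,
        inputs=[session],
        outputs=[session, c["images_gallery"], c["validation_status"], c["image_count"]],
    )
```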
ui/tabs/observations.py
ADDED
@@ -0,0 +1,281 @@
"""Tab 4: Observations.

Qualitative observation checklist per FDAM §2.3.
"""

import gradio as gr
from typing import Any

from ui.state import SessionState, ObservationsFormData


# Map UI values to schema values
ODOR_MAP = {
    "None": "none",
    "Faint": "faint",
    "Moderate": "moderate",
    "Strong": "strong",
}
ODOR_MAP_REVERSE = {v: k for k, v in ODOR_MAP.items()}

CHAR_DENSITY_MAP = {
    "None": None,
    "Sparse": "sparse",
    "Moderate": "moderate",
    "Dense": "dense",
}
CHAR_DENSITY_MAP_REVERSE = {v: k for k, v in CHAR_DENSITY_MAP.items()}


def create_tab() -> dict[str, Any]:
    """Create Tab 4 UI components.

    Returns:
        Dictionary of component references for event wiring.
    """
    gr.Markdown("### Qualitative Observations")
    gr.Markdown("*Document field observations per FDAM §2.3. All fields are optional but recommended.*")

    with gr.Row():
        with gr.Column():
            gr.Markdown("#### Odor Assessment")
            smoke_odor = gr.Checkbox(
                label="Smoke/fire odor present?",
                elem_id="smoke_odor",
            )
            odor_intensity = gr.Radio(
                choices=["None", "Faint", "Moderate", "Strong"],
                label="Odor Intensity",
                value="None",
                elem_id="odor_intensity",
            )

            gr.Markdown("#### Visible Contamination")
            visible_soot = gr.Checkbox(
                label="Visible soot deposits?",
                elem_id="visible_soot",
            )
            soot_description = gr.Textbox(
                label="Soot Pattern Description (optional)",
                placeholder="e.g., Heavy deposits on ceiling, lighter on walls",
                elem_id="soot_description",
            )

            large_char = gr.Checkbox(
                label="Large char particles observed?",
                elem_id="large_char",
            )
            char_density = gr.Radio(
                choices=["None", "Sparse", "Moderate", "Dense"],
                label="Char Density",
                value="None",
                elem_id="char_density",
            )

            ash_residue = gr.Checkbox(
                label="Ash-like residue present?",
                elem_id="ash_residue",
            )
            ash_description = gr.Textbox(
                label="Ash Color/Texture (optional)",
                placeholder="e.g., Gray powdery residue",
                elem_id="ash_description",
            )

        with gr.Column():
            gr.Markdown("#### Surface Conditions")
            surface_discoloration = gr.Checkbox(
                label="Surface discoloration?",
                elem_id="surface_discoloration",
            )
            discoloration_description = gr.Textbox(
                label="Discoloration Description (optional)",
                placeholder="e.g., Yellowing on painted surfaces",
                elem_id="discoloration_description",
            )

            gr.Markdown("#### Environmental Factors")
            dust_interference = gr.Checkbox(
                label="Dust loading or interference?",
                info="Pre-existing dust may affect sample interpretation",
                elem_id="dust_interference",
            )
            dust_notes = gr.Textbox(
                label="Dust Notes (optional)",
                placeholder="e.g., Heavy ambient dust from warehouse operations",
                elem_id="dust_notes",
            )

            wildfire_indicators = gr.Checkbox(
                label="Wildfire indicators (burned vegetation/pollen)?",
                info="May indicate wildfire vs structural fire",
                elem_id="wildfire_indicators",
            )
            wildfire_notes = gr.Textbox(
                label="Wildfire Notes (optional)",
                placeholder="e.g., Burned pine pollen visible on surfaces",
                elem_id="wildfire_notes",
            )

            gr.Markdown("#### Additional Notes")
            additional_notes = gr.Textbox(
                label="Additional Observations",
                lines=3,
                placeholder="Any other relevant observations...",
                elem_id="additional_notes",
            )

    # Validation status
    with gr.Row():
        validation_status = gr.HTML(
            value="",
            elem_id="tab4_validation",
        )

    with gr.Row():
        back_btn = gr.Button("← Back to Images")
        validate_btn = gr.Button(
            "Save & Continue to Generate Results →",
            variant="primary",
        )

    return {
        "smoke_odor": smoke_odor,
        "odor_intensity": odor_intensity,
        "visible_soot": visible_soot,
        "soot_description": soot_description,
        "large_char": large_char,
        "char_density": char_density,
        "ash_residue": ash_residue,
        "ash_description": ash_description,
        "surface_discoloration": surface_discoloration,
        "discoloration_description": discoloration_description,
        "dust_interference": dust_interference,
        "dust_notes": dust_notes,
        "wildfire_indicators": wildfire_indicators,
        "wildfire_notes": wildfire_notes,
        "additional_notes": additional_notes,
        "validation_status": validation_status,
        "back_btn": back_btn,
        "validate_btn": validate_btn,
    }


def update_session_from_form(
    session: SessionState,
    smoke_odor: bool,
    odor_intensity: str,
    visible_soot: bool,
    soot_description: str,
    large_char: bool,
    char_density: str,
    ash_residue: bool,
    ash_description: str,
    surface_discoloration: bool,
    discoloration_description: str,
    dust_interference: bool,
    dust_notes: str,
    wildfire_indicators: bool,
    wildfire_notes: str,
    additional_notes: str,
) -> SessionState:
    """Update session state from form values."""
    session.observations = ObservationsFormData(
        smoke_fire_odor=smoke_odor or False,
        odor_intensity=ODOR_MAP.get(odor_intensity, "none"),
        visible_soot_deposits=visible_soot or False,
        soot_pattern_description=soot_description or "",
        large_char_particles=large_char or False,
        char_density_estimate=CHAR_DENSITY_MAP.get(char_density),
        ash_like_residue=ash_residue or False,
        ash_color_texture=ash_description or "",
        surface_discoloration=surface_discoloration or False,
        discoloration_description=discoloration_description or "",
        dust_loading_interference=dust_interference or False,
        dust_notes=dust_notes or "",
        wildfire_indicators=wildfire_indicators or False,
        wildfire_notes=wildfire_notes or "",
        additional_notes=additional_notes or "",
    )
    session.update_timestamp()
    return session


def validate_and_continue(
    session: SessionState,
    smoke_odor: bool,
    odor_intensity: str,
    visible_soot: bool,
    soot_description: str,
    large_char: bool,
    char_density: str,
    ash_residue: bool,
    ash_description: str,
    surface_discoloration: bool,
    discoloration_description: str,
    dust_interference: bool,
    dust_notes: str,
    wildfire_indicators: bool,
    wildfire_notes: str,
    additional_notes: str,
) -> tuple[SessionState, str, int]:
    """Save observations and proceed to Tab 5.

    Returns:
        Tuple of (session, validation_html, next_tab_index).
    """
    # Update session
    session = update_session_from_form(
        session,
        smoke_odor,
        odor_intensity,
        visible_soot,
        soot_description,
        large_char,
        char_density,
        ash_residue,
        ash_description,
        surface_discoloration,
        discoloration_description,
        dust_interference,
        dust_notes,
        wildfire_indicators,
        wildfire_notes,
        additional_notes,
    )

    # Tab 4 has no required fields
    session.tab4_complete = True

    html = """
    <div style="background: #e8f5e9; border: 1px solid #66bb6a; border-radius: 4px; padding: 10px;">
        <span style="color: #2e7d32;">✓ Observations saved. Proceeding to Generate Results...</span>
    </div>
    """
    return session, html, 4  # Go to tab index 4 (Results)


def load_form_from_session(session: SessionState) -> tuple:
    """Load form values from session state.

    Returns:
        Tuple of form values in component order.
    """
    obs = session.observations
    return (
        obs.smoke_fire_odor,
        ODOR_MAP_REVERSE.get(obs.odor_intensity, "None"),
        obs.visible_soot_deposits,
        obs.soot_pattern_description,
        obs.large_char_particles,
        CHAR_DENSITY_MAP_REVERSE.get(obs.char_density_estimate, "None"),
        obs.ash_like_residue,
        obs.ash_color_texture,
        obs.surface_discoloration,
        obs.discoloration_description,
        obs.dust_loading_interference,
        obs.dust_notes,
        obs.wildfire_indicators,
        obs.wildfire_notes,
        obs.additional_notes,
    )
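A small sketch (illustrative values, not part of the commit) of the UI-to-schema mapping round trip these dictionaries provide, which is what keeps the Radio labels and the stored lowercase enum values in sync:

```python
from ui.state import create_new_session
from ui.tabs.observations import ODOR_MAP, ODOR_MAP_REVERSE

session = create_new_session()
session.observations.odor_intensity = ODOR_MAP["Moderate"]     # stored as "moderate"
print(ODOR_MAP_REVERSE[session.observations.odor_intensity])   # "Moderate" for the Radio
```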
ui/tabs/project.py
ADDED
@@ -0,0 +1,251 @@
"""Tab 1: Project Information.

Collects project details, client information, and facility classification.
"""

import gradio as gr
from typing import Any

from ui.state import SessionState, ProjectFormData


# Map UI values to schema values
FACILITY_MAP = {
    "Non-Operational": "non-operational",
    "Operational": "operational",
    "Public/Childcare": "public-childcare",
}
FACILITY_MAP_REVERSE = {v: k for k, v in FACILITY_MAP.items()}

ERA_MAP = {
    "Pre-1980": "pre-1980",
    "1980-2000": "1980-2000",
    "Post-2000": "post-2000",
}
ERA_MAP_REVERSE = {v: k for k, v in ERA_MAP.items()}


def create_tab() -> dict[str, Any]:
    """Create Tab 1 UI components.

    Returns:
        Dictionary of component references for event wiring.
    """
    gr.Markdown("### Project Information")
    gr.Markdown("*Enter project details, client information, and facility classification.*")

    with gr.Row():
        with gr.Column():
            project_name = gr.Textbox(
                label="Project/Facility Name *",
                placeholder="e.g., ABC Warehouse",
                elem_id="project_name",
            )
            address = gr.Textbox(
                label="Street Address *",
                elem_id="address",
            )
            with gr.Row():
                city = gr.Textbox(label="City *", elem_id="city")
                state = gr.Textbox(
                    label="State *",
                    max_lines=1,
                    elem_id="state",
                )
                zip_code = gr.Textbox(
                    label="ZIP Code *",
                    max_lines=1,
                    elem_id="zip_code",
                )

        with gr.Column():
            client_name = gr.Textbox(
                label="Client Name *",
                elem_id="client_name",
            )
            fire_date = gr.Textbox(
                label="Fire Date *",
                placeholder="YYYY-MM-DD",
                elem_id="fire_date",
            )
            assessment_date = gr.Textbox(
                label="Assessment Date *",
                placeholder="YYYY-MM-DD",
                elem_id="assessment_date",
            )

    with gr.Row():
        facility_classification = gr.Radio(
            choices=["Non-Operational", "Operational", "Public/Childcare"],
            label="Facility Classification *",
            value="Non-Operational",
            info="Affects clearance thresholds (see FDAM §3.1)",
            elem_id="facility_classification",
        )
        construction_era = gr.Radio(
            choices=["Pre-1980", "1980-2000", "Post-2000"],
            label="Construction Era *",
            value="Post-2000",
            info="Affects LBP/ACM regulatory flags",
            elem_id="construction_era",
        )

    with gr.Row():
        assessor_name = gr.Textbox(
            label="Assessor Name *",
            elem_id="assessor_name",
        )
        assessor_credentials = gr.Textbox(
            label="Credentials (optional)",
            placeholder="CIH, CSP, etc.",
            elem_id="assessor_credentials",
        )

    # Validation status display
    with gr.Row():
        validation_status = gr.HTML(
            value="",
            elem_id="tab1_validation",
        )

    with gr.Row():
        validate_btn = gr.Button(
            "Validate & Continue to Rooms →",
            variant="primary",
        )

    return {
        "project_name": project_name,
        "address": address,
        "city": city,
        "state": state,
        "zip_code": zip_code,
        "client_name": client_name,
        "fire_date": fire_date,
        "assessment_date": assessment_date,
        "facility_classification": facility_classification,
        "construction_era": construction_era,
        "assessor_name": assessor_name,
        "assessor_credentials": assessor_credentials,
        "validation_status": validation_status,
        "validate_btn": validate_btn,
    }


def update_session_from_form(
    session: SessionState,
    project_name: str,
    address: str,
    city: str,
    state: str,
    zip_code: str,
    client_name: str,
    fire_date: str,
    assessment_date: str,
    facility_classification: str,
    construction_era: str,
    assessor_name: str,
    assessor_credentials: str,
) -> SessionState:
    """Update session state from form values."""
    session.project = ProjectFormData(
        project_name=project_name or "",
        address=address or "",
        city=city or "",
        state=state or "",
        zip_code=zip_code or "",
        client_name=client_name or "",
        fire_date=fire_date or "",
        assessment_date=assessment_date or "",
        facility_classification=FACILITY_MAP.get(facility_classification, "non-operational"),
        construction_era=ERA_MAP.get(construction_era, "post-2000"),
        assessor_name=assessor_name or "",
        assessor_credentials=assessor_credentials or "",
    )
    session.update_timestamp()
    return session


def validate_and_continue(
    session: SessionState,
    project_name: str,
    address: str,
    city: str,
    state: str,
    zip_code: str,
    client_name: str,
    fire_date: str,
    assessment_date: str,
    facility_classification: str,
    construction_era: str,
    assessor_name: str,
    assessor_credentials: str,
) -> tuple[SessionState, str, int]:
    """Validate Tab 1 and update session.

    Returns:
        Tuple of (updated session, validation HTML, next tab index).
    """
    # Update session first
    session = update_session_from_form(
        session,
        project_name,
        address,
        city,
        state,
        zip_code,
        client_name,
        fire_date,
        assessment_date,
        facility_classification,
        construction_era,
        assessor_name,
        assessor_credentials,
    )

    # Validate
    is_valid, errors = session.validate_tab1()

    if is_valid:
        session.tab1_complete = True
        html = """
        <div style="background: #e8f5e9; border: 1px solid #66bb6a; border-radius: 4px; padding: 10px;">
            <span style="color: #2e7d32;">✓ Project information complete. Proceeding to Rooms tab...</span>
        </div>
        """
        return session, html, 1  # Go to tab index 1 (Rooms)
    else:
        session.tab1_complete = False
        error_items = "".join(f"<li>{e}</li>" for e in errors)
        html = f"""
        <div style="background: #ffebee; border: 1px solid #ef5350; border-radius: 4px; padding: 10px;">
            <strong style="color: #c62828;">Please fix the following:</strong>
            <ul style="margin: 5px 0 0 0; padding-left: 20px; color: #c62828;">
                {error_items}
            </ul>
        </div>
        """
        return session, html, 0  # Stay on current tab


def load_form_from_session(session: SessionState) -> tuple:
    """Load form values from session state.

    Returns:
        Tuple of form values in component order.
    """
    p = session.project
    return (
        p.project_name,
        p.address,
        p.city,
        p.state,
        p.zip_code,
        p.client_name,
        p.fire_date,
        p.assessment_date,
        FACILITY_MAP_REVERSE.get(p.facility_classification, "Non-Operational"),
        ERA_MAP_REVERSE.get(p.construction_era, "Post-2000"),
        p.assessor_name,
        p.assessor_credentials,
    )