"""FDAM AI Pipeline - Fire Damage Assessment Methodology v4.0.1
Main Gradio application entry point with session state and chat functionality.
Simplified UI: 2 tabs (Input + Results/Chat).
"""
import gradio as gr
from config.logging import setup_logging
from config.settings import settings
# Initialize logging before any other imports that might log
setup_logging(settings.log_level)
import logging
logger = logging.getLogger(__name__)
from models.loader import get_models
from ui.state import SessionState, create_new_session
from ui.storage import get_head_html
from ui.tabs import input_tab, results_tab
from ui import samples
from pipeline.chat import ChatHandler, get_quick_action_message
# Keyboard shortcuts JavaScript (Ctrl+1-2 for tab navigation).
# NOTE(review): this string is currently empty, so no shortcuts are actually
# installed — confirm whether the JS was removed intentionally or lost.
KEYBOARD_JS = """
"""
# Validation CSS classes applied by the input tab to flag field state:
# green border for valid fields, red border for invalid ones.
VALIDATION_CSS = """
.valid-field input, .valid-field textarea {
border-color: #66bb6a !important;
}
.invalid-field input, .invalid-field textarea {
border-color: #ef5350 !important;
}
"""
def ensure_rag_index():
    """Ensure RAG index is built. Builds on first run if empty.

    Best-effort: any failure is logged as a warning and the app falls
    back to non-RAG behavior rather than crashing at startup.
    """
    from pathlib import Path
    try:
        # An index counts as present when the chroma_db directory exists
        # and contains at least one entry.
        chroma_path = Path(__file__).parent / "chroma_db"
        index_present = chroma_path.exists() and any(chroma_path.iterdir())
        if index_present:
            logger.info("RAG index found")
            return
        logger.info("RAG index empty or missing - building from RAG-KB...")
        # Imported lazily so the heavy indexing stack only loads when needed.
        from rag.index_builder import build_index
        stats = build_index(rebuild=False)
        logger.info(f"RAG index built: {stats['chunks_created']} chunks from {stats['documents_processed']} documents")
    except Exception as e:
        logger.warning(f"RAG index build failed (will use fallback): {e}")
def create_app() -> gr.Blocks:
    """Create the main Gradio application.

    Assembles the simplified 2-tab UI (1. Input, 2. Results + Chat),
    initializes the model stack and RAG index eagerly so the first
    request does not pay startup cost, and wires all event handlers.

    Returns:
        The fully wired, ready-to-launch ``gr.Blocks`` instance.
    """
    # Initialize models at startup
    model_stack = get_models()
    # Ensure RAG index is built (builds on first run)
    ensure_rag_index()
    # Initialize chat handler; it shares the model stack with generation
    chat_handler = ChatHandler(model_stack)
    with gr.Blocks(
        title="FDAM AI Pipeline - Fire Damage Assessment",
        css=VALIDATION_CSS,
        head=get_head_html(KEYBOARD_JS),
    ) as app:
        # Session state (stored in Gradio State component; Gradio keeps one
        # copy per browser session, so concurrent users do not share data)
        session_state = gr.State(value=create_new_session())
        # Header
        gr.Markdown(
            """
            # FDAM AI Pipeline
            ## Fire Damage Assessment Methodology v4.0.1
            Upload images and room information to generate a professional
            Cleaning Specification / Scope of Work.
            """
        )
        # Mode indicator (shown only when mock models are configured)
        if settings.mock_models:
            gr.Markdown(
                """
                > **Development Mode**: Using mock models for testing.
                > Set `MOCK_MODELS=false` for production inference.
                """
            )
        # Sample loader dropdown + status message
        with gr.Row():
            sample_dropdown = gr.Dropdown(
                label="Load Sample",
                choices=samples.get_sample_choices(),
                value="",
                elem_id="sample_dropdown",
                scale=2,
            )
            sample_status = gr.HTML(
                value="",
                elem_id="sample_status",
            )
        # Tab navigation (2 tabs)
        with gr.Tabs() as tabs:
            # Tab 1: Input (combined room + images + observations).
            # tab1/tab2 are dicts of component references returned by the
            # tab builder modules.
            tab_input = gr.Tab("1. Input", id=0, elem_id="tab-input")
            with tab_input:
                tab1 = input_tab.create_tab()
            # Tab 2: Results + Chat
            tab_results = gr.Tab("2. Results", id=1, elem_id="tab-results")
            with tab_results:
                tab2 = results_tab.create_tab()
        # --- Event Handlers ---
        # Sample Loader
        def handle_sample_load(scenario_id: str, current_session: SessionState):
            """Handle sample dropdown selection.

            Returns a 31-tuple matching the outputs wired below:
            (session, 9 room values, 3 image values, 15 observation
            values, tabs update, status HTML, dropdown reset value).
            The dropdown is reset to "" at the end of each load so the
            same sample can be re-selected later.
            """
            if not scenario_id:
                # Empty selection (including our own reset) — reload the
                # current session's data unchanged and clear the status.
                return (
                    current_session,
                    *input_tab.load_room_from_session(current_session),
                    *input_tab.load_images_from_session(current_session),
                    *input_tab.load_observations_from_session(current_session),
                    gr.update(),
                    "",
                    "",
                )
            # Load the sample
            new_session = samples.load_sample(scenario_id)
            if not new_session:
                # Unknown scenario id: keep the current session intact and
                # surface an error message in the status area.
                return (
                    current_session,
                    *input_tab.load_room_from_session(current_session),
                    *input_tab.load_images_from_session(current_session),
                    *input_tab.load_observations_from_session(current_session),
                    gr.update(),
                    'Error: Sample not found',
                    "",
                )
            # Get scenario name for status message
            scenario = samples.get_scenario_by_id(scenario_id)
            name = scenario.name if scenario else scenario_id
            # Load form values from new session
            room_values = input_tab.load_room_from_session(new_session)
            image_values = input_tab.load_images_from_session(new_session)
            obs_values = input_tab.load_observations_from_session(new_session)
            return (
                new_session,
                *room_values,
                *image_values,
                *obs_values,
                gr.update(selected=0),  # Stay on Input tab
                f'Loaded sample: {name}',
                "",  # reset dropdown
            )
        sample_dropdown.change(
            fn=handle_sample_load,
            inputs=[sample_dropdown, session_state],
            outputs=[
                session_state,
                # Room outputs (9)
                tab1["room_name"],
                tab1["room_length"],
                tab1["room_width"],
                tab1["room_height_preset"],
                tab1["room_height_custom"],
                tab1["floor_area"],
                tab1["room_volume"],
                tab1["facility_classification"],
                tab1["construction_era"],
                # Image outputs (3)
                tab1["images_gallery"],
                tab1["image_count"],
                tab1["resume_warning"],
                # Observation outputs (15)
                tab1["smoke_odor"],
                tab1["odor_intensity"],
                tab1["visible_soot"],
                tab1["soot_description"],
                tab1["large_char"],
                tab1["char_density"],
                tab1["ash_residue"],
                tab1["ash_description"],
                tab1["surface_discoloration"],
                tab1["discoloration_description"],
                tab1["dust_interference"],
                tab1["dust_notes"],
                tab1["wildfire_indicators"],
                tab1["wildfire_notes"],
                tab1["additional_notes"],
                # Navigation
                tabs,
                sample_status,
                sample_dropdown,
            ],
        )
        # --- Tab 1: Input ---
        # Room field changes - save to session and update calculations
        def on_room_field_change(
            session: SessionState,
            name: str,
            length: float | None,
            width: float | None,
            height_preset: int | None,
            height_custom: float | None,
            facility_classification: str,
            construction_era: str,
        ):
            """Save room data and update calculated values.

            Returns (updated session, floor area, room volume) — the
            derived values are recomputed from the geometry fields on
            every change.
            """
            updated_session = input_tab.save_room_to_session(
                session, name, length, width, height_preset, height_custom,
                facility_classification, construction_era
            )
            floor_area, volume = input_tab.update_calculated_values(
                length, width, height_preset, height_custom
            )
            return updated_session, floor_area, volume
        room_inputs = [
            session_state,
            tab1["room_name"],
            tab1["room_length"],
            tab1["room_width"],
            tab1["room_height_preset"],
            tab1["room_height_custom"],
            tab1["facility_classification"],
            tab1["construction_era"],
        ]
        room_outputs = [session_state, tab1["floor_area"], tab1["room_volume"]]
        # Every room field shares the same change handler; each change sends
        # the full set of room inputs so the session stays complete.
        for input_component in [
            tab1["room_name"],
            tab1["room_length"],
            tab1["room_width"],
            tab1["room_height_preset"],
            tab1["room_height_custom"],
            tab1["facility_classification"],
            tab1["construction_era"],
        ]:
            input_component.change(
                fn=on_room_field_change,
                inputs=room_inputs,
                outputs=room_outputs,
            )
        # Show/hide custom height input when the preset selection changes
        tab1["room_height_preset"].change(
            fn=input_tab.on_height_preset_change,
            inputs=[tab1["room_height_preset"]],
            outputs=[tab1["room_height_custom"]],
        )
        # Image handling: add the staged upload + description to the session,
        # then clear the upload widgets for the next image.
        tab1["add_image_btn"].click(
            fn=input_tab.add_image,
            inputs=[
                session_state,
                tab1["image_upload"],
                tab1["image_description"],
            ],
            outputs=[
                session_state,
                tab1["images_gallery"],
                tab1["validation_status"],
                tab1["image_count"],
                tab1["image_upload"],
                tab1["image_description"],
            ],
        )
        # Clear the staged (not-yet-added) upload and its description
        tab1["clear_upload_btn"].click(
            fn=lambda: (None, ""),
            outputs=[
                tab1["image_upload"],
                tab1["image_description"],
            ],
        )
        tab1["remove_last_btn"].click(
            fn=input_tab.remove_last_image,
            inputs=[session_state],
            outputs=[
                session_state,
                tab1["images_gallery"],
                tab1["validation_status"],
                tab1["image_count"],
            ],
        )
        tab1["clear_all_btn"].click(
            fn=input_tab.clear_all_images,
            inputs=[session_state],
            outputs=[
                session_state,
                tab1["images_gallery"],
                tab1["validation_status"],
                tab1["image_count"],
            ],
        )
        # Generate button - validate and switch to results
        def on_generate_click(
            session: SessionState,
            smoke_odor: bool,
            odor_intensity: str,
            visible_soot: bool,
            soot_description: str,
            large_char: bool,
            char_density: str,
            ash_residue: bool,
            ash_description: str,
            surface_discoloration: bool,
            discoloration_description: str,
            dust_interference: bool,
            dust_notes: str,
            wildfire_indicators: bool,
            wildfire_notes: str,
            additional_notes: str,
        ):
            """Save observations and validate before generating.

            Observations are persisted first so validation sees the
            latest values; `validate_and_generate` decides whether to
            switch to the Results tab.
            """
            # Save observations first
            session = input_tab.save_observations_to_session(
                session,
                smoke_odor, odor_intensity, visible_soot, soot_description,
                large_char, char_density, ash_residue, ash_description,
                surface_discoloration, discoloration_description,
                dust_interference, dust_notes, wildfire_indicators,
                wildfire_notes, additional_notes,
            )
            # Validate and potentially switch tabs
            return input_tab.validate_and_generate(session)
        tab1["generate_btn"].click(
            fn=on_generate_click,
            inputs=[
                session_state,
                tab1["smoke_odor"],
                tab1["odor_intensity"],
                tab1["visible_soot"],
                tab1["soot_description"],
                tab1["large_char"],
                tab1["char_density"],
                tab1["ash_residue"],
                tab1["ash_description"],
                tab1["surface_discoloration"],
                tab1["discoloration_description"],
                tab1["dust_interference"],
                tab1["dust_notes"],
                tab1["wildfire_indicators"],
                tab1["wildfire_notes"],
                tab1["additional_notes"],
            ],
            outputs=[
                session_state,
                tab1["validation_status"],
                tabs,
            ],
        )
        # --- Tab 2: Results + Chat ---
        # Generate assessment (generate and regenerate share one handler
        # and output list)
        tab2["generate_btn"].click(
            fn=results_tab.generate_assessment,
            inputs=[session_state],
            outputs=[
                session_state,
                tab2["processing_status"],
                tab2["progress_html"],
                tab2["annotated_gallery"],
                tab2["stats_output"],
                tab2["sow_output"],
                tab2["download_md"],
                tab2["download_pdf"],
                tab2["chatbot"],
            ],
        )
        tab2["regenerate_btn"].click(
            fn=results_tab.generate_assessment,
            inputs=[session_state],
            outputs=[
                session_state,
                tab2["processing_status"],
                tab2["progress_html"],
                tab2["annotated_gallery"],
                tab2["stats_output"],
                tab2["sow_output"],
                tab2["download_md"],
                tab2["download_pdf"],
                tab2["chatbot"],
            ],
        )
        # Back to input
        tab2["back_btn"].click(
            fn=lambda: gr.update(selected=0),
            outputs=[tabs],
        )
        # Reset document
        def on_reset_document(session: SessionState):
            """Reset document to original and regenerate downloads.

            Returns (session, document markdown, md path, pdf path).
            """
            session, doc = results_tab.reset_document(session)
            md_path, pdf_path = results_tab.regenerate_downloads(session)
            return session, doc, md_path, pdf_path
        tab2["reset_doc_btn"].click(
            fn=on_reset_document,
            inputs=[session_state],
            outputs=[
                session_state,
                tab2["sow_output"],
                tab2["download_md"],
                tab2["download_pdf"],
            ],
        )
        # Chat functionality
        def handle_chat_message(
            message: str,
            session: SessionState,
            chat_history: list[dict],
        ):
            """Process chat message and update UI.

            Returns (session, chat history, cleared input, document
            markdown, md download path, pdf download path). If the chat
            handler produced a document edit, it is applied to the
            session document and the download files are regenerated.

            NOTE(review): on turns with no edit this returns None for
            both download paths, which may clear the existing download
            files in the UI — confirm whether gr.update() is intended.
            """
            if not message.strip():
                return session, chat_history, "", session.generated_document or "", None, None
            response, edit, updated_history = chat_handler.process_message(
                message, session, chat_history
            )
            # Apply document edit if present
            if edit and session.generated_document:
                session.generated_document = chat_handler.apply_document_edit(
                    session.generated_document, edit
                )
                session.update_timestamp()
                # Regenerate downloads
                md_path, pdf_path = results_tab.regenerate_downloads(session)
            else:
                md_path, pdf_path = None, None
            # Store chat history in session
            session.chat_history = updated_history
            return (
                session,
                updated_history,
                "",  # Clear input
                session.generated_document or "",
                md_path,
                pdf_path,
            )
        # Chat send button
        tab2["chat_send_btn"].click(
            fn=handle_chat_message,
            inputs=[tab2["chat_input"], session_state, tab2["chatbot"]],
            outputs=[
                session_state,
                tab2["chatbot"],
                tab2["chat_input"],
                tab2["sow_output"],
                tab2["download_md"],
                tab2["download_pdf"],
            ],
        )
        # Chat input enter key
        tab2["chat_input"].submit(
            fn=handle_chat_message,
            inputs=[tab2["chat_input"], session_state, tab2["chatbot"]],
            outputs=[
                session_state,
                tab2["chatbot"],
                tab2["chat_input"],
                tab2["sow_output"],
                tab2["download_md"],
                tab2["download_pdf"],
            ],
        )
        # Quick action buttons: each sends a canned message through the
        # normal chat pipeline.
        def send_quick_action(action_key: str, session: SessionState, chat_history: list[dict]):
            """Send a quick action message.

            Looks up the canned message for `action_key` and delegates to
            `handle_chat_message`, so the return shape is identical.
            """
            message = get_quick_action_message(action_key)
            return handle_chat_message(message, session, chat_history)
        tab2["quick_explain_zones"].click(
            fn=lambda s, h: send_quick_action("explain_zones", s, h),
            inputs=[session_state, tab2["chatbot"]],
            outputs=[
                session_state,
                tab2["chatbot"],
                tab2["chat_input"],
                tab2["sow_output"],
                tab2["download_md"],
                tab2["download_pdf"],
            ],
        )
        tab2["quick_explain_materials"].click(
            fn=lambda s, h: send_quick_action("explain_materials", s, h),
            inputs=[session_state, tab2["chatbot"]],
            outputs=[
                session_state,
                tab2["chatbot"],
                tab2["chat_input"],
                tab2["sow_output"],
                tab2["download_md"],
                tab2["download_pdf"],
            ],
        )
        tab2["quick_sampling"].click(
            fn=lambda s, h: send_quick_action("explain_sampling", s, h),
            inputs=[session_state, tab2["chatbot"]],
            outputs=[
                session_state,
                tab2["chatbot"],
                tab2["chat_input"],
                tab2["sow_output"],
                tab2["download_md"],
                tab2["download_pdf"],
            ],
        )
        tab2["quick_add_note"].click(
            fn=lambda s, h: send_quick_action("add_note", s, h),
            inputs=[session_state, tab2["chatbot"]],
            outputs=[
                session_state,
                tab2["chatbot"],
                tab2["chat_input"],
                tab2["sow_output"],
                tab2["download_md"],
                tab2["download_pdf"],
            ],
        )
        # --- Tab Select Handlers ---
        # Load data when switching to Input tab (keeps form in sync after a
        # sample load or programmatic session change)
        def load_input_tab(session: SessionState):
            """Load all input data when tab is selected.

            Returns the 27 form values (9 room + 3 image + 15
            observation) in the order the outputs below expect.
            """
            room_values = input_tab.load_room_from_session(session)
            image_values = input_tab.load_images_from_session(session)
            obs_values = input_tab.load_observations_from_session(session)
            return (*room_values, *image_values, *obs_values)
        tab_input.select(
            fn=load_input_tab,
            inputs=[session_state],
            outputs=[
                # Room (9)
                tab1["room_name"],
                tab1["room_length"],
                tab1["room_width"],
                tab1["room_height_preset"],
                tab1["room_height_custom"],
                tab1["floor_area"],
                tab1["room_volume"],
                tab1["facility_classification"],
                tab1["construction_era"],
                # Images (3)
                tab1["images_gallery"],
                tab1["image_count"],
                tab1["resume_warning"],
                # Observations (15)
                tab1["smoke_odor"],
                tab1["odor_intensity"],
                tab1["visible_soot"],
                tab1["soot_description"],
                tab1["large_char"],
                tab1["char_density"],
                tab1["ash_residue"],
                tab1["ash_description"],
                tab1["surface_discoloration"],
                tab1["discoloration_description"],
                tab1["dust_interference"],
                tab1["dust_notes"],
                tab1["wildfire_indicators"],
                tab1["wildfire_notes"],
                tab1["additional_notes"],
            ],
        )
        # Load data when switching to Results tab
        def load_results_tab(session: SessionState):
            """Load results data when tab is selected.

            Returns (document markdown or placeholder, chat history).
            """
            doc = session.generated_document or "*Generate an assessment to see results here.*"
            chat = session.chat_history or []
            return doc, chat
        tab_results.select(
            fn=load_results_tab,
            inputs=[session_state],
            outputs=[
                tab2["sow_output"],
                tab2["chatbot"],
            ],
        )
    return app
def main():
    """Entry point: log startup configuration and launch the Gradio app."""
    host, port = settings.server_host, settings.server_port
    # Startup banner — surfaces the effective configuration in the logs.
    logger.info("Starting FDAM AI Pipeline v4.0.1")
    logger.info(f"Mock models: {settings.mock_models}")
    logger.info(f"Log level: {settings.log_level}")
    logger.info(f"Server: {host}:{port}")
    application = create_app()
    # share=False: never expose a public Gradio tunnel for this app.
    application.launch(server_name=host, server_port=port, share=False)
if __name__ == "__main__":
main()