"""Tab 5: Generate Results.

Process all inputs and generate assessment outputs.
"""
import html
import tempfile
from datetime import datetime
from typing import Any, Optional

import gradio as gr

from pipeline import FDAMPipeline, PDFGenerator, PipelineResult
from ui.components import create_progress_html, create_stats_dict, image_store
from ui.state import SessionState
def create_tab() -> dict[str, Any]:
    """Create Tab 5 UI components.

    Lays out the "Generate Results" tab: a pre-flight status card, the
    generate button with a status box, a progress display, the results
    panels (annotated image gallery, stats JSON, SOW markdown), download
    slots for the Markdown/PDF outputs, and navigation buttons.

    No event handlers are attached here — components are returned by
    reference so the caller can wire events (presumably against
    check_preflight/generate_assessment below; wiring not visible here).

    Returns:
        Dictionary of component references for event wiring, keyed by
        the component's role (e.g. "generate_btn", "sow_output").
    """
    gr.Markdown("### Generate Assessment")
    # Pre-flight check summary — empty until populated via event wiring
    # (see check_preflight, which produces the HTML shown here).
    with gr.Row():
        preflight_status = gr.HTML(
            value="",
            elem_id="preflight_status",
        )
    gr.Markdown(
        """
        Click below to process all inputs and generate:
        1. **Cleaning Specification / Scope of Work** (primary output)
        2. **Sampling Plan Recommendations**
        3. **Confidence Report**
        """
    )
    # Trigger button and a read-only status box, side by side.
    with gr.Row():
        generate_btn = gr.Button(
            "Generate Assessment",
            variant="primary",
            scale=2,
            elem_id="generate_btn",
        )
        processing_status = gr.Textbox(
            label="Status",
            value="Ready",
            interactive=False,
            elem_id="processing_status",
        )
    # Progress display (HTML bar rendered by create_progress_html).
    with gr.Row():
        progress_html = gr.HTML(
            value="",
            elem_id="progress_html",
        )
    gr.Markdown("---")
    gr.Markdown("### Results")
    # Two-column results area: annotated images left, stats right.
    with gr.Row():
        with gr.Column():
            gr.Markdown("#### Annotated Images")
            annotated_gallery = gr.Gallery(
                label="AI-Analyzed Images",
                columns=2,
                height="auto",
                elem_id="annotated_gallery",
            )
        with gr.Column():
            gr.Markdown("#### Assessment Summary")
            stats_output = gr.JSON(
                label="Statistics",
                elem_id="stats_output",
            )
    # Primary output: the generated SOW rendered as Markdown.
    gr.Markdown("### Cleaning Specification / Scope of Work")
    sow_output = gr.Markdown(
        value="*Results will appear here after generation.*",
        elem_id="sow_output",
    )
    # File download slots for the two exported formats.
    gr.Markdown("### Downloads")
    with gr.Row():
        download_md = gr.File(
            label="Download Markdown (.md)",
            elem_id="download_md",
        )
        download_pdf = gr.File(
            label="Download PDF (.pdf)",
            elem_id="download_pdf",
        )
    # Navigation
    with gr.Row():
        back_btn = gr.Button("← Back to Observations")
        regenerate_btn = gr.Button(
            "Regenerate Assessment",
            variant="secondary",
        )
    return {
        "preflight_status": preflight_status,
        "generate_btn": generate_btn,
        "processing_status": processing_status,
        "progress_html": progress_html,
        "annotated_gallery": annotated_gallery,
        "stats_output": stats_output,
        "sow_output": sow_output,
        "download_md": download_md,
        "download_pdf": download_pdf,
        "back_btn": back_btn,
        "regenerate_btn": regenerate_btn,
    }
def check_preflight(session: SessionState) -> str:
    """Check whether an assessment can be generated for this session.

    Combines the session's own validation (``session.can_generate()``)
    with a liveness check of the in-memory image store: image metadata
    survives in the session, but the image bytes live in a process-local
    store and can be lost (e.g. after a server restart), requiring
    re-upload.

    Args:
        session: Current UI session state.

    Returns:
        HTML string: a green "ready" card summarizing what will be
        processed, or a red card listing the blocking errors.
    """
    can_generate, errors = session.can_generate()
    # Also check if images are in memory
    expected_ids = [img.id for img in session.images]
    missing_ids = image_store.get_missing_ids(expected_ids)
    if missing_ids:
        errors.append(f"{len(missing_ids)} image(s) need to be re-uploaded")
        can_generate = False
    if can_generate:
        # Show summary of what will be processed. Escape values before
        # interpolating into HTML — room/facility names are user-supplied.
        stats = create_stats_dict(session)
        esc = {k: html.escape(str(v)) for k, v in stats.items()}
        return f"""
        <div style="background: #e8f5e9; border: 1px solid #66bb6a; border-radius: 4px; padding: 15px;">
            <strong style="color: #2e7d32;">✓ Ready to Generate</strong>
            <div style="margin-top: 10px; color: #333;">
                <strong>Room:</strong> {esc['room_name']}<br>
                <strong>Images:</strong> {esc['images']}<br>
                <strong>Total Area:</strong> {esc['total_floor_area_sf']} SF<br>
                <strong>Facility:</strong> {esc['facility_classification']}<br>
                <strong>Era:</strong> {esc['construction_era']}
            </div>
        </div>
        """
    else:
        # Escape each message — validation errors can embed user-supplied
        # text such as filenames.
        error_items = "".join(f"<li>{html.escape(e)}</li>" for e in errors)
        return f"""
        <div style="background: #ffebee; border: 1px solid #ef5350; border-radius: 4px; padding: 15px;">
            <strong style="color: #c62828;">Cannot Generate - Please Fix:</strong>
            <ul style="margin: 10px 0 0 0; padding-left: 20px; color: #c62828;">
                {error_items}
            </ul>
        </div>
        """
def generate_assessment(
    session: SessionState,
    progress: Optional[gr.Progress] = None,
) -> tuple[SessionState, str, str, list[tuple], dict, str, Optional[str], Optional[str]]:
    """Generate the assessment using the FDAM pipeline.

    Runs the pipeline end-to-end, then packages its outputs for the UI:
    stats, annotated images, the SOW markdown, and downloadable .md/.pdf
    files written to temp paths.

    Args:
        session: Current UI session state.
        progress: Optional Gradio progress tracker; pipeline progress is
            forwarded to it when provided.

    Returns:
        Tuple of (session, status, progress_html, annotated_images,
        stats, sow_markdown, md_file_path, pdf_file_path). On pipeline
        failure the list/dict slots are empty, the markdown slot carries
        the error text, and both file paths are None.
    """
    pipeline = FDAMPipeline()

    def progress_callback(prog):
        # Forward pipeline progress into Gradio's tracker when one was given.
        if progress:
            progress(prog.percent, desc=prog.message)

    # Execute pipeline
    result: PipelineResult = pipeline.execute(
        session=session,
        progress_callback=progress_callback,
    )

    # Pipeline-level failure: surface the errors in the markdown pane
    # and leave every other output slot empty.
    if not result.success:
        error_msg = "**Error:** Please fix the following before generating:\n\n"
        error_msg += "\n".join(f"- {e}" for e in result.errors)
        return (
            result.session,
            "Error: Cannot generate",
            "",
            [],
            {},
            error_msg,
            None,
            None,
        )

    # Generate stats dictionary for UI
    stats = pipeline.generate_stats_dict(result)
    # Get markdown content
    sow_markdown = result.document.markdown if result.document else ""

    md_path = None
    pdf_path = None
    try:
        if sow_markdown:
            # Write the Markdown to a named temp file so Gradio's File
            # component can serve it. delete=False because the path is
            # read after this scope exits.
            room_name_safe = session.room.name.replace(' ', '_') if session.room.name else "Room"
            with tempfile.NamedTemporaryFile(
                mode='w',
                encoding='utf-8',  # explicit: document contains non-ASCII (×, ✓)
                suffix='.md',
                delete=False,
                prefix=f"SOW_{room_name_safe}_",
            ) as f:
                f.write(sow_markdown)
                md_path = f.name
            # Generate PDF
            pdf_generator = PDFGenerator()
            pdf_result = pdf_generator.generate_pdf(sow_markdown)
            if pdf_result.success:
                pdf_path = pdf_result.pdf_path
            else:
                result.warnings.append(f"PDF generation failed: {pdf_result.error_message}")
    except Exception as e:
        # Best-effort file export: don't fail the run, but surface the
        # problem as a warning so the status line reflects it instead of
        # it only going to the server log.
        print(f"Error saving files: {e}")
        result.warnings.append(f"Failed to save output files: {e}")

    # Add warnings to status if any
    status = "Complete"
    if result.warnings:
        status = f"Complete ({len(result.warnings)} warnings)"

    return (
        result.session,
        status,
        create_progress_html(6, 6, f"Complete! ({result.execution_time_seconds:.1f}s)"),
        result.annotated_images,
        stats,
        sow_markdown,
        md_path,
        pdf_path,
    )
| def _generate_sow_markdown( | |
| session: SessionState, | |
| stats: dict, | |
| vision_results: dict, | |
| ) -> str: | |
| """Generate Scope of Work markdown document. | |
| This is a placeholder - real implementation uses DocumentGenerator. | |
| Kept for backwards compatibility but should not be called directly. | |
| """ | |
| r = session.room | |
| area = r.length_ft * r.width_ft | |
| volume = area * r.ceiling_height_ft | |
| # Build vision summary | |
| vision_lines = [] | |
| for img_meta in session.images: | |
| result = vision_results.get(img_meta.id, {}) | |
| zone = result.get("zone", {}).get("classification", "N/A") | |
| condition = result.get("condition", {}).get("level", "N/A") | |
| vision_lines.append(f"- **{img_meta.filename}**: Zone={zone}, Condition={condition}") | |
| vision_summary = "\n".join(vision_lines) if vision_lines else "No images analyzed." | |
| # Build observations summary | |
| obs = session.observations | |
| obs_items = [] | |
| if obs.smoke_fire_odor: | |
| obs_items.append(f"- Smoke/fire odor: {obs.odor_intensity}") | |
| if obs.visible_soot_deposits: | |
| obs_items.append(f"- Visible soot deposits: {obs.soot_pattern_description or 'Yes'}") | |
| if obs.large_char_particles: | |
| obs_items.append(f"- Large char particles: {obs.char_density_estimate or 'Yes'}") | |
| if obs.ash_like_residue: | |
| obs_items.append(f"- Ash residue: {obs.ash_color_texture or 'Yes'}") | |
| if obs.surface_discoloration: | |
| obs_items.append(f"- Surface discoloration: {obs.discoloration_description or 'Yes'}") | |
| if obs.wildfire_indicators: | |
| obs_items.append(f"- Wildfire indicators: {obs.wildfire_notes or 'Yes'}") | |
| obs_summary = "\n".join(obs_items) if obs_items else "No significant observations noted." | |
| # Regulatory flags | |
| reg_flags = "\n".join(f"- {f}" for f in stats.get("regulatory_flags", [])) or "None identified." | |
| markdown = f"""# Cleaning Specification / Scope of Work | |
| ## Room Information | |
| | Field | Value | | |
| |-------|-------| | |
| | **Room Name** | {r.name} | | |
| | **Facility Classification** | {r.facility_classification or 'Not specified'} | | |
| | **Construction Era** | {r.construction_era or 'Not specified'} | | |
| --- | |
| ## Scope Summary | |
| | Metric | Value | | |
| |--------|-------| | |
| | Room | {r.name} | | |
| | Total Floor Area | {stats['total_floor_area_sf']} SF | | |
| | Total Volume | {stats['total_volume_cf']} CF | | |
| | Images Analyzed | {stats['total_images']} | | |
| --- | |
| ## Room Details | |
| | Property | Value | | |
| |----------|-------| | |
| | **Room Name** | {r.name} | | |
| | **Dimensions** | {r.length_ft:.0f}' x {r.width_ft:.0f}' x {r.ceiling_height_ft:.0f}' | | |
| | **Floor Area** | {area:,.0f} SF | | |
| | **Volume** | {volume:,.0f} CF | | |
| --- | |
| ## AI Vision Analysis Summary | |
| {vision_summary} | |
| --- | |
| ## Field Observations | |
| {obs_summary} | |
| --- | |
| ## Air Filtration Requirements | |
| Per NADCA ACR 2021, Section 3.6: | |
| - **Required ACH**: 4 air changes per hour | |
| - **Total Volume**: {stats['total_volume_cf']} CF | |
| - **Air Scrubbers Required**: {stats['air_scrubbers_required']} units (2000 CFM each) | |
| - **Calculation**: ({stats['total_volume_cf']} CF × 4 ACH) / (2000 CFM × 60) = {stats['air_scrubbers_required']} units | |
| --- | |
| ## Regulatory Flags | |
| {reg_flags} | |
| --- | |
| ## Sampling Recommendations | |
| *Detailed sampling plan to be generated based on surface inventory and zone classifications.* | |
| --- | |
| ## Disclaimer | |
| This document was generated using AI-assisted analysis and should be reviewed by a qualified | |
| industrial hygienist before implementation. Visual assessments require laboratory confirmation | |
| for definitive particle identification. | |
| --- | |
| *Generated by FDAM AI Pipeline v4.0.1* | |
| *{datetime.now().strftime('%Y-%m-%d %H:%M')}* | |
| """ | |
| return markdown | |