import gradio as gr
import os
import time
import cv2
import numpy as np
from PIL import Image
import tempfile
import json
import re
# Import consolidated modules
from ocr_module import MVM2OCREngine
from reasoning_engine import run_agent_orchestrator
from verification_service import calculate_symbolic_score
from consensus_fusion import evaluate_consensus
from report_module import generate_mvm2_report, export_to_pdf
from image_enhancing import ImageEnhancer
from flow_module import generate_flow_html
# --- Module-level initialization -------------------------------------------
# Construct the heavyweight engines once at import time so every Gradio
# callback shares the same instances.
ocr_engine = MVM2OCREngine()
enhancer = ImageEnhancer(sigma=1.2)

# Read the custom stylesheet once; the content is later handed to the UI.
# NOTE(review): assumes theme.css lives in the working directory — confirm.
with open("theme.css", "r") as theme_file:
    css_content = theme_file.read()
def create_gauge(label, value, color="#6366f1"):
    """Build an animated SVG circular-gauge snippet for a 0.0-1.0 score.

    NOTE(review): the SVG markup body appears to have been stripped from
    this copy of the file — only the label interpolation survives in the
    template. Confirm against the canonical source before shipping.

    Args:
        label: Caption rendered with the gauge.
        value: Score in [0.0, 1.0]; values outside the range are clamped.
        color: Accent color for the gauge stroke (hex string).

    Returns:
        The gauge template string.
    """
    # Clamp to [0, 100] so out-of-range scores cannot break the dial.
    percentage = min(100, max(0, value * 100))
    # Stroke-dashoffset for a circle of circumference 251.2 (radius ~40).
    dash_offset = 251.2 * (1 - percentage / 100)
    return f"""
{label}
"""
def format_step_viewer(consensus_result):
"""Formats the Reasoning Trace with Step-Level Consensus highlights."""
html = '
'
# We aggregate steps from all agents for a collective view
agent_data = consensus_result.get("detail_scores", [])
for agent in agent_data:
# Simulate step-level analysis for UI purposes:
# In a real system, we'd have Score_j per step. Here we use the agent's overall score.
score = agent["Score_j"]
status_class = "step-valid" if score >= 0.7 else "step-warning"
icon = "✅" if score >= 0.7 else "⚠️"
glow_style = "box-shadow: 0 0 10px rgba(16, 185, 129, 0.2);" if score >= 0.7 else "box-shadow: 0 0 10px rgba(245, 158, 11, 0.2);"
# Get matching agent response for trace
# (This assumes agent_responses were passed in or stored)
# For the UI, we'll just show the representative trace from valid agents
if not agent["is_hallucinating"] or score > 0.4:
verification_msg = f"✅ Ast-Parsed Answer matches consensus group." if score >= 0.7 else f"❌ Answer diverged from consensus."
html += f"""