# Origin note: uploaded as "Update app.py" (commit d5db6c8, verified) by Migjomatic.
#!/usr/bin/env python3
"""
Main Streamlit application for video frame analysis with ontology-based risk assessment
Refactored for better code organization and maintainability
"""
import streamlit as st
import json
from dotenv import load_dotenv

# --- Reproducibility & Threading ---
import os, random
import numpy as np

# If torch/cv2 are available: set seeds/threads for deterministic behavior
try:
    import torch
    TORCH_AVAILABLE = True
except Exception:
    TORCH_AVAILABLE = False

try:
    import cv2
    CV2_AVAILABLE = True
except Exception:
    CV2_AVAILABLE = False

# Fixed seed so repeated runs on the same video produce identical results
SEED = 42
random.seed(SEED)
np.random.seed(SEED)
os.environ["PYTHONHASHSEED"] = str(SEED)

if TORCH_AVAILABLE:
    torch.manual_seed(SEED)
    torch.set_num_threads(1)  # avoids non-deterministic parallel reductions
    # Optional (only if it raises no errors):
    # torch.use_deterministic_algorithms(True)

if CV2_AVAILABLE:
    try:
        cv2.setNumThreads(1)  # makes OpenCV more deterministic
    except Exception:
        pass
# Import our modular components
from video_processing import extract_frames_from_video
from ontology_integration import analyze_scene_with_ontology, extract_scene_description
from model_processing import process_frame
from ui_components import (
render_sidebar_config,
render_input_section,
render_prompt_section,
render_process_button,
render_results_header,
render_frame_result,
render_validation_errors,
render_instructions
)
# Try to import local models, fall back gracefully if not available
try:
    from local_models import get_local_model_manager
    LOCAL_MODELS_AVAILABLE = True
except ImportError as e:
    LOCAL_MODELS_AVAILABLE = False
    print(f"Local models not available: {e}")

    def get_local_model_manager():
        """Fallback stub used when the local_models package is missing."""
        return None
# Load environment variables from a .env file (no-op if the file is absent)
load_dotenv()
def load_settings():
    """Load application settings from settings.json in the working directory.

    Returns:
        dict: Parsed settings, or an empty dict when the file is missing
        or contains invalid JSON (settings are optional, never fatal).
    """
    try:
        with open('settings.json', 'r', encoding='utf-8') as f:
            return json.load(f)
    except (FileNotFoundError, json.JSONDecodeError):
        return {}
@st.cache_resource
def initialize_local_models():
    """Create the local model manager once per Streamlit process.

    st.cache_resource ensures the (expensive) model loading happens only
    on the first call; subsequent script reruns reuse the same instance.
    """
    return get_local_model_manager()
def initialize_app():
    """Configure the Streamlit page and render title plus intro text.

    Must be called before any other Streamlit command, because
    st.set_page_config has to be the first Streamlit call of a run.
    """
    st.set_page_config(
        page_title="Masterarbeit – Prototyp zur Bahngleiserfassung",
        page_icon="🎥",
        layout="wide"
    )
    st.title("🎥 Masterarbeit – Prototyp zur Bahngleiserfassung")
    st.markdown(" Dieses Tool wurde im Rahmen einer Masterarbeit entwickelt. Es dient zur **Analyse von Videoaufnahmen auf sicherheitskritische Situationen** im Bahnumfeld. Der Prototyp verwendet **lokale KI-Modelle**, um Personen im Gleisbereich zu erkennen, und kombiniert diese Erkennung mit einer ontologiebasierten Risikobewertung zur Einschätzung potenzieller Gefahren.")
def setup_local_models():
    """Initialize local AI models and report the status in the UI.

    Returns:
        tuple: (local_manager or None, bool) — the manager instance and
        whether local models are actually usable.
    """
    local_manager = None
    local_models_available = False
    if LOCAL_MODELS_AVAILABLE:
        try:
            local_manager = initialize_local_models()
            local_models_available = True
            # Message typo fixed: was "geladen.!"
            st.success("🤖 Die lokalen Modelle wurden erfolgreich geladen!")
        except Exception as e:
            st.warning(f"Local AI models not available: {str(e)}")
            st.info("💡 Install AI packages: `pip install torch torchvision transformers accelerate sentencepiece`")
            local_models_available = False
    else:
        st.info("💡 Local AI models not installed. Install with: `pip install torch torchvision transformers accelerate sentencepiece`")
    return local_manager, local_models_available
def process_video_frames(video_file, config, local_manager=None):
    """Extract frames from the uploaded video and analyze each one.

    Args:
        video_file: Uploaded video file object.
        config: Dict read here for "fps", "use_ontology" and optionally
            "prompt"; it is copied, never mutated.
        local_manager: Optional local model manager for on-device inference.

    Returns:
        list[dict]: One entry per frame with keys 'frame_number',
        'timestamp', 'image', 'result', 'ontology_analysis'. Empty list
        when no frames could be extracted.
    """
    frames = extract_frames_from_video(video_file, config["fps"])
    if not frames:
        st.error("No frames could be extracted from the video")
        return []
    st.success(f"Extracted {len(frames)} frames from video")

    results = []
    progress_bar = st.progress(0)

    # Pass the prompt along without mutating the caller's config dict.
    processing_config = config.copy()
    processing_config["prompt"] = config.get("prompt", "")

    total = len(frames)
    for i, frame_data in enumerate(frames):
        with st.spinner(f"Analyzing frame {i+1}/{total}..."):
            # Process frame with the selected model
            result = process_frame(frame_data, processing_config, local_manager)
            # Ontology analysis operates on a textual scene description
            scene_description = extract_scene_description(result)
            ontology_analysis = analyze_scene_with_ontology(scene_description, config["use_ontology"])
            results.append({
                'frame_number': frame_data['frame_number'],
                'timestamp': frame_data['timestamp'],
                'image': frame_data['frame'],
                'result': result,
                'ontology_analysis': ontology_analysis
            })
        progress_bar.progress((i + 1) / total)
    return results
def validate_inputs(video_file, prompt, config, local_models_available):
    """Check that everything required for processing is present.

    Args:
        video_file: Uploaded video (falsy when nothing was provided).
        prompt: User prompt text. May be empty only for the local
            "Person on Track Detector" model, which needs no prompt.
        config: Dict with at least "model_type", "selected_model"
            and "api_token".
        local_models_available: Whether local models were loaded.

    Returns:
        bool: True when processing can proceed, False otherwise.
    """
    model_type = config["model_type"]
    selected_model = config["selected_model"]

    # A video is always required.
    if not video_file:
        return False

    # A prompt is required unless the promptless local detector is selected.
    prompt_optional = (model_type == "Local Models"
                       and selected_model == "Person on Track Detector")
    if not prompt and not prompt_optional:
        return False

    # Remote API calls need an access token.
    if model_type == "Remote API" and not config["api_token"]:
        return False

    # Local models must actually be available when selected.
    if model_type == "Local Models" and not local_models_available:
        return False

    return True
# --- Password check (from env, with hard-coded fallback) ---
import os

def check_password() -> bool:
    """Gate the app behind a simple sidebar password prompt.

    The expected password is read from the APP_PASSWORD environment
    variable; the historical hard-coded value remains as fallback so
    existing deployments keep working. NOTE(review): storing a default
    password in source is insecure — set APP_PASSWORD in production.

    Returns:
        bool: True when the correct password was entered.
    """
    st.sidebar.title("🔐 Zugriff")
    password = st.sidebar.text_input("Passwort", type="password")
    expected = os.getenv("APP_PASSWORD", "rexhbeqaj")
    if password == expected:
        st.sidebar.success("Zugang erlaubt ✅")
        return True
    elif password:
        st.sidebar.error("❌ Falsches Passwort")
    # Empty input: explicitly return False (was implicit None).
    return False
def main():
    """Main application entry point."""
    # 1) Configure the page (must be the first Streamlit call)
    initialize_app()

    # 2) Show diagnostics (library versions)
    with st.expander("Diagnostics (Versionen)"):
        versions = {}
        # Probe each optional dependency; "n/a" when not importable.
        for label, module_name in [
            ("opencv", "cv2"),
            ("numpy", "numpy"),
            ("torch", "torch"),
            ("transformers", "transformers"),
            ("pillow", "PIL"),
        ]:
            try:
                versions[label] = __import__(module_name).__version__
            except Exception:
                versions[label] = "n/a"
        st.write(versions)

    # 3) Check password
    if not check_password():
        st.stop()

    # 4) Settings & models
    settings = load_settings()
    local_manager, local_models_available = setup_local_models()

    # 5) Layout & UI
    col1, col2 = st.columns([1, 1])
    with col1:
        config = render_sidebar_config(settings, local_models_available, local_manager)
        input_data = render_input_section()
        video_file = input_data["video_file"]
        prompt = render_prompt_section(config)
        process_button = render_process_button()
    with col2:
        results_container = render_results_header()

    # 6) Processing logic
    if process_button:
        if validate_inputs(video_file, prompt, config, local_models_available):
            config["prompt"] = prompt
            with st.spinner("Processing video..."):
                results = process_video_frames(video_file, config, local_manager)
            if results:
                with results_container:
                    st.subheader("Analysis Results")
                    # Tally ontology severities across all frames
                    severity_counts = {}
                    for r in results:
                        sev = r['ontology_analysis'].get('severity', 'NONE')
                        severity_counts[sev] = severity_counts.get(sev, 0) + 1
                    if config["use_ontology"] and severity_counts:
                        st.write("**Summary:**")
                        cols = st.columns(len(severity_counts))
                        icon = {'NONE':'✅','LOW':'🟢','MEDIUM':'🟠','HIGH':'⚠️','CRITICAL':'🚨'}
                        for i, (sev, cnt) in enumerate(severity_counts.items()):
                            with cols[i]:
                                st.metric(f"{icon.get(sev,'❓')} {sev}", cnt)
                        st.divider()
                    for rd in results:
                        render_frame_result(rd)
        else:
            render_validation_errors(
                video_file, prompt, config["api_token"],
                config["model_type"], local_models_available, config["selected_model"]
            )
    render_instructions()
# Standard script entry guard: run the app only when executed directly.
if __name__ == "__main__":
    main()