Migjomatic committed on
Commit
a5ab0c0
·
1 Parent(s): b5c13ab

Deutsche Fassung

Browse files
Files changed (3) hide show
  1. app.py +4 -4
  2. app_original_backup.py +1 -1
  3. ui_components.py +14 -4
app.py CHANGED
@@ -54,13 +54,13 @@ def initialize_local_models():
54
  def initialize_app():
55
  """Initialize the Streamlit application"""
56
  st.set_page_config(
57
- page_title="Video Frame Analyzer with Ontology",
58
  page_icon="🎥",
59
  layout="wide"
60
  )
61
 
62
- st.title("🎥 Video Frame Analyzer with Ontology-Based Risk Assessment")
63
- st.markdown("Upload a video and analyze frames using AI models with ontology-based safety classification")
64
 
65
 
66
  def setup_local_models():
@@ -72,7 +72,7 @@ def setup_local_models():
72
  try:
73
  local_manager = initialize_local_models()
74
  local_models_available = True
75
- st.success("🤖 Local AI models initialized successfully!")
76
  except Exception as e:
77
  st.warning(f"Local AI models not available: {str(e)}")
78
  st.info("💡 Install AI packages: `pip install torch torchvision transformers accelerate sentencepiece`")
 
54
  def initialize_app():
55
  """Initialize the Streamlit application"""
56
  st.set_page_config(
57
+ page_title="Masterarbeit Prototyp zur Bahngleiserfassung",
58
  page_icon="🎥",
59
  layout="wide"
60
  )
61
 
62
+ st.title("🎥 Masterarbeit Prototyp zur Bahngleiserfassung")
63
+ st.markdown("Dieses Tool wurde im Rahmen einer Masterarbeit entwickelt. Es dient zur **Analyse von Videoaufnahmen auf sicherheitskritische Situationen** im Bahnumfeld. Der Prototyp verwendet **lokale KI-Modelle**, um Personen im Gleisbereich zu erkennen, und kombiniert diese Erkennung mit einer ontologiebasierten Risikobewertung zur Einschätzung potenzieller Gefahren.")
64
 
65
 
66
  def setup_local_models():
 
72
  try:
73
  local_manager = initialize_local_models()
74
  local_models_available = True
75
+ st.success("🤖 Die lokalen Modelle wurden erfolgreich geladen!")
76
  except Exception as e:
77
  st.warning(f"Local AI models not available: {str(e)}")
78
  st.info("💡 Install AI packages: `pip install torch torchvision transformers accelerate sentencepiece`")
app_original_backup.py CHANGED
@@ -440,7 +440,7 @@ def main():
440
  )
441
 
442
  # Process button
443
- process_button = st.button("Process Video", type="primary")
444
 
445
  with col2:
446
  st.header("Results")
 
440
  )
441
 
442
  # Process button
443
+ process_button = st.button("Video auswählen", type="primary")
444
 
445
  with col2:
446
  st.header("Results")
ui_components.py CHANGED
@@ -23,7 +23,7 @@ def render_sidebar_config(settings: Dict, local_models_available: bool, local_ma
23
  Returns configuration settings
24
  """
25
  with st.sidebar:
26
- st.header("Configuration")
27
 
28
  # Model type selection
29
  available_options = []
@@ -34,7 +34,7 @@ def render_sidebar_config(settings: Dict, local_models_available: bool, local_ma
34
  model_type = st.radio(
35
  "Model Type",
36
  available_options,
37
- help="Choose between local AI models or remote Hugging Face API"
38
  )
39
 
40
  # Model selection based on type
@@ -75,11 +75,21 @@ def render_sidebar_config(settings: Dict, local_models_available: bool, local_ma
75
  def _render_local_model_config(local_manager) -> tuple:
76
  """Render local model configuration"""
77
  available_local_models = local_manager.get_available_models()
 
 
 
 
 
 
 
78
  selected_model = st.selectbox(
79
- "Select Local Model",
80
  options=available_local_models,
 
81
  help="Choose between CNN (fast) or Transformer (detailed) models"
82
  )
 
 
83
 
84
  # Show model info
85
  model_info = local_manager.get_model_info()
@@ -141,7 +151,7 @@ def render_prompt_section(config: Dict[str, Any]) -> str:
141
  if (model_type == "Local Models" and
142
  selected_model == "Person on Track Detector"):
143
  # Person on Track Detector works automatically
144
- st.info("🤖 Person on Track Detector works automatically - no prompt needed!")
145
  return "automatic"
146
  else:
147
  # Regular models need user prompt
 
23
  Returns configuration settings
24
  """
25
  with st.sidebar:
26
+ st.header("Konfiguration")
27
 
28
  # Model type selection
29
  available_options = []
 
34
  model_type = st.radio(
35
  "Model Type",
36
  available_options,
37
+ help="Wähle zwischen den KI-Modellen"
38
  )
39
 
40
  # Model selection based on type
 
75
  def _render_local_model_config(local_manager) -> tuple:
76
  """Render local model configuration"""
77
  available_local_models = local_manager.get_available_models()
78
+
79
+ # Standard-Auswahl auf "Person on Track Detector" setzen (falls vorhanden)
80
+ default_index = (
81
+ available_local_models.index("Person on Track Detector")
82
+ if "Person on Track Detector" in available_local_models else 0
83
+ )
84
+
85
  selected_model = st.selectbox(
86
+ "Lokales Modell auswählen",
87
  options=available_local_models,
88
+ index=default_index, # <-- diese Zeile ist neu
89
  help="Choose between CNN (fast) or Transformer (detailed) models"
90
  )
91
+
92
+
93
 
94
  # Show model info
95
  model_info = local_manager.get_model_info()
 
151
  if (model_type == "Local Models" and
152
  selected_model == "Person on Track Detector"):
153
  # Person on Track Detector works automatically
154
+ st.info("🤖 Das Modell ist mit einem ontologiebasierten Ansatz gefüttert und erfordert keinen Prompt")
155
  return "automatic"
156
  else:
157
  # Regular models need user prompt