Sompote committed on
Commit
9570014
Β·
verified Β·
1 Parent(s): 2612910

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +1570 -52
app.py CHANGED
@@ -1,62 +1,1580 @@
1
- #!/usr/bin/env python3
2
- """
3
- Soil Boring Log Analyzer - Hugging Face Spaces Version
4
- Optimized for deployment on Hugging Face Spaces with Streamlit
5
- """
6
-
7
  import streamlit as st
 
8
  import os
9
- import shutil
10
- from pathlib import Path
11
-
12
- # Hugging Face Spaces Setup
13
- def setup_hf_environment():
14
- """Setup environment for Hugging Face Spaces"""
15
- # Create .env file from template if it doesn't exist
16
- if not os.path.exists('.env') and os.path.exists('.env_template'):
17
- shutil.copy('.env_template', '.env')
18
- st.info("πŸ”§ Environment template created. Please configure your API keys in the sidebar.")
19
-
20
- # Initialize HF environment
21
- setup_hf_environment()
22
-
23
- # Import main app after environment setup
24
- from app import main
25
 
26
- # Hugging Face Spaces Configuration
27
  st.set_page_config(
28
- page_title="πŸ—οΈ Soil Boring Log Analyzer",
29
  page_icon="πŸ—οΈ",
30
  layout="wide",
31
- initial_sidebar_state="expanded",
32
- menu_items={
33
- 'Get Help': 'https://huggingface.co/spaces/your-username/soil-boring-analyzer',
34
- 'Report a bug': 'https://huggingface.co/spaces/your-username/soil-boring-analyzer/discussions',
35
- 'About': """
36
- # πŸ—οΈ Soil Boring Log Analyzer
37
-
38
- An AI-powered application for analyzing soil boring logs using multiple LLM providers.
39
-
40
- **Features:**
41
- - Multi-LLM Support (OpenRouter, Anthropic, Google)
42
- - PDF/Image document processing
43
- - Professional soil analysis
44
- - Interactive visualizations
45
-
46
- **Powered by:** Streamlit, LangGraph, CrewAI
47
- """
48
- }
49
  )
50
 
51
- # Add Hugging Face Spaces header
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
52
  if __name__ == "__main__":
53
- with st.container():
54
- st.markdown("""
55
- <div style='text-align: center; padding: 1rem; background: linear-gradient(90deg, #ff6b6b, #4ecdc4); color: white; border-radius: 10px; margin-bottom: 1rem;'>
56
- <h2>πŸ—οΈ Soil Boring Log Analyzer</h2>
57
- <p>AI-Powered Geotechnical Analysis | Powered by Multiple LLM Providers</p>
58
- </div>
59
- """, unsafe_allow_html=True)
60
-
61
- # Run main application
62
  main()
 
 
 
 
 
 
 
1
  import streamlit as st
2
+ import json
3
  import os
4
+ from document_processor import DocumentProcessor
5
+ from langgraph_agent import SoilAnalysisAgent
6
+ from crewai_agents import CrewAIGeotechSystem
7
+ from soil_visualizer import SoilProfileVisualizer
8
+ try:
9
+ from config import (
10
+ LLM_PROVIDERS, AVAILABLE_MODELS,
11
+ get_available_providers, get_models_for_provider,
12
+ get_default_provider_and_model, get_api_key
13
+ )
14
+ except ImportError as e:
15
+ st.error(f"Configuration import error: {e}")
16
+ st.stop()
 
 
 
17
 
 
18
  st.set_page_config(
19
+ page_title="Soil Boring Log Analyzer",
20
  page_icon="πŸ—οΈ",
21
  layout="wide",
22
+ initial_sidebar_state="expanded"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
23
  )
24
 
25
def setup_llm_provider_gui():
    """Render the sidebar/main-panel GUI for LLM provider and API key management.

    Two mutually exclusive states:
    - No provider configured: one expander per provider in ``LLM_PROVIDERS``
      with a password input and a save button.
    - At least one provider configured: a masked-key summary per provider,
      a per-provider "Change" flow gated by the
      ``show_key_input_<provider_id>`` session-state flag, and an
      "Add Another Provider" entry point gated by ``show_add_provider``.

    Side effects: writes API keys to .env via save_provider_key_to_env(),
    mutates st.session_state flags, and calls st.rerun() after changes.
    """
    st.subheader("πŸ”‘ LLM Provider Setup")

    # Get available providers
    available_providers = get_available_providers()

    if not available_providers:
        st.warning("⚠️ No API keys found. Please configure at least one LLM provider:")

        # Show a setup expander for each known provider
        for provider_id, provider_info in LLM_PROVIDERS.items():
            with st.expander(f"πŸ”§ Setup {provider_info['name']}"):
                st.markdown(f"**{provider_info['description']}**")

                # API key input (password-masked; unique widget key per provider)
                api_key_input = st.text_input(
                    f"Enter your {provider_info['name']} API Key:",
                    type="password",
                    placeholder=get_api_key_placeholder(provider_id),
                    help=get_provider_help_text(provider_id),
                    key=f"api_key_{provider_id}"
                )

                if st.button(f"πŸ’Ύ Save {provider_info['name']} Key", key=f"save_{provider_id}"):
                    # Validate prefix format before persisting to .env
                    if api_key_input and validate_api_key_format(provider_id, api_key_input):
                        try:
                            save_provider_key_to_env(provider_id, api_key_input)
                            st.success(f"βœ… {provider_info['name']} API key saved successfully!")
                            st.info("πŸ”„ Reloading application to activate your new API key...")
                            st.rerun()
                        except Exception as e:
                            st.error(f"❌ Failed to save API key: {str(e)}")
                    else:
                        st.error(f"❌ Invalid API key format for {provider_info['name']}")
    else:
        # Show configured providers
        st.success(f"βœ… {len(available_providers)} provider(s) configured")

        for provider_id in available_providers:
            provider_info = LLM_PROVIDERS[provider_id]
            current_key = get_api_key(provider_id)
            masked_key = mask_api_key(current_key)

            # Check if user wants to change this provider's key
            if st.session_state.get(f'show_key_input_{provider_id}'):
                # Show input form for changing the API key
                with st.expander(f"πŸ”§ Change {provider_info['name']} API Key", expanded=True):
                    new_api_key = st.text_input(
                        f"Enter new {provider_info['name']} API Key:",
                        type="password",
                        placeholder=get_api_key_placeholder(provider_id),
                        key=f"new_key_{provider_id}"
                    )

                    col1, col2 = st.columns(2)
                    with col1:
                        if st.button(f"πŸ’Ύ Update {provider_info['name']} Key", key=f"update_{provider_id}"):
                            if new_api_key and validate_api_key_format(provider_id, new_api_key):
                                try:
                                    save_provider_key_to_env(provider_id, new_api_key)
                                    st.success(f"βœ… {provider_info['name']} API key updated successfully!")
                                    # Collapse the change form before rerunning
                                    st.session_state[f'show_key_input_{provider_id}'] = False
                                    st.rerun()
                                except Exception as e:
                                    st.error(f"❌ Failed to update API key: {str(e)}")
                            else:
                                st.error(f"❌ Invalid API key format for {provider_info['name']}")

                    with col2:
                        if st.button("❌ Cancel", key=f"cancel_change_{provider_id}"):
                            st.session_state[f'show_key_input_{provider_id}'] = False
                            st.rerun()
            else:
                # Show normal display with change button
                col1, col2 = st.columns([3, 1])
                with col1:
                    st.info(f"**{provider_info['name']}**: {masked_key}")
                with col2:
                    if st.button(f"πŸ”„ Change", key=f"change_{provider_id}"):
                        st.session_state[f'show_key_input_{provider_id}'] = True
                        st.rerun()

        # Add new provider section
        st.markdown("---")
        if st.button("βž• Add Another Provider"):
            st.session_state.show_add_provider = True
            st.rerun()

        if st.session_state.get('show_add_provider'):
            setup_additional_provider()
+
117
def get_current_provider_and_model():
    """Return the active (provider_id, model_id) pair.

    Resolution order:
    1. User selections stored in st.session_state
       (``selected_provider`` / ``selected_model``).
    2. Project defaults from ``get_default_provider_and_model()``.
    3. ``(None, None)`` when no provider is configured at all.

    Returns:
        tuple: (provider_id, model_id), either element may be None.
    """
    # Check session state first — explicit user choice wins over defaults.
    if 'selected_provider' in st.session_state and 'selected_model' in st.session_state:
        return st.session_state.selected_provider, st.session_state.selected_model

    # Fall back to configured defaults.
    try:
        return get_default_provider_and_model()
    except Exception:
        # Bare `except:` narrowed to Exception so KeyboardInterrupt/SystemExit
        # are not swallowed. No providers available -> signal with (None, None).
        return None, None
+
130
def get_api_key_for_current_provider():
    """Return the API key of the currently selected provider, or None.

    Delegates provider resolution to get_current_provider_and_model();
    when no provider is resolved, no key lookup is attempted.
    """
    provider, _model = get_current_provider_and_model()
    return get_api_key(provider) if provider else None
+
137
def refresh_provider_status():
    """Re-read environment variables so freshly saved API keys become visible.

    Reloads the .env file with ``override=True``. Deliberately does NOT
    touch Streamlit session-state UI flags (e.g. ``show_key_input_*``),
    so open key-change forms stay open across reruns.
    """
    from dotenv import load_dotenv

    load_dotenv(override=True)
+
147
def get_api_key_placeholder(provider_id):
    """Return an example-key placeholder string for *provider_id*.

    Unknown providers get a generic prompt.
    """
    if provider_id == "openrouter":
        return "sk-or-v1-..."
    if provider_id == "anthropic":
        return "sk-ant-..."
    if provider_id == "google":
        return "AIza..."
    return "Enter your API key..."
+
156
def get_provider_help_text(provider_id):
    """Return the where-to-get-a-key help text for *provider_id*.

    Unknown providers get an empty string.
    """
    if provider_id == "openrouter":
        return "Get your API key from https://openrouter.ai/keys"
    if provider_id == "anthropic":
        return "Get your API key from https://console.anthropic.com/"
    if provider_id == "google":
        return "Get your API key from https://aistudio.google.com/app/apikey"
    return ""
+
165
def validate_api_key_format(provider_id, api_key):
    """Cheap sanity check of an API key's prefix for a given provider.

    Empty/None keys are always rejected. Providers without a known
    prefix pattern are accepted (True) so new providers aren't blocked.
    """
    if not api_key:
        return False

    # str.startswith accepts a tuple of alternatives, replacing the
    # original per-provider lambda table.
    known_prefixes = {
        "openrouter": ("sk-or-",),
        "anthropic": ("sk-ant-",),
        "google": ("AIza", "GoogleAPIKey"),
    }

    prefixes = known_prefixes.get(provider_id)
    if prefixes is None:
        return True  # Unknown provider: no format to enforce
    return api_key.startswith(prefixes)
+
181
def mask_api_key(api_key):
    """Return a display-safe representation of an API key.

    Long keys (> 12 chars) show the first 8 and last 4 characters;
    shorter keys are fully hidden; missing keys report "Not configured".
    """
    if not api_key:
        return "Not configured"
    if len(api_key) > 12:
        return f"{api_key[:8]}...{api_key[-4:]}"
    return "***configured***"
+
189
def save_provider_key_to_env(provider_id, api_key):
    """Persist a provider's API key to the .env file and the live process.

    Replaces an existing ``ENV_VAR=...`` line (active or ``# ``-commented)
    or appends a new one, then immediately exports the value into
    ``os.environ`` and reloads .env so the running app sees it without
    a restart.

    Args:
        provider_id: Key into LLM_PROVIDERS; its ``api_key_env`` entry
            names the environment variable to write.
        api_key: The raw key value to store.

    Raises:
        Exception: wrapping any underlying I/O or import failure
            (type kept as Exception for backward compatibility with
            callers that catch it).
    """
    env_var = LLM_PROVIDERS[provider_id]["api_key_env"]
    env_path = ".env"

    try:
        # Read existing content (missing file -> start from empty)
        env_content = ""
        if os.path.exists(env_path):
            with open(env_path, 'r') as f:
                env_content = f.read()

        # Update the key in place, or remember that we must append it
        lines = env_content.split('\n')
        updated_lines = []
        key_found = False

        for line in lines:
            # Match both an active assignment and a commented-out template line
            if line.startswith(f"{env_var}=") or line.startswith(f"# {env_var}="):
                # Replace with an active assignment
                updated_lines.append(f"{env_var}={api_key}")
                key_found = True
            else:
                updated_lines.append(line)

        if not key_found:
            # Append, separated by a blank line if the file doesn't end with one
            if updated_lines and updated_lines[-1] != '':
                updated_lines.append('')
            updated_lines.append(f"{env_var}={api_key}")

        # Write the updated file back
        with open(env_path, 'w') as f:
            f.write('\n'.join(updated_lines))

        # Make the key visible to the current process immediately —
        # load_dotenv alone would not help code that already read os.environ.
        os.environ[env_var] = api_key

        # Also reload the .env file to pick up any other changes
        from dotenv import load_dotenv
        load_dotenv(override=True)

    except Exception as e:
        # Preserve the original traceback via explicit exception chaining.
        raise Exception(f"Failed to save {env_var}: {str(e)}") from e
+
236
def setup_additional_provider():
    """Render the "add another provider" form for providers not yet configured.

    Shown only when st.session_state.show_add_provider is set (by
    setup_llm_provider_gui). Offers a selectbox of unconfigured providers,
    validates and saves the entered key, then clears the flag and reruns.
    """
    st.subheader("βž• Add Another Provider")

    # Get providers not yet configured
    available_providers = get_available_providers()
    unconfigured_providers = [p for p in LLM_PROVIDERS.keys() if p not in available_providers]

    if not unconfigured_providers:
        # Nothing left to add — just offer a way to close the panel.
        st.info("βœ… All providers are already configured!")
        st.success("πŸŽ‰ You have access to all available LLM providers")
        if st.button("❌ Close"):
            st.session_state.show_add_provider = False
            st.rerun()
        return

    # Provider selection (display names only)
    provider_names = {p: LLM_PROVIDERS[p]["name"] for p in unconfigured_providers}
    selected_provider_name = st.selectbox(
        "Select Provider to Configure:",
        options=list(provider_names.values())
    )

    # Map the chosen display name back to its provider ID
    selected_provider_id = None
    for pid, pname in provider_names.items():
        if pname == selected_provider_name:
            selected_provider_id = pid
            break

    if selected_provider_id:
        provider_info = LLM_PROVIDERS[selected_provider_id]
        st.markdown(f"**{provider_info['description']}**")

        # API key input (password-masked)
        api_key_input = st.text_input(
            f"Enter your {provider_info['name']} API Key:",
            type="password",
            placeholder=get_api_key_placeholder(selected_provider_id),
            help=get_provider_help_text(selected_provider_id),
            key=f"add_api_key_{selected_provider_id}"
        )

        col1, col2 = st.columns(2)
        with col1:
            if st.button(f"πŸ’Ύ Save {provider_info['name']} Key"):
                # Validate format before persisting
                if api_key_input and validate_api_key_format(selected_provider_id, api_key_input):
                    try:
                        save_provider_key_to_env(selected_provider_id, api_key_input)
                        st.success(f"βœ… {provider_info['name']} API key saved successfully!")
                        st.info("πŸ”„ Reloading application to activate your new API key...")
                        st.session_state.show_add_provider = False
                        st.rerun()
                    except Exception as e:
                        st.error(f"❌ Failed to save API key: {str(e)}")
                else:
                    st.error(f"❌ Invalid API key format for {provider_info['name']}")

        with col2:
            if st.button("❌ Cancel"):
                st.session_state.show_add_provider = False
                st.rerun()
+
300
def initialize_crewai_system():
    """(Re)build the CrewAI system in session state from current settings.

    Uses the currently selected provider/model and its API key; stores the
    instance in st.session_state.crewai_system. No-op when no provider or
    model is resolvable.
    """
    provider, model = get_current_provider_and_model()
    if not provider or not model:
        return

    selected_model = st.session_state.get('selected_model', model)
    current_api_key = get_api_key_for_current_provider()

    # If no API key is available, pass empty string to trigger mock mode
    # (CrewAIGeotechSystem presumably interprets "" as demo/mock — TODO confirm)
    if not current_api_key or not current_api_key.strip():
        current_api_key = ""

    st.session_state.crewai_system = CrewAIGeotechSystem(
        model=selected_model,
        api_key=current_api_key
    )
+
318
def run_crewai_analysis(text_content, image_base64, merge_similar, split_thick):
    """Run the two-stage CrewAI analysis and publish results to session state.

    Stage 1: UnifiedSoilWorkflow extracts structured soil data from the
    document text/image. Stage 2: the CrewAI agent system reviews that data.
    Results are packaged into st.session_state.analysis_results for the
    shared display code.

    Args:
        text_content: Extracted document text (may be empty).
        image_base64: Base64-encoded document image (may be None).
        merge_similar: Whether to merge similar adjacent layers.
        split_thick: Whether to split overly thick layers.

    Returns:
        The CrewAI results dict, or None on any error (errors are rendered
        in the UI rather than raised).
    """
    try:
        from unified_soil_workflow import UnifiedSoilWorkflow

        workflow = UnifiedSoilWorkflow()
        provider, model = get_current_provider_and_model()
        selected_model = st.session_state.get('selected_model', model)
        current_api_key = get_api_key_for_current_provider()

        # Stage 1: get initial soil data from the unified workflow
        soil_data = workflow.analyze_soil_boring_log(
            text_content=text_content,
            image_base64=image_base64,
            model=selected_model,
            api_key=current_api_key,
            merge_similar=merge_similar,
            split_thick=split_thick
        )

        if "error" in soil_data:
            st.error(f"❌ Initial Analysis Error: {soil_data['error']}")
            return None

        # Re-initialize CrewAI system with current settings
        initialize_crewai_system()

        # Show warning if using mock mode (no API key configured)
        if not current_api_key or current_api_key.strip() == "":
            st.warning("⚠️ No API key available. Using mock analysis for demonstration purposes.")

        # Stage 2: run the CrewAI agent analysis on the extracted data
        crewai_results = st.session_state.crewai_system.run_geotechnical_analysis(soil_data)

        # Package results in the shape display_analysis_results() expects
        analysis_results = {
            "soil_data": soil_data,
            "analysis_results": {
                "validation_stats": soil_data.get("validation_stats", {}),
                "optimization": soil_data.get("optimization_results", {}),
                "crewai_analysis": crewai_results
            }
        }

        st.session_state.analysis_results = analysis_results

        # Display a status message keyed off the CrewAI workflow outcome
        layer_count = len(soil_data.get("soil_layers", []))
        workflow_status = crewai_results.get("status", "unknown")

        if workflow_status == "completed_with_revision":
            st.success(f"πŸŽ‰ CrewAI analysis completed with quality control revision! Found {layer_count} soil layers")
            st.info("πŸ“‹ Senior engineer review required re-investigation - final analysis is more accurate")
        elif workflow_status == "error":
            st.error(f"❌ CrewAI analysis failed: {crewai_results.get('error', 'Unknown error')}")
        else:
            st.success(f"πŸŽ‰ CrewAI analysis completed! Found {layer_count} soil layers")
            st.info("βœ… Analysis passed senior engineer review on first attempt")

        return crewai_results

    except Exception as e:
        # Surface unexpected failures in the UI instead of crashing the app
        st.error(f"❌ CrewAI workflow error: {str(e)}")
        return None
+
383
def run_langgraph_analysis(text_content, image_base64):
    """Run the LangGraph single-agent analysis and cache its output.

    Stores the agent results in st.session_state.analysis_results so the
    shared display code can render them, and returns them to the caller.
    """
    results = st.session_state.agent.run_analysis(
        text_content=text_content,
        image_base64=image_base64,
    )
    st.session_state.analysis_results = results
    return results
+
393
def run_unified_workflow_analysis(text_content, image_base64, merge_similar, split_thick):
    """Run the unified soil-analysis workflow and publish results to session state.

    On success, packages the workflow output in the legacy format expected
    by display_analysis_results(); on failure, renders the error (and any
    raw LLM response / detailed errors) in the UI and returns early.

    Args:
        text_content: Extracted document text (may be empty).
        image_base64: Base64-encoded document image (may be None).
        merge_similar: Whether to merge similar adjacent layers.
        split_thick: Whether to split overly thick layers.
    """
    from unified_soil_workflow import UnifiedSoilWorkflow

    # Initialize workflow
    workflow = UnifiedSoilWorkflow()

    # Get configuration (current provider/model and matching API key)
    provider, model = get_current_provider_and_model()
    selected_model = st.session_state.get('selected_model', model)
    current_api_key = get_api_key_for_current_provider()

    # Run unified workflow
    soil_data = workflow.analyze_soil_boring_log(
        text_content=text_content,
        image_base64=image_base64,
        model=selected_model,
        api_key=current_api_key,
        merge_similar=merge_similar,
        split_thick=split_thick
    )

    # Check if analysis was successful; render diagnostics otherwise
    if "error" in soil_data:
        st.error(f"❌ Unified Workflow Error: {soil_data['error']}")
        if "raw_response" in soil_data:
            with st.expander("πŸ” View Raw LLM Response"):
                st.text(soil_data["raw_response"])
        if "errors" in soil_data:
            st.error("Detailed errors:")
            for error in soil_data["errors"]:
                st.error(f" β€’ {error}")
        return

    # Package results for display (compatible with existing UI)
    analysis_results = {
        "soil_data": soil_data,
        "analysis_results": {
            "validation_stats": soil_data.get("validation_stats", {}),
            "optimization": soil_data.get("optimization_results", {})
        }
    }

    st.session_state.analysis_results = analysis_results

    # Display success message with workflow metadata (sample counts etc.)
    workflow_meta = soil_data.get("workflow_metadata", {})
    layer_count = len(soil_data.get("soil_layers", []))
    ss_count = workflow_meta.get("ss_samples", 0)
    st_count = workflow_meta.get("st_samples", 0)

    st.success(f"πŸŽ‰ Unified workflow completed! Found {layer_count} soil layers")
    st.info(f"πŸ“Š Processing: {ss_count} SS samples, {st_count} ST samples, {workflow_meta.get('processing_steps', 9)} workflow steps")
+
447
def main():
    """Streamlit entry point: provider setup, sidebar controls, and analysis.

    Flow: refresh provider status -> require at least one configured LLM
    provider -> sidebar (upload, options, method, provider/model pickers,
    sample-data demo) -> process upload -> dispatch to the chosen analysis
    method -> render cached results.

    NOTE(review): indentation reconstructed from a diff view — nesting of
    the sidebar section should be confirmed against the original file.
    """
    st.title("πŸ—οΈ Soil Boring Log Analyzer")
    st.markdown("Upload soil boring logs (PDF/Image) to automatically extract and analyze soil layers using AI")

    # Force refresh provider status on each run to catch any newly saved API keys
    refresh_provider_status()

    # Show system status banner
    available_providers = get_available_providers()
    if available_providers:
        provider_names = [LLM_PROVIDERS[p]["name"] for p in available_providers]
        st.success(f"βœ… **Ready to use** - Configured providers: {', '.join(provider_names)}")
    else:
        st.info("πŸ”§ **Setup Required** - Please configure at least one LLM provider below to start analyzing soil boring logs")

    # LLM Provider Management (re-read in case a key was just saved)
    available_providers = get_available_providers()

    if not available_providers:
        # Block the app until at least one provider is configured
        st.error("⚠️ At least one LLM provider API key is required to use this application")
        setup_llm_provider_gui()
        return

    # Show provider management in sidebar
    with st.sidebar:
        st.markdown("---")
        setup_llm_provider_gui()

    # Initialize heavyweight components lazily, once per session
    if 'document_processor' not in st.session_state:
        st.session_state.document_processor = DocumentProcessor()

    if 'agent' not in st.session_state:
        st.session_state.agent = SoilAnalysisAgent()

    if 'visualizer' not in st.session_state:
        st.session_state.visualizer = SoilProfileVisualizer()

    if 'analysis_results' not in st.session_state:
        st.session_state.analysis_results = None

    # Sidebar: upload, options, method, provider/model selection, demos
    with st.sidebar:
        st.header("Upload Document")
        uploaded_file = st.file_uploader(
            "Choose a soil boring log file",
            type=['pdf', 'png', 'jpg', 'jpeg'],
            help="Upload PDF or image file of soil boring log"
        )

        st.header("Analysis Options")
        merge_similar = st.checkbox("Merge similar layers", value=True)
        split_thick = st.checkbox("Split thick layers", value=True)

        st.subheader("πŸ€– Analysis Method")
        analysis_method = st.radio(
            "Choose analysis approach:",
            ["CrewAI (Two-Agent System)", "LangGraph (Single Agent)", "Unified Workflow"],
            help="CrewAI uses two specialized agents with quality control"
        )

        # Provider and Model selection
        st.subheader("πŸ€– LLM Provider & Model Selection")

        # Get available providers
        available_providers = get_available_providers()

        if not available_providers:
            st.error("⚠️ No LLM providers configured. Please set up at least one provider in the sidebar.")
            return

        # Provider selection (display names keyed by provider id)
        provider_names = {p: LLM_PROVIDERS[p]["name"] for p in available_providers}
        current_provider, current_model = get_current_provider_and_model()

        # Default provider selection: keep the current one if still available
        default_provider_name = None
        if current_provider and current_provider in provider_names:
            default_provider_name = provider_names[current_provider]
        elif provider_names:
            default_provider_name = list(provider_names.values())[0]

        selected_provider_name = st.selectbox(
            "Select LLM Provider:",
            options=list(provider_names.values()),
            index=list(provider_names.values()).index(default_provider_name) if default_provider_name else 0,
            help="Choose your preferred LLM provider"
        )

        # Map display name back to provider ID
        selected_provider = None
        for pid, pname in provider_names.items():
            if pname == selected_provider_name:
                selected_provider = pid
                break

        # Model selection for the selected provider
        if selected_provider:
            available_models = get_models_for_provider(selected_provider)

            if available_models:
                # Build human-readable labels; ⭐ = recommended, πŸ“ = text-only
                model_options = {}
                for model_id, model_info in available_models.items():
                    label = f"{model_info['name']} ({model_info['cost']} cost)"
                    if model_info['recommended']:
                        label += " ⭐"
                    if not model_info.get('supports_images', False):
                        label += " πŸ“"
                    model_options[label] = model_id

                # Default model selection: keep the current model if present
                default_model_label = None
                if current_model and current_model in available_models:
                    for label, model_id in model_options.items():
                        if model_id == current_model:
                            default_model_label = label
                            break

                if not default_model_label and model_options:
                    default_model_label = list(model_options.keys())[0]

                selected_label = st.selectbox(
                    f"Select Model for {selected_provider_name}:",
                    options=list(model_options.keys()),
                    index=list(model_options.keys()).index(default_model_label) if default_model_label else 0,
                    help="⭐ = Recommended | πŸ“ = Text-only (no image support)"
                )

                selected_model = model_options[selected_label]

                # Show model / provider / image-support info
                if selected_model in AVAILABLE_MODELS:
                    model_info = AVAILABLE_MODELS[selected_model]
                    st.info(f"πŸ’‘ {model_info['description']}")

                    provider_info = LLM_PROVIDERS[selected_provider]
                    st.info(f"πŸ”— Using {provider_info['name']}: {provider_info['description']}")

                    if model_info.get('supports_images', False):
                        st.success("πŸ–ΌοΈ This model supports both text and image analysis")
                    else:
                        st.warning("πŸ“ This model supports text-only analysis (images will be ignored)")
            else:
                st.error(f"No models available for {selected_provider_name}")
                selected_model = None
        else:
            selected_model = None

        # Store selections in session state for the analysis helpers
        st.session_state.selected_provider = selected_provider
        st.session_state.selected_model = selected_model

        if st.button("πŸ”„ Reset Analysis"):
            st.session_state.analysis_results = None
            st.rerun()

        st.markdown("---")
        st.subheader("πŸš€ Unified Workflow Info")
        if st.button("πŸ“‹ View Workflow Steps"):
            from unified_soil_workflow import UnifiedSoilWorkflow
            workflow = UnifiedSoilWorkflow()
            workflow_info = workflow.get_workflow_visualization()
            st.markdown(workflow_info)

        st.markdown("---")
        st.subheader("πŸ§ͺ Test with Sample Data")
        if st.button("πŸ“ Load Sample Boring Log"):
            # Hard-coded demo boring log exercising the unified workflow
            sample_text = '''SOIL BORING LOG
Project: Sample Geotechnical Investigation
Boring: BH-01
Location: Main Street, Sample City
Date: 2024-06-24
Depth: 15.0m

DEPTH (m) | SOIL DESCRIPTION | SPT-N | Su (kPa)
0.0-1.5 | Brown silty clay, soft, high plasticity | 4 | -
1.5-3.0 | Gray clay, medium stiff, wet | 8 | -
3.0-6.0 | Fine to medium sand, loose to medium dense | 12 | -
6.0-9.0 | Stiff clay, gray, low plasticity | 18 | -
9.0-12.0 | Coarse sand and gravel, dense | 35 | -
12.0-15.0 | Very stiff clay, dark gray | 30 | -

Water table encountered at 2.8m depth.
Notes: All strength values from SPT testing. Su calculated using Su=5*N for clay layers.
'''

            with st.spinner("Analyzing sample data with unified workflow..."):
                try:
                    from unified_soil_workflow import UnifiedSoilWorkflow

                    # Initialize workflow
                    workflow = UnifiedSoilWorkflow()

                    # Use selected model and current API key
                    provider, model = get_current_provider_and_model()
                    selected_model = st.session_state.get('selected_model', model)
                    current_api_key = get_api_key_for_current_provider()

                    # Run unified workflow on sample data (text only, no image)
                    soil_data = workflow.analyze_soil_boring_log(
                        text_content=sample_text,
                        model=selected_model,
                        api_key=current_api_key
                    )

                    if "error" not in soil_data and "soil_layers" in soil_data:
                        # Package results for display
                        analysis_results = {
                            "soil_data": soil_data,
                            "analysis_results": {
                                "validation_stats": soil_data.get("validation_stats", {}),
                                "optimization": soil_data.get("optimization_results", {})
                            }
                        }

                        st.session_state.analysis_results = analysis_results

                        layer_count = len(soil_data["soil_layers"])
                        workflow_meta = soil_data.get("workflow_metadata", {})
                        st.success(f"βœ… Sample analysis completed! Found {layer_count} layers using unified workflow.")
                        st.info(f"πŸ“Š Sample processing: {workflow_meta.get('ss_samples', 0)} SS, {workflow_meta.get('st_samples', 0)} ST samples")
                        st.rerun()
                    else:
                        st.error("❌ Sample analysis failed")
                        if "errors" in soil_data:
                            for error in soil_data["errors"]:
                                st.error(f" β€’ {error}")
                except Exception as e:
                    st.error(f"❌ Sample analysis error: {str(e)}")

    # Main content: process the uploaded document and run the chosen method
    if uploaded_file is not None:
        # Process document
        with st.spinner("Processing document..."):
            text_content, images, image_base64 = st.session_state.document_processor.process_uploaded_file(uploaded_file)

        # Display uploaded content side by side
        col1, col2 = st.columns([1, 1])

        with col1:
            st.subheader("πŸ“„ Document Content")
            if text_content:
                st.text_area("Extracted Text", text_content, height=200)
            else:
                st.info("No text extracted (image-only analysis)")

        with col2:
            st.subheader("πŸ–ΌοΈ Document Image")
            if images:
                st.image(images[0], caption="Soil Boring Log", use_column_width=True)

        # Analyze button — dispatch to the selected analysis method
        if st.button("πŸ” Analyze Soil Layers", type="primary"):

            if analysis_method == "CrewAI (Two-Agent System)":
                with st.spinner("Running CrewAI two-agent geotechnical analysis..."):
                    # Show unit conversion warning
                    st.warning("⚠️ **UNIT CONVERSION ALERT**: CrewAI agents will carefully check unit conversions, especially Su values. Ensure your data uses correct units: t/mΒ² β†’ kPa (multiply by 9.81)")
                    st.warning("πŸ“ **LAYER SPLITTING ALERT**: CrewAI agents will analyze Su value consistency within layers and split layers when Su values vary by >30% or have >2x ratio")

                    try:
                        # Run CrewAI analysis workflow
                        run_crewai_analysis(
                            text_content, image_base64, merge_similar, split_thick
                        )

                    except Exception as e:
                        st.error(f"❌ CrewAI analysis failed: {str(e)}")
                        import traceback
                        st.error("πŸ“‹ Full error details:")
                        st.code(traceback.format_exc())

            elif analysis_method == "LangGraph (Single Agent)":
                with st.spinner("Running LangGraph single agent analysis..."):
                    try:
                        # Run LangGraph agent analysis
                        agent_results = run_langgraph_analysis(text_content, image_base64)
                        layer_count = len(agent_results.get("soil_data", {}).get("soil_layers", []))
                        st.success(f"πŸŽ‰ LangGraph analysis completed! Found {layer_count} soil layers")

                    except Exception as e:
                        st.error(f"❌ LangGraph analysis failed: {str(e)}")

            else:  # Unified Workflow
                with st.spinner("Running unified soil analysis workflow..."):
                    try:
                        # Run unified workflow analysis
                        run_unified_workflow_analysis(
                            text_content, image_base64, merge_similar, split_thick
                        )
                    except Exception as e:
                        st.error(f"❌ Unified workflow failed: {str(e)}")

    # Display results cached from any previous run
    if st.session_state.analysis_results:
        display_analysis_results()
+
748
def display_analysis_results():
    """Render all result tabs for the analysis stored in session state.

    Reads ``st.session_state.analysis_results`` (populated by the analysis
    workflows) and shows the soil profile, layer details, SS/ST processing,
    optimization, nearest-neighbor, insights, optional CrewAI, and export
    views. Shows an error panel and returns early if the extraction failed.
    """
    results = st.session_state.analysis_results

    # Both the legacy agent format and the new direct format store the
    # payload under the same keys, so a single .get() covers both — the
    # previous if/else branches were byte-for-byte equivalent.
    soil_data = results.get("soil_data", {})
    analysis_results = results.get("analysis_results", {})

    if "error" in soil_data:
        st.error(f"Analysis Error: {soil_data['error']}")
        if "raw_response" in soil_data:
            with st.expander("Raw LLM Response"):
                st.text(soil_data["raw_response"])
        return

    # Display validation recommendations if any
    validation_recs = soil_data.get("validation_recommendations", {})
    if validation_recs:
        display_validation_recommendations(validation_recs)

    # Tabs for different views - add CrewAI tab if CrewAI results exist
    tabs = ["πŸ“Š Soil Profile", "πŸ“‹ Layer Details", "πŸ§ͺ SS/ST Processing", "πŸ”§ Optimization", "🎯 Nearest Neighbors", "πŸ’‘ Insights", "πŸ“ Export"]

    # Add CrewAI tab if CrewAI analysis was performed
    if analysis_results.get("crewai_analysis"):
        tabs.insert(-1, "πŸ€– CrewAI Analysis")  # Insert before Export tab

    if len(tabs) == 8:
        tab1, tab2, tab3, tab4, tab5, tab6, tab7, tab8 = st.tabs(tabs)
    else:
        tab1, tab2, tab3, tab4, tab5, tab6, tab7 = st.tabs(tabs)

    with tab1:
        display_soil_profile(soil_data)

    with tab2:
        display_layer_details(soil_data)

    with tab3:
        display_ss_st_processing(soil_data)

    with tab4:
        display_optimization_results(analysis_results)

    with tab5:
        display_nearest_neighbor_analysis(analysis_results)

    with tab6:
        display_insights(analysis_results)

    # With the optional CrewAI tab, Export shifts from tab7 to tab8.
    if len(tabs) == 8:
        with tab7:
            display_crewai_analysis(analysis_results)

        with tab8:
            display_export_options(soil_data)
    else:
        with tab7:
            display_export_options(soil_data)
813
def display_soil_profile(soil_data):
    """Plot the stratigraphy and strength profiles, then project metadata."""
    st.subheader("Soil Profile Visualization")

    if not soil_data.get("soil_layers"):
        st.warning("No soil layers found in analysis")
        return

    left, right = st.columns([1, 1])

    with left:
        # Stratigraphy (layer) plot
        fig = st.session_state.visualizer.create_soil_profile_plot(soil_data)
        if fig:
            st.plotly_chart(fig, use_container_width=True)

    with right:
        # Strength-vs-depth plot
        fig = st.session_state.visualizer.create_strength_profile_plot(soil_data)
        if fig:
            st.plotly_chart(fig, use_container_width=True)

    # Project metadata, when the extractor found it
    if "project_info" in soil_data:
        st.subheader("Project Information")
        proj_info = soil_data["project_info"]

        info_col1, info_col2, info_col3 = st.columns(3)
        with info_col1:
            st.metric("Project", proj_info.get("project_name", "N/A"))
            st.metric("Boring ID", proj_info.get("boring_id", "N/A"))
        with info_col2:
            st.metric("Location", proj_info.get("location", "N/A"))
            st.metric("Date", proj_info.get("date", "N/A"))
        with info_col3:
            st.metric("Total Depth", f"{proj_info.get('depth_total', 0)} m")
            if "water_table" in soil_data and soil_data["water_table"].get("depth"):
                st.metric("Water Table", f"{soil_data['water_table']['depth']} m")
852
def display_layer_details(soil_data):
    """Render the summary table and one expandable card per soil layer."""
    st.subheader("Soil Layer Details")

    layers = soil_data.get("soil_layers")
    if not layers:
        st.warning("No soil layers found in analysis")
        return

    # Tabular overview of every layer
    summary_df = st.session_state.visualizer.create_layer_summary_table(soil_data)
    if summary_df is not None:
        st.dataframe(summary_df, use_container_width=True)

    # One expander per layer with the full property set
    st.subheader("Layer Details")
    for idx, layer in enumerate(layers):
        title = f"Layer {layer.get('layer_id', idx+1)}: {layer.get('soil_type', 'Unknown')}"
        with st.expander(title):
            left, right = st.columns(2)

            with left:
                st.write(f"**Depth:** {layer.get('depth_from', 0)} - {layer.get('depth_to', 0)} m")
                st.write(f"**Thickness:** {layer.get('depth_to', 0) - layer.get('depth_from', 0):.1f} m")
                st.write(f"**Soil Type:** {layer.get('soil_type', 'N/A')}")
                st.write(f"**Color:** {layer.get('color', 'N/A')}")

            with right:
                st.write(f"**Strength Parameter:** {layer.get('strength_parameter', 'N/A')}")
                st.write(f"**Strength Value:** {layer.get('strength_value', 'N/A')}")
                st.write(f"**Moisture:** {layer.get('moisture', 'N/A')}")
                st.write(f"**Consistency:** {layer.get('consistency', 'N/A')}")

            if layer.get('description'):
                st.write(f"**Description:** {layer.get('description')}")
886
def display_optimization_results(analysis_results):
    """Show layer merge/split suggestions and overall profile statistics."""
    st.subheader("Layer Optimization Suggestions")

    optimization = analysis_results.get("optimization", {})
    if not optimization:
        st.info("No optimization results available")
        return

    # Merge suggestions
    merges = optimization.get("merge_suggestions", {}).get("suggestions", [])
    if merges:
        st.subheader("πŸ”— Merge Suggestions")
        for num, item in enumerate(merges, start=1):
            st.info(f"**Suggestion {num}:** {item['reason']}")
            st.write(f"Layers to merge: {item['layer_indices']}")
    else:
        st.success("βœ… No merge suggestions - layers are optimally divided")

    # Split suggestions
    splits = optimization.get("split_suggestions", {}).get("suggestions", [])
    if splits:
        st.subheader("βœ‚οΈ Split Suggestions")
        for num, item in enumerate(splits, start=1):
            st.warning(f"**Suggestion {num}:** {item['reason']}")
            if "suggested_depths" in item:
                st.write(f"Suggested split depths: {item['suggested_depths']}")
    else:
        st.success("βœ… No split suggestions - layer thicknesses are appropriate")

    # Aggregate profile statistics, when present
    if "validation_stats" in analysis_results:
        st.subheader("πŸ“Š Profile Statistics")
        stats = analysis_results["validation_stats"]

        col1, col2, col3, col4 = st.columns(4)
        with col1:
            st.metric("Total Depth", f"{stats.get('total_depth', 0):.1f} m")
        with col2:
            st.metric("Layer Count", stats.get('layer_count', 0))
        with col3:
            st.metric("Avg Thickness", f"{stats.get('average_layer_thickness', 0):.1f} m")
        with col4:
            st.metric("Thickest Layer", f"{stats.get('thickest_layer', 0):.1f} m")
932
def display_nearest_neighbor_analysis(analysis_results):
    """Display nearest neighbor layer-grouping results and controls.

    Reads ``analysis_results["optimization"]["nearest_neighbor_analysis"]``
    and renders grouping metrics, merge recommendations, per-group details,
    the full text report, and two interactive parameter sliders.

    Fix: the "Number of Neighbors" slider previously used
    ``max_value=min(10, total_layers-1)``, which is < ``min_value`` when the
    profile has two or fewer layers and makes Streamlit raise; stored values
    of ``k_neighbors``/``similarity_threshold`` outside the slider bounds
    raised as well. Bounds and defaults are now clamped.
    """
    st.subheader("🎯 Nearest Neighbor Analysis")
    st.markdown("*Advanced layer grouping using machine learning similarity analysis*")

    optimization = analysis_results.get("optimization", {})
    nn_analysis = optimization.get("nearest_neighbor_analysis", {})

    if "error" in nn_analysis:
        st.error(f"Analysis error: {nn_analysis['error']}")
        return

    if "message" in nn_analysis:
        st.info(nn_analysis["message"])
        return

    # Analysis parameters
    params = nn_analysis.get("analysis_parameters", {})
    st.info(f"πŸ“‹ Analysis: {params.get('total_layers', 0)} layers, {params.get('k_neighbors', 3)} nearest neighbors, {params.get('similarity_threshold', 0.75)*100:.0f}% similarity threshold")

    # Grouping summary
    neighbor_groups = nn_analysis.get("neighbor_groups", [])
    merge_recommendations = nn_analysis.get("merge_recommendations", [])

    col1, col2 = st.columns(2)
    with col1:
        st.metric("πŸ”— Similar Groups Found", len(neighbor_groups))
    with col2:
        st.metric("πŸ“‹ Merge Recommendations", len(merge_recommendations))

    # Show merge recommendations
    if merge_recommendations:
        st.subheader("🎯 Recommended Layer Merging")

        for i, rec in enumerate(merge_recommendations):
            with st.expander(f"πŸ“Œ Recommendation {i+1}: Merge Group {rec.get('group_id', '?')}"):
                st.write(f"**Reason:** {rec.get('reason', 'N/A')}")
                st.write(f"**Layers to merge:** {', '.join(map(str, rec.get('layer_ids', [])))}")
                st.write(f"**Depth ranges:** {', '.join(rec.get('depth_ranges', []))}")

                merged_props = rec.get('merged_properties', {})
                if merged_props:
                    st.write("**Merged layer properties:**")
                    col1, col2, col3 = st.columns(3)
                    with col1:
                        st.write(f"- Soil type: {merged_props.get('soil_type', 'N/A')}")
                        st.write(f"- Consistency: {merged_props.get('consistency', 'N/A')}")
                    with col2:
                        st.write(f"- Depth: {merged_props.get('depth_from', 0):.1f}-{merged_props.get('depth_to', 0):.1f}m")
                        st.write(f"- Thickness: {merged_props.get('thickness', 0):.1f}m")
                    with col3:
                        st.write(f"- Avg strength: {merged_props.get('avg_strength', 0):.1f}")

    # Show detailed groups
    if neighbor_groups:
        st.subheader("πŸ“Š Similar Layer Groups")

        for group in neighbor_groups:
            group_id = group.get('group_id', '?')
            group_size = group.get('group_size', 0)
            depth_range = group.get('depth_range', {})

            with st.expander(f"πŸ”— Group {group_id} ({group_size} layers)"):
                col1, col2 = st.columns(2)

                with col1:
                    st.write("**Group Properties:**")
                    st.write(f"- Depth range: {depth_range.get('min', 0):.1f}-{depth_range.get('max', 0):.1f}m")
                    st.write(f"- Total thickness: {depth_range.get('total_thickness', 0):.1f}m")
                    st.write(f"- Layer IDs: {', '.join(map(str, group.get('layer_ids', [])))}")

                with col2:
                    st.write("**Soil Type Distribution:**")
                    soil_types = group.get('soil_types', {})
                    for soil_type, count in soil_types.items():
                        st.write(f"- {soil_type}: {count} layer(s)")

                    st.write("**Consistency Distribution:**")
                    consistencies = group.get('consistencies', {})
                    for consistency, count in consistencies.items():
                        st.write(f"- {consistency}: {count} layer(s)")

                # Strength statistics (only shown when a real mean exists)
                strength_stats = group.get('strength_stats', {})
                if strength_stats.get('mean', 0) > 0:
                    st.write("**Strength Statistics:**")
                    st.write(f"- Mean: {strength_stats.get('mean', 0):.1f}")
                    st.write(f"- Range: {strength_stats.get('min', 0):.1f} - {strength_stats.get('max', 0):.1f}")
                    st.write(f"- Std Dev: {strength_stats.get('std', 0):.1f}")

    # Show detailed neighbor report
    neighbor_report = nn_analysis.get("neighbor_report", "")
    if neighbor_report:
        st.subheader("πŸ“‹ Detailed Neighbor Analysis")
        with st.expander("πŸ” View Full Neighbor Report"):
            st.text(neighbor_report)

    # Interactive controls
    st.subheader("βš™οΈ Analysis Controls")
    col1, col2 = st.columns(2)

    with col1:
        # Clamp the stored threshold into the slider's range so an
        # out-of-range value from a previous run cannot crash the widget.
        threshold_default = min(0.95, max(0.5, float(params.get('similarity_threshold', 0.75))))
        new_threshold = st.slider(
            "Similarity Threshold",
            min_value=0.5,
            max_value=0.95,
            value=threshold_default,
            step=0.05,
            help="Higher values require more similarity for grouping"
        )

    with col2:
        # With <=2 layers, min(10, total-1) would drop below min_value=1 and
        # Streamlit would raise; clamp both the upper bound and the default.
        k_max = max(1, min(10, params.get('total_layers', 3) - 1))
        k_default = min(k_max, max(1, params.get('k_neighbors', 3)))
        new_k = st.slider(
            "Number of Neighbors",
            min_value=1,
            max_value=k_max,
            value=k_default,
            help="Number of nearest neighbors to analyze"
        )

    if st.button("πŸ”„ Rerun Analysis with New Parameters"):
        # This would trigger a reanalysis - for now just show info
        st.info("πŸ’‘ Reanalysis feature will be available in the feedback processing section")
1056
def display_insights(analysis_results):
    """Show AI-generated insights and a feedback loop that refines layers."""
    st.subheader("πŸ€– AI-Generated Insights")

    insights = analysis_results.get("insights", "")
    if insights:
        st.markdown(insights)
    else:
        st.info("No insights available")

    # Free-text feedback that gets fed back through the LLM to refine layers
    st.subheader("πŸ’¬ Provide Feedback")
    feedback = st.text_area(
        "Provide feedback to improve the analysis:",
        placeholder="e.g., 'The clay layer at 5-8m should be split into soft and stiff clay layers'"
    )

    if st.button("Submit Feedback") and feedback:
        with st.spinner("Processing feedback..."):
            try:
                from llm_client import LLMClient

                # Build a client against the currently selected provider/model
                provider, model = get_current_provider_and_model()
                chosen_model = st.session_state.get('selected_model', model)
                api_key = get_api_key_for_current_provider()
                client = LLMClient(model=chosen_model, api_key=api_key)

                session_results = st.session_state.analysis_results
                current_layers = session_results.get("soil_data", {})

                # Ask the LLM to refine the layer model using the feedback
                refined = client.refine_soil_layers(current_layers, feedback)

                if "error" in refined:
                    st.error(f"❌ Error processing feedback: {refined.get('error', 'Unknown error')}")
                else:
                    st.session_state.analysis_results["soil_data"] = refined
                    st.success("βœ… Feedback processed! Analysis updated.")
                    st.rerun()
            except Exception as e:
                st.error(f"❌ Error processing feedback: {str(e)}")
1101
def display_export_options(soil_data):
    """Offer CSV/JSON/Text downloads of the profile plus a table preview."""
    st.subheader("πŸ“ Export Options")

    if not soil_data.get("soil_layers"):
        st.warning("No data to export")
        return

    export_format = st.selectbox("Select export format:", ["CSV", "JSON", "Text"])

    if st.button("Generate Export"):
        try:
            # Build the payload for the chosen format, then expose it
            # through a matching download button.
            if export_format == "CSV":
                payload = st.session_state.visualizer.export_profile_data(soil_data, "csv")
                st.download_button(
                    label="πŸ“₯ Download CSV",
                    data=payload,
                    file_name="soil_profile.csv",
                    mime="text/csv"
                )
            elif export_format == "JSON":
                payload = json.dumps(soil_data, indent=2)
                st.download_button(
                    label="πŸ“₯ Download JSON",
                    data=payload,
                    file_name="soil_profile.json",
                    mime="application/json"
                )
            else:  # Text
                payload = st.session_state.visualizer.export_profile_data(soil_data, "text")
                st.download_button(
                    label="πŸ“₯ Download Text",
                    data=payload,
                    file_name="soil_profile.txt",
                    mime="text/plain"
                )
        except Exception as e:
            st.error(f"Export failed: {str(e)}")

    # Preview export data
    with st.expander("Preview Export Data"):
        preview_df = st.session_state.visualizer.create_layer_summary_table(soil_data)
        if preview_df is not None:
            st.dataframe(preview_df)
1146
def display_ss_st_processing(soil_data):
    """Display SS/ST sample processing details.

    Renders, in order: Su multi-value processing stats (via
    analyze_su_processing), the calculator's processing summary, a per-layer
    breakdown, and static reference sections on guidelines, unit
    conversions, and classification criteria.
    """
    st.subheader("πŸ§ͺ Split Spoon (SS) & Shelby Tube (ST) Processing")

    if "soil_layers" not in soil_data or not soil_data["soil_layers"]:
        st.warning("No soil layers found for SS/ST analysis")
        return

    layers = soil_data["soil_layers"]

    # Enhanced Su Value Processing Summary
    st.subheader("πŸ“Š Enhanced Su Value Processing")
    su_processing_stats = analyze_su_processing(layers)

    if su_processing_stats['multiple_su_layers'] > 0:
        col1, col2, col3, col4 = st.columns(4)

        with col1:
            st.metric("Layers with Multiple Su", su_processing_stats['multiple_su_layers'])
        with col2:
            st.metric("Su Values Averaged", su_processing_stats['averaged_layers'])
        with col3:
            st.metric("Subdivision Recommended", su_processing_stats['subdivision_recommended'])
        with col4:
            st.metric("Su Ranges Processed", su_processing_stats['range_processed'])

        # Show subdivision recommendations
        if su_processing_stats['subdivision_details']:
            st.subheader("πŸ”„ Layer Subdivision Recommendations")
            for detail in su_processing_stats['subdivision_details']:
                st.warning(f"**Layer {detail['layer_id']}**: {detail['reason']}")
                st.info(f" β€’ Su values found: {detail['su_values']}")
                st.info(f" β€’ Variation ratio: {detail['ratio']:.1f}x")

        # Show averaging results
        if su_processing_stats['averaging_details']:
            st.subheader("πŸ“ˆ Su Value Averaging Results")
            for detail in su_processing_stats['averaging_details']:
                st.success(f"**Layer {detail['layer_id']}**: {detail['description']}")
    else:
        st.info("No multiple Su values detected in layers - using single values as found")

    # Processing summary from the enhanced calculator
    processing_summary = soil_data.get("processing_summary", {})

    if processing_summary:
        st.subheader("πŸ“Š Processing Summary")
        col1, col2, col3, col4 = st.columns(4)

        with col1:
            st.metric("Total Layers", processing_summary.get('total_layers', 0))
            st.metric("ST Samples", processing_summary.get('st_samples', 0))

        with col2:
            st.metric("SS Samples", processing_summary.get('ss_samples', 0))
            st.metric("Clay Layers", processing_summary.get('clay_layers', 0))

        with col3:
            st.metric("Sand/Silt Layers", processing_summary.get('sand_layers', 0))
            st.metric("Su Calculated", processing_summary.get('su_calculated', 0))

        with col4:
            st.metric("Ο† Calculated", processing_summary.get('phi_calculated', 0))

        # Add clay consistency check summary if available
        if processing_summary.get('clay_consistency_checks', 0) > 0:
            st.subheader("πŸ§ͺ Clay Consistency Checks")
            col1, col2, col3 = st.columns(3)

            with col1:
                st.metric("Total Checks", processing_summary.get('clay_consistency_checks', 0))
            with col2:
                st.metric("βœ… Consistent", processing_summary.get('consistent_clays', 0))
            with col3:
                st.metric("⚠️ Inconsistent", processing_summary.get('inconsistent_clays', 0))

    # Detailed layer processing: one expander per layer
    st.subheader("πŸ”¬ Layer-by-Layer Processing Details")

    for i, layer in enumerate(layers):
        layer_id = layer.get('layer_id', i+1)
        depth_range = f"{layer.get('depth_from', 0):.1f}-{layer.get('depth_to', 0):.1f}m"
        sample_type = layer.get('sample_type', 'Unknown')
        soil_type = layer.get('soil_type', 'unknown')
        consistency = layer.get('consistency', '')

        with st.expander(f"πŸ“‹ Layer {layer_id}: {depth_range} - {sample_type} Sample"):
            col1, col2 = st.columns(2)

            with col1:
                st.write("**Sample Information:**")
                st.write(f"- Sample Type: {sample_type}")
                st.write(f"- Soil Type: {consistency} {soil_type}")
                st.write(f"- Description: {layer.get('description', 'N/A')}")

                # Sieve analysis: #200 passing drives fine/coarse classification
                sieve_200 = layer.get('sieve_200_passing')
                if sieve_200 is not None:
                    st.write(f"- Sieve #200: {sieve_200}% passing")
                    if sieve_200 > 50:
                        st.success(" β†’ Classified as fine-grained (clay/silt)")
                    else:
                        st.info(" β†’ Classified as coarse-grained (sand/gravel)")
                else:
                    st.write("- Sieve #200: No data")
                    if soil_type == 'clay':
                        st.info(" β†’ Assumed >50% passing (clay)")

            with col2:
                st.write("**Strength Parameters:**")
                strength_param = layer.get('strength_parameter', 'N/A')
                strength_value = layer.get('strength_value', 'N/A')
                strength_unit = layer.get('strength_unit', '')

                st.write(f"- Parameter: {strength_param}")
                st.write(f"- Value: {strength_value} {strength_unit}")

                # Processing method
                processing_method = layer.get('processing_method', 'N/A')
                st.write(f"- Processing: {processing_method}")

                # Show calculation sources (present only when derived)
                if 'su_source' in layer:
                    st.info(f"πŸ“Š Su: {layer['su_source']}")
                if 'phi_source' in layer:
                    st.info(f"πŸ“Š Ο†: {layer['phi_source']}")
                if 'original_spt' in layer:
                    st.info(f"πŸ“Š Original SPT-N: {layer['original_spt']}")

                # Unit weight if calculated
                if 'unit_weight' in layer:
                    unit_weight = layer['unit_weight']
                    unit_weight_unit = layer.get('unit_weight_unit', 'kN/mΒ³')
                    st.write(f"- Unit Weight: {unit_weight:.1f} {unit_weight_unit}")

                # Water content and consistency check for clay
                if layer.get('soil_type') == 'clay':
                    water_content = layer.get('water_content')
                    if water_content is not None:
                        st.write(f"- Water Content: {water_content}%")

                    # Notes beginning with βœ… are "consistent"; others warn
                    if 'consistency_note' in layer:
                        if layer['consistency_note'].startswith('βœ…'):
                            st.success(layer['consistency_note'])
                        else:
                            st.warning(layer['consistency_note'])


    # SS/ST Processing Guidelines (static reference content)
    st.subheader("πŸ“– Processing Guidelines Applied")

    col1, col2 = st.columns(2)

    with col1:
        st.write("**ST (Shelby Tube) Samples:**")
        st.write("- Use Su values from unconfined compression test")
        st.write("- Undisturbed samples for accurate strength")
        st.write("- Typical for clay characterization")
        st.write("- Units converted to kPa")

    with col2:
        st.write("**SS (Split Spoon) Samples:**")
        st.write("- Use SPT-N values from penetration test")
        st.write("- Clay: Convert N to Su using Su = 5Γ—N")
        st.write("- Sand: Convert N to Ο† using Peck method")
        st.write("- Standard field testing method")

    # Unit conversion summary (static reference content)
    st.subheader("πŸ”„ Unit Conversion to SI")
    st.write("All measurements converted to SI units:")
    st.write("- **Su (Undrained Shear Strength)**: kPa")
    st.write(" - ksc (kg/cmΒ²) β†’ kPa (multiply by 98)")
    st.write(" - t/mΒ² (tonnes/mΒ²) β†’ kPa (multiply by 9.81)")
    st.write(" - psi β†’ kPa (multiply by 6.89)")
    st.write(" - psf β†’ kPa (multiply by 0.048)")
    st.write("- **Ο† (Friction Angle)**: degrees")
    st.write("- **Unit Weight**: kN/mΒ³")
    st.write("- **Depth**: meters (ft β†’ m, multiply by 0.305)")

    # Classification criteria (static reference content)
    st.subheader("🎯 Soil Classification Criteria")
    st.write("Sieve analysis (#200) classification:")
    st.write("- **>50% passing**: Fine-grained soil (clay/silt)")
    st.write("- **<50% passing**: Coarse-grained soil (sand/gravel)")
    st.write("- **No data available**: Assumed clay (>50% passing)")
1332
def display_crewai_analysis(analysis_results):
    """Display CrewAI two-agent analysis results.

    Reads ``analysis_results["crewai_analysis"]`` and renders either the
    multi-stage (revision) transcript or the single-stage (approved)
    transcript, followed by static explanatory sections about the two-agent
    workflow.
    """
    st.subheader("πŸ€– CrewAI Two-Agent Analysis")
    st.markdown("*Advanced geotechnical analysis using specialized agents with quality control*")

    # Unit conversion warning/info
    st.info("πŸ”§ **Unit Conversion Focus**: CrewAI agents specifically check t/mΒ² β†’ kPa conversion (Γ—9.81) and other critical unit conversions")
    st.info("πŸ“ **Layer Splitting Focus**: CrewAI agents analyze Su value consistency within layers and split layers when Su values vary by >30% or have >2x ratio")

    crewai_analysis = analysis_results.get("crewai_analysis", {})

    if not crewai_analysis:
        st.info("No CrewAI analysis results available")
        return

    # Analysis status
    status = crewai_analysis.get("status", "unknown")
    workflow = crewai_analysis.get("workflow", "unknown")

    col1, col2 = st.columns(2)
    with col1:
        if status == "approved":
            st.success("βœ… Analysis Status: APPROVED")
        elif status == "completed_with_revision":
            st.warning("πŸ”„ Analysis Status: COMPLETED WITH REVISION")
        else:
            st.info(f"πŸ“‹ Analysis Status: {status.upper()}")

    with col2:
        st.info(f"πŸ”— Workflow: {workflow.replace('_', ' ').title()}")

    # Display results based on workflow type: revision runs show four
    # stages; approved runs show two.
    if status == "completed_with_revision":
        st.subheader("πŸ”„ Multi-Stage Analysis Process")

        # Initial analysis
        initial_analysis = crewai_analysis.get("initial_analysis", "")
        if initial_analysis:
            with st.expander("πŸ“ Initial Geotech Engineer Analysis"):
                st.markdown(initial_analysis)

        # Initial review
        initial_review = crewai_analysis.get("initial_review", "")
        if initial_review:
            with st.expander("πŸ•΅οΈ Senior Engineer Initial Review"):
                st.markdown(initial_review)

        # Re-investigation
        reinvestigation = crewai_analysis.get("reinvestigation", "")
        if reinvestigation:
            with st.expander("πŸ” Re-investigation Based on Review"):
                st.markdown(reinvestigation)

        # Final review
        final_review = crewai_analysis.get("final_review", "")
        if final_review:
            with st.expander("βœ… Final Senior Review & Approval"):
                st.markdown(final_review)

        st.success("🎯 **Quality Control Process**: The senior engineer identified issues in the initial analysis and required re-investigation, resulting in a more accurate final assessment.")

    else:
        # Single stage approval
        st.subheader("βœ… Single-Stage Analysis Process")

        # Analysis
        analysis = crewai_analysis.get("analysis", "")
        if analysis:
            with st.expander("πŸ“ Geotech Engineer Analysis"):
                st.markdown(analysis)

        # Review
        review = crewai_analysis.get("review", "")
        if review:
            with st.expander("βœ… Senior Engineer Review & Approval"):
                st.markdown(review)

        st.success("🎯 **Quality Control Result**: The analysis passed senior engineer review on the first attempt - high confidence in results.")

    # Analysis insights (static explanatory content)
    st.subheader("πŸ”¬ Agent Specialization Benefits")

    col1, col2 = st.columns(2)

    with col1:
        st.write("**πŸ‘¨β€πŸ’Ό Geotech Engineer Agent:**")
        st.write("β€’ Focuses on data extraction accuracy")
        st.write("β€’ Applies standard classification methods")
        st.write("β€’ Performs comprehensive parameter analysis")
        st.write("β€’ Documents assumptions and methodology")

    with col2:
        st.write("**πŸ‘¨β€πŸ« Senior Geotech Reviewer Agent:**")
        st.write("β€’ Validates parameter consistency")
        st.write("β€’ Checks engineering reasonableness")
        st.write("β€’ Identifies unusual correlations")
        st.write("β€’ Ensures quality control standards")

    # Consistency checks performed (static explanatory content)
    st.subheader("πŸ” Consistency Checks Performed")
    st.write("The senior engineer agent automatically validates:")

    checks = [
        "**CRITICAL: Unit Conversion Accuracy** - t/mΒ² β†’ kPa (Γ—9.81), ksc β†’ kPa (Γ—98), psi β†’ kPa (Γ—6.895)",
        "**CRITICAL: Layer Splitting Analysis** - Su value consistency within layers, splitting when variation >30%",
        "Su (undrained shear strength) vs Water Content relationships",
        "SPT N-values vs Soil Consistency correlations",
        "Layer transition logic and continuity",
        "Parameter ranges within expected bounds",
        "Classification consistency across depth",
        "Verification of all conversion factors applied"
    ]

    for check in checks:
        st.write(f"βœ“ {check}")

    # Recommendations depend on whether a revision cycle was needed
    st.subheader("πŸ’‘ CrewAI Analysis Recommendations")

    if status == "completed_with_revision":
        st.info("🎯 **Recommendation**: Use the final revised analysis as it has undergone rigorous quality control and addresses all consistency issues identified by the senior engineer.")
        st.warning("⚠️ **Note**: Initial analysis contained inconsistencies that were corrected through the re-investigation process.")
    else:
        st.success("🎯 **Recommendation**: Analysis is reliable and can be used with confidence as it passed senior engineer review without requiring revision.")

    # Comparison note
    st.subheader("πŸ“Š Comparison with Other Methods")
    st.info("πŸ’‘ **Advantage**: CrewAI's two-agent system provides built-in quality control that single-agent approaches lack. The senior engineer agent acts as an independent validator, catching issues that might be missed in single-pass analysis.")
1461
def analyze_su_processing(layers):
    """Collect Su-processing statistics from a list of layer dicts.

    Counts layers flagged for multi-Su processing, averaging, subdivision,
    and Su-range handling, and gathers human-readable detail records for
    the averaging and subdivision cases.
    """
    summary = {
        'multiple_su_layers': 0,
        'averaged_layers': 0,
        'subdivision_recommended': 0,
        'range_processed': 0,
        'subdivision_details': [],
        'averaging_details': []
    }

    for entry in layers:
        entry_id = entry.get('layer_id', '?')

        # Flags are set by the Su post-processing step; absent means False.
        if entry.get('su_processing_applied'):
            summary['multiple_su_layers'] += 1

        if entry.get('su_averaged'):
            summary['averaged_layers'] += 1
            values = entry.get('su_values_found', [])
            mean_value = entry.get('su_average_used', 0)
            summary['averaging_details'].append({
                'layer_id': entry_id,
                'description': f"Averaged {len(values)} Su values to {mean_value:.1f} kPa",
                'su_values': values
            })

        if entry.get('subdivision_suggested'):
            summary['subdivision_recommended'] += 1
            summary['subdivision_details'].append({
                'layer_id': entry_id,
                'reason': entry.get('subdivision_reason', 'High variation detected'),
                'su_values': entry.get('su_values_found', []),
                'ratio': entry.get('su_variation_ratio', 0)
            })

        if entry.get('su_range_found'):
            summary['range_processed'] += 1

    return summary
1505
+
1506
def display_validation_recommendations(validation_recs: dict) -> None:
    """Display validation recommendations for Su-water content issues.

    Renders up to three Streamlit sections, one per key found non-empty in
    *validation_recs*:
      - "critical_unit_errors": Su values suspected to be in the wrong unit
        (shown as errors with conversion-factor guidance).
      - "recheck_image": layers whose Su vs. water content relationship looks
        inconsistent (shown with re-upload / re-analyze action buttons).
      - "general_warnings": minor issues surfaced for awareness only.

    Side effects: writes to the Streamlit page; the action buttons clear
    ``st.session_state.analysis_results`` and trigger ``st.rerun()``.
    """

    # Critical unit errors — most severe; expanded by default so the user
    # cannot miss a suspected unit-conversion problem.
    critical_errors = validation_recs.get("critical_unit_errors", [])
    if critical_errors:
        st.error("🚨 CRITICAL UNIT CONVERSION ERRORS DETECTED")

        with st.expander("⚠️ Critical Issues - Action Required", expanded=True):
            st.error("The following Su values appear to be in wrong units:")
            for error in critical_errors:
                st.error(f"β€’ {error}")

            st.markdown("### πŸ”§ **Recommended Actions:**")
            st.warning("1. **Check Unit Conversions Carefully:**")
            # Conversion factors to kPa for the unit systems seen in boring logs.
            st.code("""
            t/mΒ² β†’ kPa: multiply by 9.81
            ksc β†’ kPa: multiply by 98.0
            psi β†’ kPa: multiply by 6.895
            MPa β†’ kPa: multiply by 1000
            """)

            st.warning("2. **Re-examine Original Document:**")
            st.info("β€’ Look for Su unit labels in the source document")
            st.info("β€’ Check if values are consistent with typical ranges")
            st.info("β€’ Verify water content readings as well")

    # Image recheck needed — Su/water-content inconsistencies that may be
    # extraction artifacts; offer the user three recovery paths.
    recheck_needed = validation_recs.get("recheck_image", [])
    if recheck_needed:
        st.warning("πŸ“· IMAGE RECHECK RECOMMENDED")

        with st.expander("πŸ” Su-Water Content Inconsistencies", expanded=True):
            st.warning("The following layers have inconsistent Su-water content relationships:")
            for recheck in recheck_needed:
                st.warning(f"β€’ {recheck}")

            st.markdown("### πŸ“‹ **Recommended Actions:**")

            col1, col2, col3 = st.columns(3)

            with col1:
                # Each button resets the stored results so the next upload /
                # model pick triggers a fresh analysis run.
                if st.button("πŸ”„ Reload Image", help="Upload the same image again for re-analysis"):
                    st.info("πŸ‘† Use the file uploader in the sidebar to reload the image")
                    st.session_state.analysis_results = None
                    st.rerun()

            with col2:
                if st.button("πŸ“· Upload Different Image", help="Try a different scan/photo of the same document"):
                    st.info("πŸ‘† Use the file uploader in the sidebar to try a different image")
                    st.session_state.analysis_results = None
                    st.rerun()

            with col3:
                if st.button("πŸ€– Try Different Model", help="Use a different LLM model for analysis"):
                    st.info("πŸ‘† Select a different model in the sidebar and re-analyze")
                    st.session_state.analysis_results = None
                    st.rerun()

            st.markdown("### πŸ’‘ **What to Check:**")
            st.info("β€’ Su values and their units (kPa, t/mΒ², ksc, psi, MPa)")
            st.info("β€’ Water content percentages")
            st.info("β€’ Image quality and readability")
            st.info("β€’ Consistency between different test parameters")

    # General warnings — informational only, so the expander stays collapsed.
    general_warnings = validation_recs.get("general_warnings", [])
    if general_warnings:
        with st.expander("⚠️ General Validation Warnings"):
            for warning in general_warnings:
                st.warning(f"β€’ {warning}")
            st.info("πŸ’‘ These are minor inconsistencies that may be acceptable depending on local conditions")
1578
+
1579
# Script entry point: delegate to main() (defined earlier in this file)
# only when executed directly, not when imported as a module.
if __name__ == "__main__":
    main()