Quantum9999 committed on
Commit
4ea962a
Β·
verified Β·
1 Parent(s): e6b4fcd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +153 -398
app.py CHANGED
@@ -1,429 +1,184 @@
1
  """
2
- Streamlit Application for Engine Predictive Maintenance
 
3
  """
4
 
5
  import streamlit as st
6
- import pandas as pd
7
- import os
8
  import sys
 
9
 
10
- # Print to console (will show in HF Space logs)
11
- print("=" * 70, file=sys.stderr)
12
- print("APP STARTING - INITIALIZATION", file=sys.stderr)
13
- print("=" * 70, file=sys.stderr)
14
 
15
- # Page Configuration MUST be first
16
- st.set_page_config(
17
- page_title="Engine Predictive Maintenance",
18
- page_icon="πŸ”§",
19
- layout="wide",
20
- initial_sidebar_state="expanded"
21
- )
 
 
 
 
 
 
22
 
23
- # Import after page config
 
24
  try:
25
- print("Importing huggingface_hub...", file=sys.stderr)
 
 
 
 
26
  from huggingface_hub import hf_hub_download, login
27
- print("Importing joblib...", file=sys.stderr)
 
 
28
  import joblib
 
 
29
  print("βœ“ All imports successful", file=sys.stderr)
30
  except Exception as e:
31
- print(f"βœ— Import error: {e}", file=sys.stderr)
32
- st.error(f"Import failed: {e}")
 
 
33
  st.stop()
34
 
35
- # CRITICAL: Feature columns must EXACTLY match model training
36
- FEATURE_COLUMNS = [
37
- "Engine rpm",
38
- "Lub oil pressure",
39
- "Fuel pressure",
40
- "Coolant pressure",
41
- "lub oil temp",
42
- "Coolant temp"
43
- ]
44
-
45
- # Custom CSS
46
- st.markdown("""
47
- <style>
48
- .main-header {
49
- font-size: 42px;
50
- font-weight: bold;
51
- color: #1f77b4;
52
- text-align: center;
53
- margin-bottom: 10px;
54
- }
55
- .sub-header {
56
- font-size: 18px;
57
- color: #555;
58
- text-align: center;
59
- margin-bottom: 30px;
60
- }
61
- .prediction-box {
62
- padding: 20px;
63
- border-radius: 10px;
64
- text-align: center;
65
- font-size: 24px;
66
- font-weight: bold;
67
- margin-top: 20px;
68
- }
69
- .normal {
70
- background-color: #d4edda;
71
- color: #155724;
72
- border: 2px solid #c3e6cb;
73
- }
74
- .maintenance {
75
- background-color: #f8d7da;
76
- color: #721c24;
77
- border: 2px solid #f5c6cb;
78
- }
79
- .metric-card {
80
- background-color: #f8f9fa;
81
- padding: 15px;
82
- border-radius: 8px;
83
- border-left: 4px solid #1f77b4;
84
- }
85
- </style>
86
- """, unsafe_allow_html=True)
87
 
 
 
 
 
88
 
89
- @st.cache_resource
90
- def load_model():
91
- """Load model from Hugging Face with detailed logging and retries"""
92
-
93
- print("\n" + "=" * 70, file=sys.stderr)
94
- print("LOADING MODEL FROM HUGGING FACE", file=sys.stderr)
95
- print("=" * 70, file=sys.stderr)
96
 
97
- max_retries = 3
98
- retry_count = 0
 
 
 
 
99
 
100
- while retry_count < max_retries:
101
- try:
102
- # CORRECT: Use HF_TOKEN (as configured in your HF Space secrets)
103
- hf_token = os.environ.get("HF_TOKEN")
104
- print(f"HF_TOKEN found: {hf_token is not None}", file=sys.stderr)
105
-
106
- if hf_token:
107
- print("Authenticating with Hugging Face...", file=sys.stderr)
108
- login(token=hf_token)
109
- print("βœ“ Authentication successful", file=sys.stderr)
110
- else:
111
- print("⚠ No HF_TOKEN - attempting public access", file=sys.stderr)
112
-
113
- # Download model
114
- print("\nDownloading model...", file=sys.stderr)
115
- print(" Repo: Quantum9999/xgb-predictive-maintenance", file=sys.stderr)
116
- print(" File: xgb_tuned_model.joblib", file=sys.stderr)
117
-
118
- model_path = hf_hub_download(
119
- repo_id="Quantum9999/xgb-predictive-maintenance",
120
- filename="xgb_tuned_model.joblib",
121
- token=hf_token,
122
- cache_dir="/tmp/hf_cache" # Use tmp for faster access
123
- )
124
- print(f"βœ“ Model downloaded: {model_path}", file=sys.stderr)
125
-
126
- # Load model
127
- print("Loading model into memory...", file=sys.stderr)
128
- model = joblib.load(model_path)
129
- print("βœ“ Model loaded successfully", file=sys.stderr)
130
-
131
- # Verify model features
132
- if hasattr(model, 'feature_names_in_'):
133
- print(f"Model expects features: {model.feature_names_in_}", file=sys.stderr)
134
-
135
- print("=" * 70 + "\n", file=sys.stderr)
136
-
137
- return model, None
138
-
139
- except Exception as e:
140
- retry_count += 1
141
- error_msg = f"Model loading attempt {retry_count}/{max_retries} failed: {str(e)}"
142
- print(f"βœ— {error_msg}", file=sys.stderr)
143
-
144
- if retry_count < max_retries:
145
- import time
146
- wait_time = 2 * retry_count
147
- print(f"Retrying in {wait_time} seconds...", file=sys.stderr)
148
- time.sleep(wait_time)
149
- else:
150
- import traceback
151
- print(f"Final traceback:\n{traceback.format_exc()}", file=sys.stderr)
152
- print("=" * 70 + "\n", file=sys.stderr)
153
- return None, error_msg
154
-
155
-
156
- def main():
157
- """Main application"""
158
 
159
- print("Starting main application...", file=sys.stderr)
 
 
 
 
160
 
161
- # Header
162
- st.markdown(
163
- '<div class="main-header">πŸ”§ Engine Predictive Maintenance System</div>',
164
- unsafe_allow_html=True
165
- )
166
- st.markdown(
167
- '<div class="sub-header">AI-powered engine health monitoring & failure prediction</div>',
168
- unsafe_allow_html=True
169
- )
170
-
171
- # Load model with progress indicator
172
- with st.spinner("Loading AI model... This may take a moment."):
173
- model, error = load_model()
174
 
175
- if model is None:
176
- st.error(f"❌ Failed to load prediction model")
177
- st.code(error)
178
-
179
- with st.expander("πŸ” Troubleshooting"):
180
- st.write("**Possible Issues:**")
181
- st.write("1. HF_TOKEN not set in Space secrets")
182
- st.write("2. Model repository is private")
183
- st.write("3. Model filename is incorrect")
184
- st.write("4. Network connectivity issue")
185
-
186
- st.write("\n**Current Configuration:**")
187
- st.write(f"- HF_TOKEN set: {os.environ.get('HF_TOKEN') is not None}")
188
- st.write("- Expected repo: Quantum9999/xgb-predictive-maintenance")
189
- st.write("- Expected file: xgb_tuned_model.joblib")
190
-
191
- st.write("\n**Your Setup (from screenshots):**")
192
- st.write("βœ… HF Space has HF_TOKEN secret (Image 1)")
193
- st.write("βœ… GitHub has HF_EN_TOKEN secret (Image 2)")
194
- st.write("βœ… GitHub token for pushing code (Image 3)")
195
-
196
- st.write("\n**Next Steps:**")
197
- st.write("1. Verify HF_TOKEN secret exists in Space settings")
198
- st.write("2. Check Space logs for detailed error messages")
199
- st.write("3. Ensure model repo is accessible")
200
-
201
- st.stop()
202
-
203
- st.success("βœ“ Model loaded successfully!")
204
-
205
- # Sidebar
206
- with st.sidebar:
207
- st.header("ℹ️ About")
208
- st.write(
209
- "This application predicts engine maintenance needs using "
210
- "machine learning analysis of 6 critical sensor parameters."
211
- )
212
-
213
- st.header("πŸ“Š Model Information")
214
- st.markdown("""
215
- - **Algorithm**: XGBoost Classifier
216
- - **Features**: 6 sensor readings
217
- - **Target Classes**:
218
- - 0: Normal Operation
219
- - 1: Maintenance Required
220
- - **Training Data**: 19,535 records
221
- """)
222
-
223
- st.header("🎯 How to Use")
224
- st.markdown("""
225
- 1. Enter current sensor readings
226
- 2. Click 'Predict Engine Condition'
227
- 3. Review prediction and confidence
228
- 4. Take action based on results
229
- """)
230
-
231
- st.header("πŸ“ˆ Sensor Ranges")
232
- st.markdown("""
233
- **Normal Operating Ranges:**
234
- - RPM: 161 - 2,239
235
- - Lub Oil Pressure: 0.003 - 7.3 bar
236
- - Fuel Pressure: 0.003 - 21.1 bar
237
- - Coolant Pressure: 0.002 - 7.5 bar
238
- - Lub Oil Temp: 71 - 90 Β°C
239
- - Coolant Temp: 62 - 196 Β°C
240
- """)
241
 
242
- # Main content
243
- st.header("πŸ“ Enter Engine Sensor Readings")
244
- st.markdown("---")
245
 
246
- # Input columns
247
- col1, col2 = st.columns(2)
 
248
 
249
- with col1:
250
- st.subheader("βš™οΈ Speed & Pressure Sensors")
251
-
252
- engine_rpm = st.number_input(
253
- "Engine RPM (Revolutions per Minute)",
254
- min_value=100.0,
255
- max_value=2500.0,
256
- value=791.0,
257
- step=10.0,
258
- help="Engine speed - Normal range: 161-2,239 RPM"
259
- )
260
-
261
- lub_oil_pressure = st.number_input(
262
- "Lubrication Oil Pressure (bar)",
263
- min_value=0.0,
264
- max_value=10.0,
265
- value=3.3,
266
- step=0.1,
267
- help="Lubricating oil pressure - Normal range: 0.003-7.266 bar"
268
- )
269
-
270
- fuel_pressure = st.number_input(
271
- "Fuel Pressure (bar)",
272
- min_value=0.0,
273
- max_value=25.0,
274
- value=6.7,
275
- step=0.1,
276
- help="Fuel delivery pressure - Normal range: 0.003-21.138 bar"
277
- )
278
 
279
- with col2:
280
- st.subheader("🌑️ Temperature & Coolant Sensors")
281
-
282
- coolant_pressure = st.number_input(
283
- "Coolant Pressure (bar)",
284
- min_value=0.0,
285
- max_value=10.0,
286
- value=2.3,
287
- step=0.1,
288
- help="Coolant system pressure - Normal range: 0.002-7.479 bar"
289
- )
290
-
291
- lub_oil_temp = st.number_input(
292
- "Lubrication Oil Temperature (Β°C)",
293
- min_value=60.0,
294
- max_value=100.0,
295
- value=77.6,
296
- step=0.5,
297
- help="Lubricating oil temperature - Normal range: 71.3-89.6 Β°C"
298
- )
299
-
300
- coolant_temp = st.number_input(
301
- "Coolant Temperature (Β°C)",
302
- min_value=50.0,
303
- max_value=200.0,
304
- value=78.4,
305
- step=0.5,
306
- help="Engine coolant temperature - Normal range: 61.7-195.5 Β°C"
307
- )
308
 
309
- # Prediction button
310
- st.markdown("---")
 
 
311
 
312
- if st.button("πŸ” Predict Engine Condition", use_container_width=True, type="primary"):
313
- # Create input DataFrame with exact column names
314
- input_df = pd.DataFrame([{
315
- "Engine rpm": engine_rpm,
316
- "Lub oil pressure": lub_oil_pressure,
317
- "Fuel pressure": fuel_pressure,
318
- "Coolant pressure": coolant_pressure,
319
- "lub oil temp": lub_oil_temp,
320
- "Coolant temp": coolant_temp
321
- }])
322
-
323
- try:
324
- print(f"Making prediction with input: {input_df.to_dict()}", file=sys.stderr)
325
-
326
- # Make prediction
327
- prediction = model.predict(input_df)[0]
328
- proba = model.predict_proba(input_df)[0]
329
-
330
- print(f"Prediction: {prediction}, Probabilities: {proba}", file=sys.stderr)
331
-
332
- # Display results
333
- st.markdown("---")
334
- st.header("🎯 Prediction Result")
335
-
336
- if prediction == 0:
337
- st.markdown(
338
- '<div class="prediction-box normal">βœ… Engine Operating Normally</div>',
339
- unsafe_allow_html=True
340
- )
341
- st.success("βœ“ No maintenance required at this time. Engine is functioning within normal parameters.")
342
- else:
343
- st.markdown(
344
- '<div class="prediction-box maintenance">⚠️ Maintenance Required</div>',
345
- unsafe_allow_html=True
346
- )
347
- st.warning("⚠ Engine shows signs of potential failure. Schedule maintenance as soon as possible to prevent breakdown.")
348
-
349
- # Confidence scores
350
- st.subheader("πŸ“Š Prediction Confidence")
351
-
352
- conf_col1, conf_col2 = st.columns(2)
353
-
354
- with conf_col1:
355
- st.markdown('<div class="metric-card">', unsafe_allow_html=True)
356
- st.metric(
357
- label="Normal Operation Probability",
358
- value=f"{proba[0]:.2%}",
359
- help="Confidence that engine is operating normally"
360
- )
361
- st.markdown('</div>', unsafe_allow_html=True)
362
-
363
- with conf_col2:
364
- st.markdown('<div class="metric-card">', unsafe_allow_html=True)
365
- st.metric(
366
- label="Maintenance Required Probability",
367
- value=f"{proba[1]:.2%}",
368
- help="Confidence that engine requires maintenance"
369
- )
370
- st.markdown('</div>', unsafe_allow_html=True)
371
-
372
- # Input summary
373
- with st.expander("πŸ“‹ View Input Summary"):
374
- st.dataframe(
375
- input_df.T.rename(columns={0: "Value"}),
376
- use_container_width=True
377
- )
378
-
379
- # Recommendations
380
- with st.expander("πŸ’‘ Recommendations"):
381
- if prediction == 0:
382
- st.markdown("""
383
- **Current Status: Healthy**
384
- - Continue regular monitoring
385
- - Maintain current maintenance schedule
386
- - Monitor for any sudden changes in sensor readings
387
- - Schedule next routine inspection as planned
388
- """)
389
- else:
390
- st.markdown("""
391
- **Immediate Actions Required:**
392
- - Schedule comprehensive engine inspection
393
- - Check lubrication system
394
- - Inspect cooling system
395
- - Review fuel delivery system
396
- - Monitor engine closely until serviced
397
- - Consider reducing operational load
398
- """)
399
-
400
- except Exception as e:
401
- error_msg = f"Prediction error: {e}"
402
- print(f"βœ— {error_msg}", file=sys.stderr)
403
- import traceback
404
- print(f"Traceback:\n{traceback.format_exc()}", file=sys.stderr)
405
-
406
- st.error(f"❌ {error_msg}")
407
- st.info("Please verify all sensor values are within valid ranges and try again.")
408
 
409
- # Footer
410
- st.markdown("---")
411
- st.markdown(
412
- "<p style='text-align: center; color: #666; font-size: 14px;'>"
413
- "πŸ€– Built with XGBoost & Streamlit | πŸ€— Model hosted on Hugging Face<br>"
414
- "Developed as part of ML Deployment & Automation Project"
415
- "</p>",
416
- unsafe_allow_html=True
417
- )
418
-
419
-
420
- if __name__ == "__main__":
421
- print("Entering main()...", file=sys.stderr)
422
  try:
423
- main()
424
- print("βœ“ Main completed successfully", file=sys.stderr)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
425
  except Exception as e:
426
- print(f"βœ— FATAL ERROR: {e}", file=sys.stderr)
427
- import traceback
428
- print(f"Traceback:\n{traceback.format_exc()}", file=sys.stderr)
429
- st.error(f"Application error: {e}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  """
2
+ DIAGNOSTIC VERSION - Streamlit App for Debugging
3
+ This version has extensive logging to find the exact failure point
4
  """
5
 
6
  import streamlit as st
 
 
7
  import sys
8
+ import os
9
 
10
+ print("=" * 80, file=sys.stderr)
11
+ print("DIAGNOSTIC APP STARTING", file=sys.stderr)
12
+ print("=" * 80, file=sys.stderr)
 
13
 
14
+ # Test 1: Page Config
15
+ try:
16
+ print("\n[TEST 1] Setting page config...", file=sys.stderr)
17
+ st.set_page_config(
18
+ page_title="Engine Predictive Maintenance - DIAGNOSTIC",
19
+ page_icon="πŸ”§",
20
+ layout="wide"
21
+ )
22
+ print("βœ“ Page config successful", file=sys.stderr)
23
+ except Exception as e:
24
+ print(f"βœ— Page config failed: {e}", file=sys.stderr)
25
+ import traceback
26
+ print(traceback.format_exc(), file=sys.stderr)
27
 
28
+ # Test 2: Imports
29
+ print("\n[TEST 2] Testing imports...", file=sys.stderr)
30
  try:
31
+ print(" Importing pandas...", file=sys.stderr)
32
+ import pandas as pd
33
+ print(" βœ“ pandas imported", file=sys.stderr)
34
+
35
+ print(" Importing huggingface_hub...", file=sys.stderr)
36
  from huggingface_hub import hf_hub_download, login
37
+ print(" βœ“ huggingface_hub imported", file=sys.stderr)
38
+
39
+ print(" Importing joblib...", file=sys.stderr)
40
  import joblib
41
+ print(" βœ“ joblib imported", file=sys.stderr)
42
+
43
  print("βœ“ All imports successful", file=sys.stderr)
44
  except Exception as e:
45
+ print(f"βœ— Import failed: {e}", file=sys.stderr)
46
+ import traceback
47
+ print(traceback.format_exc(), file=sys.stderr)
48
+ st.error(f"Import error: {e}")
49
  st.stop()
50
 
51
+ # Test 3: Environment Variables
52
+ print("\n[TEST 3] Checking environment variables...", file=sys.stderr)
53
+ hf_token = os.environ.get("HF_TOKEN")
54
+ print(f" HF_TOKEN exists: {hf_token is not None}", file=sys.stderr)
55
+ if hf_token:
56
+ print(f" HF_TOKEN length: {len(hf_token)}", file=sys.stderr)
57
+ print(f" HF_TOKEN starts with: {hf_token[:7]}...", file=sys.stderr)
58
+ else:
59
+ print(" ⚠ WARNING: HF_TOKEN not found!", file=sys.stderr)
60
+
61
+ # Test 4: Hugging Face Authentication
62
+ print("\n[TEST 4] Testing Hugging Face authentication...", file=sys.stderr)
63
+ if hf_token:
64
+ try:
65
+ print(" Attempting login...", file=sys.stderr)
66
+ login(token=hf_token)
67
+ print(" βœ“ Login successful", file=sys.stderr)
68
+ except Exception as e:
69
+ print(f" βœ— Login failed: {e}", file=sys.stderr)
70
+ import traceback
71
+ print(traceback.format_exc(), file=sys.stderr)
72
+ else:
73
+ print(" ⚠ Skipping login (no token)", file=sys.stderr)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
74
 
75
+ # Test 5: Model Download
76
+ print("\n[TEST 5] Testing model download...", file=sys.stderr)
77
+ model = None
78
+ model_error = None
79
 
80
+ try:
81
+ print(" Repository: Quantum9999/xgb-predictive-maintenance", file=sys.stderr)
82
+ print(" Filename: xgb_tuned_model.joblib", file=sys.stderr)
83
+ print(" Cache dir: /tmp/hf_cache", file=sys.stderr)
84
+ print(" Starting download...", file=sys.stderr)
 
 
85
 
86
+ model_path = hf_hub_download(
87
+ repo_id="Quantum9999/xgb-predictive-maintenance",
88
+ filename="xgb_tuned_model.joblib",
89
+ token=hf_token,
90
+ cache_dir="/tmp/hf_cache"
91
+ )
92
 
93
+ print(f" βœ“ Download successful!", file=sys.stderr)
94
+ print(f" Model path: {model_path}", file=sys.stderr)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
95
 
96
+ # Test 6: Model Loading
97
+ print("\n[TEST 6] Testing model loading...", file=sys.stderr)
98
+ print(" Loading model into memory...", file=sys.stderr)
99
+ model = joblib.load(model_path)
100
+ print(" βœ“ Model loaded successfully!", file=sys.stderr)
101
 
102
+ # Test 7: Model Properties
103
+ print("\n[TEST 7] Checking model properties...", file=sys.stderr)
104
+ print(f" Model type: {type(model)}", file=sys.stderr)
105
+ if hasattr(model, 'feature_names_in_'):
106
+ print(f" Expected features: {model.feature_names_in_}", file=sys.stderr)
107
+ if hasattr(model, 'n_features_in_'):
108
+ print(f" Number of features: {model.n_features_in_}", file=sys.stderr)
 
 
 
 
 
 
109
 
110
+ except Exception as e:
111
+ model_error = str(e)
112
+ print(f" βœ— Model loading failed: {e}", file=sys.stderr)
113
+ import traceback
114
+ print(traceback.format_exc(), file=sys.stderr)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
115
 
116
+ print("\n" + "=" * 80, file=sys.stderr)
117
+ print("DIAGNOSTIC TESTS COMPLETED", file=sys.stderr)
118
+ print("=" * 80 + "\n", file=sys.stderr)
119
 
120
+ # Display results to user
121
+ st.title("πŸ” Diagnostic Mode")
122
+ st.write("This is a diagnostic version to identify the issue.")
123
 
124
+ st.header("Test Results:")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
125
 
126
+ st.subheader("1. Environment Variables")
127
+ if hf_token:
128
+ st.success(f"βœ“ HF_TOKEN found (length: {len(hf_token)})")
129
+ else:
130
+ st.error("βœ— HF_TOKEN not found in environment")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
131
 
132
+ st.subheader("2. Model Loading")
133
+ if model is not None:
134
+ st.success("βœ“ Model loaded successfully!")
135
+ st.write(f"Model type: {type(model)}")
136
 
137
+ if hasattr(model, 'feature_names_in_'):
138
+ st.write("Expected features:")
139
+ st.code(str(model.feature_names_in_))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
140
 
141
+ # Try a test prediction
142
+ st.subheader("3. Test Prediction")
 
 
 
 
 
 
 
 
 
 
 
143
  try:
144
+ import pandas as pd
145
+ test_input = pd.DataFrame([{
146
+ "Engine rpm": 791.0,
147
+ "Lub oil pressure": 3.3,
148
+ "Fuel pressure": 6.7,
149
+ "Coolant pressure": 2.3,
150
+ "lub oil temp": 77.6,
151
+ "Coolant temp": 78.4
152
+ }])
153
+
154
+ prediction = model.predict(test_input)[0]
155
+ proba = model.predict_proba(test_input)[0]
156
+
157
+ st.success("βœ“ Test prediction successful!")
158
+ st.write(f"Prediction: {prediction}")
159
+ st.write(f"Probabilities: {proba}")
160
+
161
  except Exception as e:
162
+ st.error(f"βœ— Test prediction failed: {e}")
163
+ st.code(str(e))
164
+
165
+ else:
166
+ st.error("βœ— Model loading failed!")
167
+ if model_error:
168
+ st.code(model_error)
169
+ st.warning("Check Container logs for detailed traceback")
170
+
171
+ st.divider()
172
+ st.info("πŸ“ Check the 'Container' logs tab for detailed diagnostic information")
173
+
174
+ st.header("Next Steps:")
175
+ if model is not None:
176
+ st.success("βœ… Everything works! The issue might be with the healthcheck timing.")
177
+ st.write("Recommendation: Just wait longer for the Space to become healthy, or increase healthcheck start-period to 90s")
178
+ else:
179
+ st.error("There's a real issue with model loading.")
180
+ st.write("Common causes:")
181
+ st.write("1. HF_TOKEN is wrong or expired")
182
+ st.write("2. Model file doesn't exist in the repository")
183
+ st.write("3. Network/connectivity issue")
184
+ st.write("4. File permissions issue")