Upload app.py with huggingface_hub
Browse files
app.py
CHANGED
|
@@ -1155,6 +1155,30 @@ def load_model_manual():
|
|
| 1155 |
except Exception as e:
|
| 1156 |
logger.error(f"Manual model loading error: {str(e)}")
|
| 1157 |
return jsonify({'success': False, 'error': str(e)}), 500
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1158 |
@app.route('/health')
|
| 1159 |
def health():
|
| 1160 |
"""Health check endpoint"""
|
|
@@ -1180,7 +1204,7 @@ if __name__ == '__main__':
|
|
| 1180 |
logger.info("Starting AEGIS Economics AI with LangGraph...")
|
| 1181 |
logger.info(f"LangGraph available: {LANGGRAPH_AVAILABLE}")
|
| 1182 |
|
| 1183 |
-
# Load model
|
| 1184 |
logger.info("Loading model from Gaston895/Aegisecon1...")
|
| 1185 |
model_loaded = load_model()
|
| 1186 |
|
|
@@ -1189,4 +1213,17 @@ if __name__ == '__main__':
|
|
| 1189 |
else:
|
| 1190 |
logger.error("❌ Model failed to load. Starting server anyway, but model endpoints will fail.")
|
| 1191 |
|
| 1192 |
-
app.run(host='0.0.0.0', port=7860, debug=False)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1155 |
except Exception as e:
|
| 1156 |
logger.error(f"Manual model loading error: {str(e)}")
|
| 1157 |
return jsonify({'success': False, 'error': str(e)}), 500
|
@app.route('/startup_check')
def startup_check():
    """Report startup status and lazily load the model if it is missing.

    Returns a JSON object with:
      - model_loaded / tokenizer_loaded: whether the module-level
        ``chat_pipeline`` / ``tokenizer`` globals are populated
      - langgraph_available: the LANGGRAPH_AVAILABLE feature flag
      - model_load_attempted / model_load_success: present only when this
        request triggered a load attempt

    On any failure, responds with ``{'error': ...}`` and HTTP 500.
    """
    try:
        status = {
            'model_loaded': chat_pipeline is not None,
            'tokenizer_loaded': tokenizer is not None,
            'langgraph_available': LANGGRAPH_AVAILABLE
        }

        # Lazy-load: if the pipeline global is still unset, attempt a load
        # now so the first caller of this endpoint warms the model.
        # Explicit None check for consistency with the 'is not None' tests
        # above (object truthiness of a pipeline is not guaranteed).
        if chat_pipeline is None:
            logger.info("Model not loaded, attempting to load...")
            success = load_model()
            status['model_load_attempted'] = True
            status['model_load_success'] = success
            # Re-read the global: load_model() repopulates chat_pipeline.
            status['model_loaded'] = chat_pipeline is not None

        return jsonify(status)

    except Exception as e:
        logger.error(f"Startup check error: {str(e)}")
        return jsonify({'error': str(e)}), 500
| 1182 |
@app.route('/health')
|
| 1183 |
def health():
|
| 1184 |
"""Health check endpoint"""
|
|
|
|
| 1204 |
logger.info("Starting AEGIS Economics AI with LangGraph...")
|
| 1205 |
logger.info(f"LangGraph available: {LANGGRAPH_AVAILABLE}")
|
| 1206 |
|
| 1207 |
+
# Load model immediately on startup
|
| 1208 |
logger.info("Loading model from Gaston895/Aegisecon1...")
|
| 1209 |
model_loaded = load_model()
|
| 1210 |
|
|
|
|
| 1213 |
else:
|
| 1214 |
logger.error("❌ Model failed to load. Starting server anyway, but model endpoints will fail.")
|
| 1215 |
|
| 1216 |
+
app.run(host='0.0.0.0', port=7860, debug=False)
|
| 1217 |
+
else:
|
| 1218 |
+
# For production deployment (Gunicorn), load model when module is imported
|
| 1219 |
+
logger.info("Production mode: Loading model during module import...")
|
| 1220 |
+
logger.info(f"LangGraph available: {LANGGRAPH_AVAILABLE}")
|
| 1221 |
+
|
| 1222 |
+
# Load model immediately
|
| 1223 |
+
logger.info("Loading model from Gaston895/Aegisecon1...")
|
| 1224 |
+
model_loaded = load_model()
|
| 1225 |
+
|
| 1226 |
+
if model_loaded:
|
| 1227 |
+
logger.info("✅ Model loaded successfully for production!")
|
| 1228 |
+
else:
|
| 1229 |
+
logger.error("❌ Model failed to load in production mode!")
|