update optimization
Changed files:
- app.py (+2 -2)
- src/heart_disease_core.py (+2 -2)
app.py
@@ -121,7 +121,7 @@ def _bar_for_models(results: dict):
 
 def run_predict(*vals):
     if STATE["df"] is None or STATE["models"] is None:
-        return "❌ Models not initialized. Reload the app.",
+        return None, "❌ Models not initialized. Reload the app.", pd.DataFrame()
 
     input_dict = {col: vals[i] for i, col in enumerate(CLEVELAND_FEATURES_ORDER)}
     results = predict_all(STATE["models"], input_dict)
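Context for this change: run_predict appears to feed three Gradio outputs (a chart, a status message, and a table), so the guard clause now returns one value per component instead of a lone one-element tuple. Below is a minimal sketch of the wiring this implies; the component names and the single input are illustrative assumptions, not taken from app.py.

import gradio as gr
import pandas as pd

STATE = {"df": None, "models": None}  # stand-in for the app's module-level state

def run_predict(*vals):
    # Guard clause: one return value per output component (chart, message, table).
    # A lone string or a one-element tuple would not match the three outputs
    # wired below, which is the mismatch the change above fixes.
    if STATE["df"] is None or STATE["models"] is None:
        return None, "❌ Models not initialized. Reload the app.", pd.DataFrame()
    # Real prediction path omitted in this sketch.
    return None, "✅ Models ready.", pd.DataFrame()

with gr.Blocks() as demo:
    age = gr.Number(label="age")   # illustrative single input
    chart = gr.Plot()              # output 1
    status = gr.Markdown()         # output 2
    table = gr.Dataframe()         # output 3
    gr.Button("Predict").click(run_predict, inputs=[age], outputs=[chart, status, table])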
@@ -257,7 +257,7 @@ with gr.Blocks(theme="gstaff/sketch", css=vlai_template.custom_css, fill_width=T
     - k-NN: distance weighting, Manhattan metric, optimized neighbors
     - Random Forest: 200 trees, class balancing, feature sampling
     - Gradient Boosting: regularization, subsampling, lower learning rate
-    - AdaBoost: SAMME
+    - AdaBoost: SAMME algorithm, increased estimators
     - XGBoost: L1/L2 regularization, optimal depth and learning rate
     - **Feature descriptions**:
       - `age`: Patient age in years
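The list above summarizes per-model tuning that lives in src/heart_disease_core.py. As a concrete reading of the k-NN bullet, here is a small sketch; the neighbor count and the scaler are assumptions, since "optimized neighbors" is not spelled out anywhere in this diff.

from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

# Distance weighting and the Manhattan metric match the bullet above;
# n_neighbors=7 is a placeholder for whatever "optimized neighbors" resolves to.
knn_pipeline = Pipeline([
    ("scaler", StandardScaler()),
    ("knn", KNeighborsClassifier(n_neighbors=7, weights="distance", metric="manhattan")),
])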
src/heart_disease_core.py
@@ -189,7 +189,7 @@ def build_models() -> Dict[str, Pipeline]:
             random_state=42,
             n_estimators=150,    # More estimators
             learning_rate=0.8,   # Slower learning for stability
-            algorithm="SAMME
+            algorithm="SAMME"    # Compatible algorithm for newer sklearn
         ))
     ])
 
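Background on the change above: recent scikit-learn releases deprecated and then removed AdaBoostClassifier's "SAMME.R" option, so pinning algorithm="SAMME" keeps build_models() constructible on newer versions (on the very latest releases the algorithm parameter itself may warn, since "SAMME" is the only remaining behavior). Below is a sketch of the step as configured here; the scaler and the step names are assumptions about the surrounding pipeline, which is not visible in this hunk.

from sklearn.ensemble import AdaBoostClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

# Only the AdaBoostClassifier arguments come from the diff; the scaler and the
# step names are guesses about the rest of the pipeline.
ada_pipeline = Pipeline([
    ("scaler", StandardScaler()),
    ("clf", AdaBoostClassifier(
        random_state=42,
        n_estimators=150,    # more estimators
        learning_rate=0.8,   # slower learning for stability
        algorithm="SAMME",   # "SAMME.R" is no longer available in newer scikit-learn
    )),
])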
@@ -241,7 +241,7 @@ def build_models() -> Dict[str, Pipeline]:
         ("rf", RandomForestClassifier(random_state=42, n_estimators=200, max_depth=10,
                                       min_samples_split=5, min_samples_leaf=2, max_features="sqrt",
                                       class_weight="balanced", n_jobs=-1)),
-        ("ada", AdaBoostClassifier(random_state=42, n_estimators=150, learning_rate=0.8, algorithm="SAMME
+        ("ada", AdaBoostClassifier(random_state=42, n_estimators=150, learning_rate=0.8, algorithm="SAMME")),
         ("gb", GradientBoostingClassifier(random_state=42, n_estimators=150, learning_rate=0.08,
                                           max_depth=4, min_samples_split=10, min_samples_leaf=4,
                                           subsample=0.8, max_features="sqrt")),
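The tuples above are base estimators for an ensemble assembled elsewhere in build_models(); the combiner is not visible in this hunk, so the soft-voting wrapper in the sketch below is an assumption made only to keep the example self-contained.

from sklearn.ensemble import (AdaBoostClassifier, GradientBoostingClassifier,
                              RandomForestClassifier, VotingClassifier)

# Estimator settings copied from the hunk above; VotingClassifier and soft voting
# are assumptions, since the actual combiner is defined outside this hunk.
estimators = [
    ("rf", RandomForestClassifier(random_state=42, n_estimators=200, max_depth=10,
                                  min_samples_split=5, min_samples_leaf=2, max_features="sqrt",
                                  class_weight="balanced", n_jobs=-1)),
    ("ada", AdaBoostClassifier(random_state=42, n_estimators=150, learning_rate=0.8,
                               algorithm="SAMME")),
    ("gb", GradientBoostingClassifier(random_state=42, n_estimators=150, learning_rate=0.08,
                                      max_depth=4, min_samples_split=10, min_samples_leaf=4,
                                      subsample=0.8, max_features="sqrt")),
]
ensemble = VotingClassifier(estimators=estimators, voting="soft", n_jobs=-1)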