import os import json import time from datetime import datetime import numpy as np import pandas as pd import streamlit as st import matplotlib.pyplot as plt import seaborn as sns import joblib import zipfile import io import gc # ML imports from sklearn.model_selection import train_test_split from sklearn.linear_model import LinearRegression, Ridge from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, ExtraTreesRegressor from sklearn.preprocessing import StandardScaler, PolynomialFeatures from sklearn.decomposition import PCA from sklearn.cluster import KMeans from sklearn.metrics import mean_squared_error, r2_score # SHAP import shap # Optuna (used later) import optuna from sklearn.model_selection import cross_val_score, KFold from sklearn.neural_network import MLPRegressor # --- Safe defaults for Streamlit session state --- defaults = { "llm_result": None, "automl_summary": {}, "shap_recommendations": [], "hf_clicked": False, "hf_ran_once": False, "run_automl_clicked": False, } for k, v in defaults.items(): st.session_state.setdefault(k, v) if "llm_result" not in st.session_state: st.session_state["llm_result"] = None if "automl_summary" not in st.session_state: st.session_state["automl_summary"] = {} if "shap_recommendations" not in st.session_state: st.session_state["shap_recommendations"] = [] if "hf_clicked" not in st.session_state: st.session_state["hf_clicked"] = False # ------------------------- # Config & paths # ------------------------- st.set_page_config(page_title="Steel Authority of India Limited (MODEX)", layout="wide") plt.style.use("seaborn-v0_8-muted") sns.set_palette("muted") sns.set_style("whitegrid") LOG_DIR = "./logs" os.makedirs(LOG_DIR, exist_ok=True) # Permanent artifact filenames (never change) CSV_PATH = os.path.join(LOG_DIR, "flatfile_universe_advanced.csv") META_PATH = os.path.join(LOG_DIR, "feature_metadata_advanced.json") ENSEMBLE_PATH = os.path.join(LOG_DIR, "ensemble_models.joblib") LOG_PATH = os.path.join(LOG_DIR, "run_master.log") # Simple logger that time-stamps inside one file SESSION_STARTED = False def log(msg: str): global SESSION_STARTED stamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") with open(LOG_PATH, "a", encoding="utf-8") as f: if not SESSION_STARTED: f.write("\n\n===== New Session Started at {} =====\n".format(stamp)) SESSION_STARTED = True f.write(f"[{stamp}] {msg}\n") print(msg) log("=== Streamlit session started ===") if os.path.exists("/data"): st.sidebar.success(f" Using persistent storage | Logs directory: {LOG_DIR}") else: st.sidebar.warning(f" Using ephemeral storage | Logs directory: {LOG_DIR}. Data will be lost on rebuild.") # ------------------------- # Utility: generate advanced dataset if missing # ------------------------- def generate_advanced_flatfile( n_rows=3000, random_seed=42, max_polynomial_new=60, global_variance_multiplier=1.0, variance_overrides=None, ): """ Generates a large synthetic, physics-aligned dataset with many engineered features. Allows control of variability per feature (through variance_overrides) or globally (via global_variance_multiplier). """ np.random.seed(random_seed) os.makedirs(LOG_DIR, exist_ok=True) if variance_overrides is None: variance_overrides = {} # --- base natural features across 8 use cases (expanded) natural_feats = [ "vibration_x","vibration_y","motor_current","rpm","bearing_temp","ambient_temp","lube_pressure","power_factor", "furnace_temp","tap_temp","slag_temp","offgas_co","offgas_co2","o2_probe_pct","c_feed_rate","arc_power","furnace_pressure","feed_time", "mold_temp","casting_speed","nozzle_pressure","cooling_water_temp","billet_length","chemical_C","chemical_Mn","chemical_Si","chemical_S", "roll_speed","motor_load","coolant_flow","exit_temp","strip_thickness","line_tension","roller_vibration", "lighting_intensity","surface_temp","image_entropy_proxy", "spectro_Fe","spectro_C","spectro_Mn","spectro_Si","time_since_last_sample", "batch_id_numeric","weight_input","weight_output","time_in_queue","conveyor_speed", "shell_temp","lining_thickness","water_flow","cooling_out_temp","heat_flux" ] natural_feats = list(dict.fromkeys(natural_feats)) # dedupe # helper: compute adjusted stddev def effective_sd(feature_name, base_sd): # exact name override if feature_name in variance_overrides: return float(variance_overrides[feature_name]) # substring override for key, val in variance_overrides.items(): if key in feature_name: return float(val) # fallback: scaled base return float(base_sd) * float(global_variance_multiplier) # helper sampling heuristics def sample_col(name, n): name_l = name.lower() if "furnace_temp" in name_l or name_l.endswith("_temp") or "tap_temp" in name_l: sd = effective_sd("furnace_temp", 50) return np.random.normal(1550, sd, n) if name_l in ("tap_temp","mold_temp","shell_temp","cooling_out_temp","exit_temp"): sd = effective_sd(name_l, 30) return np.random.normal(200 if "mold" not in name_l else 1500, sd, n) if "offgas_co2" in name_l: sd = effective_sd("offgas_co2", 4) return np.abs(np.random.normal(15, sd, n)) if "offgas_co" in name_l: sd = effective_sd("offgas_co", 5) return np.abs(np.random.normal(20, sd, n)) if "o2" in name_l: sd = effective_sd("o2_probe_pct", 1) return np.clip(np.random.normal(5, sd, n), 0.01, 60) if "arc_power" in name_l or "motor_load" in name_l: sd = effective_sd("arc_power", 120) return np.abs(np.random.normal(600, sd, n)) if "rpm" in name_l: sd = effective_sd("rpm", 30) return np.abs(np.random.normal(120, sd, n)) if "vibration" in name_l: sd = effective_sd("vibration", 0.15) return np.abs(np.random.normal(0.4, sd, n)) if "bearing_temp" in name_l: sd = effective_sd("bearing_temp", 5) return np.random.normal(65, sd, n) if "chemical" in name_l or "spectro" in name_l: sd = effective_sd("chemical", 0.15) return np.random.normal(0.7, sd, n) if "weight" in name_l: sd = effective_sd("weight", 100) return np.random.normal(1000, sd, n) if "conveyor_speed" in name_l or "casting_speed" in name_l: sd = effective_sd("casting_speed", 0.6) return np.random.normal(2.5, sd, n) if "power_factor" in name_l: sd = effective_sd("power_factor", 0.03) return np.clip(np.random.normal(0.92, sd, n), 0.6, 1.0) if "image_entropy_proxy" in name_l: sd = effective_sd("image_entropy_proxy", 0.25) return np.abs(np.random.normal(0.5, sd, n)) if "batch_id" in name_l: return np.random.randint(1000,9999,n) if "time_since" in name_l or "time_in_queue" in name_l: sd = effective_sd("time_since", 20) return np.abs(np.random.normal(30, sd, n)) if "heat_flux" in name_l: sd = effective_sd("heat_flux", 300) return np.abs(np.random.normal(1000, sd, n)) return np.random.normal(0, effective_sd(name_l, 1), n) # build DataFrame df = pd.DataFrame({c: sample_col(c, n_rows) for c in natural_feats}) # timestamps & metadata start = pd.Timestamp("2025-01-01T00:00:00") df["timestamp"] = pd.date_range(start, periods=n_rows, freq="min") df["cycle_minute"] = np.mod(np.arange(n_rows), 80) df["meta_plant_name"] = np.random.choice(["Rourkela","Bhilai","Durgapur","Bokaro","Burnpur","Salem"], n_rows) df["meta_country"] = "India" # --- synthetic features: physics informed proxies df["carbon_proxy"] = df["offgas_co"] / (df["offgas_co2"] + 1.0) df["oxygen_utilization"] = df["offgas_co2"] / (df["offgas_co"] + 1.0) df["power_density"] = df["arc_power"] / (df["weight_input"] + 1.0) df["energy_efficiency"] = df["furnace_temp"] / (df["arc_power"] + 1.0) df["slag_foaming_index"] = (df["slag_temp"] * df["offgas_co"]) / (df["o2_probe_pct"] + 1.0) df["yield_ratio"] = df["weight_output"] / (df["weight_input"] + 1e-9) # rolling stats, lags, rocs for a prioritized set rolling_cols = ["arc_power","furnace_temp","offgas_co","offgas_co2","motor_current","vibration_x","weight_input"] for rc in rolling_cols: if rc in df.columns: df[f"{rc}_roll_mean_3"] = df[rc].rolling(3, min_periods=1).mean() df[f"{rc}_roll_std_5"] = df[rc].rolling(5, min_periods=1).std().fillna(0) df[f"{rc}_lag1"] = df[rc].shift(1).bfill() df[f"{rc}_roc_1"] = df[rc].diff().fillna(0) # interaction & polynomial-lite df["arc_o2_interaction"] = df["arc_power"] * df["o2_probe_pct"] df["carbon_power_ratio"] = df["carbon_proxy"] / (df["arc_power"] + 1e-6) df["temp_power_sqrt"] = df["furnace_temp"] * np.sqrt(np.abs(df["arc_power"]) + 1e-6) # polynomial features limited to first 12 numeric columns numeric = df.select_dtypes(include=[np.number]).fillna(0) poly_source_cols = numeric.columns[:12].tolist() poly = PolynomialFeatures(degree=2, interaction_only=False, include_bias=False) poly_mat = poly.fit_transform(numeric[poly_source_cols]) poly_names = poly.get_feature_names_out(poly_source_cols) poly_df = pd.DataFrame(poly_mat, columns=[f"poly__{n}" for n in poly_names], index=df.index) keep_poly = [c for c in poly_df.columns if c.replace("poly__","") not in poly_source_cols] poly_df = poly_df[keep_poly].iloc[:, :max_polynomial_new] if len(keep_poly) > 0 else poly_df.iloc[:, :0] df = pd.concat([df, poly_df], axis=1) # PCA embeddings across numeric sensors scaler = StandardScaler() scaled = scaler.fit_transform(numeric) pca = PCA(n_components=6, random_state=42) pca_cols = pca.fit_transform(scaled) for i in range(pca_cols.shape[1]): df[f"pca_{i+1}"] = pca_cols[:, i] # KMeans cluster label for operating mode kmeans = KMeans(n_clusters=6, random_state=42, n_init=10) df["operating_mode"] = kmeans.fit_predict(scaled) # surrogate models surrogate_df = df.copy() surrogate_df["furnace_temp_next"] = surrogate_df["furnace_temp"].shift(-1).ffill() features_for_surrogate = [c for c in ["furnace_temp","arc_power","o2_probe_pct","offgas_co","offgas_co2"] if c in df.columns] if len(features_for_surrogate) >= 2: X = surrogate_df[features_for_surrogate].fillna(0) y = surrogate_df["furnace_temp_next"] rf = RandomForestRegressor(n_estimators=50, random_state=42, n_jobs=-1) rf.fit(X, y) df["pred_temp_30s"] = rf.predict(X) else: df["pred_temp_30s"] = df["furnace_temp"] if all(c in df.columns for c in ["offgas_co","offgas_co2","o2_probe_pct"]): X2 = df[["offgas_co","offgas_co2","o2_probe_pct"]].fillna(0) rf2 = RandomForestRegressor(n_estimators=50, random_state=1, n_jobs=-1) rf2.fit(X2, df["carbon_proxy"]) df["pred_carbon_5min"] = rf2.predict(X2) else: df["pred_carbon_5min"] = df["carbon_proxy"] # safety indices & flags df["refractory_limit_flag"] = (df["lining_thickness"] < 140).astype(int) df["max_allowed_power_delta"] = np.clip(df["arc_power"].diff().abs().fillna(0), 0, 2000) # rule-based target df["ARC_ON"] = ((df["arc_power"] > df["arc_power"].median()) & (df["carbon_proxy"] < 1.0)).astype(int) df["prediction_confidence"] = np.clip(np.random.beta(2,5, n_rows), 0.05, 0.99) # clean NaN and infinite df.replace([np.inf, -np.inf], np.nan, inplace=True) df.bfill(inplace=True) df.fillna(0, inplace=True) # save CSV & metadata df["run_timestamp"] = datetime.now().strftime("%Y%m%d_%H%M%S") if os.path.exists(CSV_PATH): df.to_csv(CSV_PATH, mode="a", index=False, header=False) else: df.to_csv(CSV_PATH, index=False) # append run-summary entry to metadata JSON meta_entry = { "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "features": len(df.columns), "rows_added": len(df), "note": "auto-generated block appended" } if os.path.exists(META_PATH): existing = json.load(open(META_PATH)) existing.append(meta_entry) else: existing = [meta_entry] json.dump(existing, open(META_PATH, "w"), indent=2) PDF_PATH = None return CSV_PATH, META_PATH, PDF_PATH # ------------------------- # Ensure dataset exists # ------------------------- if not os.path.exists(CSV_PATH) or not os.path.exists(META_PATH): with st.spinner("Generating synthetic features (this may take ~20-60s)..."): CSV_PATH, META_PATH, PDF_PATH = generate_advanced_flatfile(n_rows=3000, random_seed=42, max_polynomial_new=80) st.success(f"Generated dataset and metadata: {CSV_PATH}") # ------------------------- # Load data & metadata (cached) # ------------------------- @st.cache_data def load_data(csv_path=CSV_PATH, meta_path=META_PATH): df_local = pd.read_csv(csv_path) with open(meta_path, "r") as f: meta_local = json.load(f) return df_local, pd.DataFrame(meta_local) df, meta_df = load_data() df = df.loc[:, ~df.columns.duplicated()] # ------------------------- # Sidebar filters & UI # ------------------------- st.sidebar.title("Feature Explorer - Advanced + SHAP") def ensure_feature_metadata(df: pd.DataFrame, meta_df: pd.DataFrame) -> pd.DataFrame: """Ensure metadata dataframe matches feature count & has required columns.""" required_cols = ["feature_name", "source_type", "formula", "remarks"] if meta_df is None or len(meta_df) < len(df.columns): meta_df = pd.DataFrame({ "feature_name": df.columns, "source_type": [ "engineered" if any(x in c for x in ["poly", "pca", "roll", "lag"]) else "measured" for c in df.columns ], "formula": ["" for _ in df.columns], "remarks": ["auto-inferred synthetic feature metadata" for _ in df.columns], }) st.sidebar.warning("Metadata was summary-only — rebuilt feature-level metadata.") else: for col in required_cols: if col not in meta_df.columns: meta_df[col] = None if meta_df["feature_name"].isna().all(): meta_df["feature_name"] = df.columns if len(meta_df) > len(df.columns): meta_df = meta_df.iloc[: len(df.columns)] return meta_df meta_df = ensure_feature_metadata(df, meta_df) feat_types = sorted(meta_df["source_type"].dropna().unique().tolist()) selected_types = st.sidebar.multiselect("Feature type", feat_types, default=feat_types) if "source_type" not in meta_df.columns or meta_df["source_type"].dropna().empty: filtered_meta = meta_df.copy() else: filtered_meta = meta_df[meta_df["source_type"].isin(selected_types)] numeric_cols = df.select_dtypes(include=[np.number]).columns.tolist() # ------------------------- # Tabs layout # ------------------------- tabs = st.tabs([ "Features", "Visualization", "Correlations", "Statistics", "AutoML + SHAP", "Business Impact", "Bibliography", "Download Saved Files", "View Logs" ]) # ----- Feature metadata with tabs[0]: st.subheader("Feature metadata") st.dataframe( filtered_meta[["feature_name", "source_type", "formula", "remarks"]] .rename(columns={"feature_name": "Feature"}), height=400 ) st.markdown(f"Total features loaded: **{df.shape[1]}** | Rows: **{df.shape[0]}**") # ----- Visualization tab with tabs[1]: st.subheader("Feature Visualization") col = st.selectbox("Choose numeric feature", numeric_cols, index=0) bins = st.slider("Histogram bins", 10, 200, 50) fig, ax = plt.subplots(figsize=(8, 4)) sns.histplot(df[col], bins=bins, kde=True, ax=ax, color="#2C6E91", alpha=0.8) ax.set_title(f"Distribution of {col}", fontsize=12) st.pyplot(fig, clear_figure=True) st.write(df[col].describe().to_frame().T) if all(x in df.columns for x in ["pca_1", "pca_2", "operating_mode"]): st.markdown("### PCA Feature Space — Colored by Operating Mode") fig2, ax2 = plt.subplots(figsize=(6, 5)) sns.scatterplot( data=df.sample(min(1000, len(df)), random_state=42), x="pca_1", y="pca_2", hue="operating_mode", palette="tab10", alpha=0.7, s=40, ax=ax2 ) ax2.set_title("Operating Mode Clusters (PCA Projection)") st.pyplot(fig2, clear_figure=True) # ----- Correlations tab with tabs[2]: st.subheader("Correlation explorer") default_corr = numeric_cols[:20] if len(numeric_cols) >= 20 else numeric_cols corr_sel = st.multiselect("Select features (min 2)", numeric_cols, default=default_corr) if len(corr_sel) >= 2: corr = df[corr_sel].corr() fig, ax = plt.subplots(figsize=(10,8)) sns.heatmap(corr, cmap="RdBu_r", center=0, annot=True, fmt=".2f", linewidths=0.5, cbar_kws={"shrink": 0.7}, ax=ax) st.pyplot(fig, clear_figure=True) else: st.info("Choose at least 2 numeric features to compute correlation.") # ----- Stats tab with tabs[3]: st.subheader("Summary statistics (numeric features)") st.dataframe(df.describe().T.style.format("{:.3f}"), height=500) # ----- AutoML + SHAP tab (Expanded) with tabs[4]: st.subheader("AutoML Ensemble — Expanded Families + Stacking + SHAP") # --- Universal numeric cleaner (runs once per tab) --- def clean_entire_df(df): """Cleans dataframe of any bracketed/scientific string numbers like '[1.551E3]'.""" df_clean = df.copy() for col in df_clean.columns: if df_clean[col].dtype == object: df_clean[col] = ( df_clean[col] .astype(str) .str.replace("[", "", regex=False) .str.replace("]", "", regex=False) .str.replace(",", "", regex=False) .str.strip() .replace(["nan", "NaN", "None", "null", "N/A", "", " "], np.nan) ) df_clean[col] = pd.to_numeric(df_clean[col], errors="coerce") df_clean = df_clean.fillna(0.0).astype(float) return df_clean df = clean_entire_df(df) st.caption("✅ Dataset cleaned globally — all numeric-like values converted safely.") # --- Use Case Selection --- use_case = st.selectbox( "Select Use Case", [ "Predictive Maintenance", "EAF Data Intelligence", "Casting Quality Optimization", "Rolling Mill Energy Optimization", "Surface Defect Detection (Vision AI)", "Material Composition & Alloy Mix AI", "Inventory & Yield Optimization", "Refractory & Cooling Loss Prediction", ], index=1, ) use_case_config = { "Predictive Maintenance": {"target": "bearing_temp", "model_hint": "RandomForest"}, "EAF Data Intelligence": {"target": "furnace_temp", "model_hint": "GradientBoosting"}, "Casting Quality Optimization": {"target": "surface_temp", "model_hint": "GradientBoosting"}, "Rolling Mill Energy Optimization": {"target": "energy_efficiency", "model_hint": "ExtraTrees"}, "Surface Defect Detection (Vision AI)": {"target": "image_entropy_proxy", "model_hint": "GradientBoosting"}, "Material Composition & Alloy Mix AI": {"target": "chemical_C", "model_hint": "RandomForest"}, "Inventory & Yield Optimization": {"target": "yield_ratio", "model_hint": "GradientBoosting"}, "Refractory & Cooling Loss Prediction": {"target": "lining_thickness", "model_hint": "ExtraTrees"}, } cfg = use_case_config.get(use_case, {"target": numeric_cols[0], "model_hint": "RandomForest"}) target, model_hint = cfg["target"], cfg["model_hint"] suggested = [c for c in numeric_cols if any(k in c for k in target.split("_"))] if len(suggested) < 6: suggested = [c for c in numeric_cols if any(k in c for k in ["temp", "power", "energy", "pressure", "yield"])] if len(suggested) < 6: suggested = numeric_cols[:50] features = st.multiselect("Model input features (auto-suggested)", numeric_cols, default=suggested) st.markdown(f"Auto target: `{target}` · Suggested family hint: `{model_hint}`") # --- Sampling configuration --- max_rows = min(df.shape[0], 20000) sample_size = st.slider("Sample rows", 500, max_rows, min(1500, max_rows), step=100) # --- Prepare data --- target_col = target if target in df.columns else next((c for c in df.columns if target.lower() in c.lower()), None) if not target_col: st.error(f"Target `{target}` not found in dataframe.") st.stop() cols_needed = [c for c in features if c in df.columns and c != target_col] sub_df = df.loc[:, cols_needed + [target_col]].sample(n=sample_size, random_state=42).reset_index(drop=True) X = sub_df.drop(columns=[target_col]) y = pd.Series(np.ravel(sub_df[target_col]), name=target_col) # --- Drop constant or leak columns --- leak_cols = ["furnace_temp_next", "pred_temp_30s", "run_timestamp", "timestamp", "batch_id_numeric", "batch_id"] X = X.drop(columns=[c for c in leak_cols if c in X.columns], errors="ignore") X = X.loc[:, X.nunique() > 1] # --- AutoML Settings --- st.markdown("### Ensemble & AutoML Settings") max_trials = st.slider("Optuna trials per family", 5, 80, 20, step=5) top_k = st.slider("Max base models in ensemble", 2, 8, 5) allow_advanced = st.checkbox("Include advanced families (XGBoost, LightGBM, CatBoost)", value=True) available_models = ["RandomForest", "ExtraTrees"] optional_families = {} if allow_advanced: try: import xgboost as xgb; optional_families["XGBoost"] = True; available_models.append("XGBoost") except Exception: optional_families["XGBoost"] = False try: import lightgbm as lgb; optional_families["LightGBM"] = True; available_models.append("LightGBM") except Exception: optional_families["LightGBM"] = False try: import catboost as cb; optional_families["CatBoost"] = True; available_models.append("CatBoost") except Exception: optional_families["CatBoost"] = False st.markdown(f"Available families: {', '.join(available_models)}") # --- Family tuner --- def tune_family(fam, X_local, y_local, n_trials=20): import optuna from sklearn.model_selection import cross_val_score from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor def obj(trial): if fam == "RandomForest": m = RandomForestRegressor( n_estimators=trial.suggest_int("n_estimators", 100, 800), max_depth=trial.suggest_int("max_depth", 4, 30), random_state=42, n_jobs=-1, ) elif fam == "ExtraTrees": m = ExtraTreesRegressor( n_estimators=trial.suggest_int("n_estimators", 100, 800), max_depth=trial.suggest_int("max_depth", 4, 30), random_state=42, n_jobs=-1, ) elif fam == "XGBoost" and optional_families.get("XGBoost"): m = xgb.XGBRegressor( n_estimators=trial.suggest_int("n_estimators", 100, 800), max_depth=trial.suggest_int("max_depth", 3, 12), learning_rate=trial.suggest_float("lr", 0.01, 0.3, log=True), tree_method="hist", verbosity=0 ) elif fam == "LightGBM" and optional_families.get("LightGBM"): m = lgb.LGBMRegressor( n_estimators=trial.suggest_int("n_estimators", 100, 800), max_depth=trial.suggest_int("max_depth", 3, 16), learning_rate=trial.suggest_float("lr", 0.01, 0.3, log=True) ) elif fam == "CatBoost" and optional_families.get("CatBoost"): m = cb.CatBoostRegressor( iterations=trial.suggest_int("iterations", 200, 800), depth=trial.suggest_int("depth", 4, 10), learning_rate=trial.suggest_float("lr", 0.01, 0.3, log=True), verbose=0 ) else: m = RandomForestRegressor(random_state=42) try: return np.mean(cross_val_score(m, X_local, y_local, cv=3, scoring="r2")) except Exception: return -999 study = optuna.create_study(direction="maximize") study.optimize(obj, n_trials=n_trials, show_progress_bar=False) params = study.best_trial.params if study.trials else {} model = RandomForestRegressor(random_state=42) return {"family": fam, "model_obj": model, "best_params": params, "cv_score": study.best_value} # --- Run button --- if st.button("Run AutoML + SHAP"): with st.spinner("Training and stacking..."): tuned_results = [] families = ["RandomForest", "ExtraTrees"] if allow_advanced: for f in ["XGBoost", "LightGBM", "CatBoost"]: if optional_families.get(f): families.append(f) for fam in families: tuned_results.append(tune_family(fam, X, y, n_trials=max_trials)) lb = pd.DataFrame([{"family": r["family"], "cv_r2": r["cv_score"]} for r in tuned_results]).sort_values("cv_r2", ascending=False) st.dataframe(lb.round(4)) # --- Stacking --- from sklearn.feature_selection import SelectKBest, f_regression from sklearn.linear_model import LinearRegression from sklearn.model_selection import KFold, train_test_split from sklearn.metrics import r2_score scaler = StandardScaler() X_scaled = pd.DataFrame(scaler.fit_transform(X), columns=X.columns) selector = SelectKBest(f_regression, k=min(40, X_scaled.shape[1])) X_sel = pd.DataFrame(selector.fit_transform(X_scaled, y), columns=[X.columns[i] for i in selector.get_support(indices=True)]) kf = KFold(n_splits=5, shuffle=True, random_state=42) oof_preds, base_models = pd.DataFrame(index=X_sel.index), [] for fam, entry in [(r["family"], r) for r in tuned_results if r.get("model_obj")]: model = entry["model_obj"] preds = np.zeros(X_sel.shape[0]) for tr, va in kf.split(X_sel): model.fit(X_sel.iloc[tr], y.iloc[tr]) preds[va] = model.predict(X_sel.iloc[va]) oof_preds[f"{fam}_oof"] = preds model.fit(X_sel, y) base_models.append({"family": fam, "model": model}) meta = LinearRegression(positive=True) meta.fit(oof_preds, y) y_pred = meta.predict(oof_preds) final_r2 = r2_score(y, y_pred) st.success(f"Stacked Ensemble R² = {final_r2:.4f}") # --- Operator Advisory --- st.markdown("---") st.subheader("Operator Advisory — Real-Time Recommendations") try: top_base = base_models[0]["model"] sample_X = X_sel.sample(min(300, len(X_sel)), random_state=42) expl = shap.TreeExplainer(top_base) shap_vals = expl.shap_values(sample_X) if isinstance(shap_vals, list): shap_vals = shap_vals[0] imp = pd.DataFrame({ "Feature": sample_X.columns, "Mean |SHAP|": np.abs(shap_vals).mean(axis=0), "Mean SHAP Sign": np.sign(shap_vals).mean(axis=0) }).sort_values("Mean |SHAP|", ascending=False) st.dataframe(imp.head(5)) recs = [] for _, r in imp.head(5).iterrows(): if r["Mean SHAP Sign"] > 0.05: recs.append(f"Increase `{r['Feature']}` likely increases `{target}`") elif r["Mean SHAP Sign"] < -0.05: recs.append(f"Decrease `{r['Feature']}` likely increases `{target}`") else: recs.append(f"`{r['Feature']}` neutral for `{target}`") st.write("\n".join(recs)) # --- Hugging Face advisory --- import requests, json, textwrap HF_TOKEN = os.getenv("HF_TOKEN") if not HF_TOKEN: st.error("HF_TOKEN not detected.") else: API_URL = "https://router.huggingface.co/hf-inference/models/meta-llama/Llama-3-8B-Instruct" headers = {"Authorization": f"Bearer {HF_TOKEN}"} prompt = textwrap.dedent(f""" You are an expert metallurgical advisor. Recommendations: {recs} Target: {target} Use case: {use_case} Summarize in three professional lines for the shift operator. """) payload = {"inputs": prompt, "parameters": {"max_new_tokens": 120, "temperature": 0.6}} with st.spinner("Generating advisory (Llama-3-8B)…"): resp = requests.post(API_URL, headers=headers, json=payload, timeout=90) try: data = resp.json() text = "" if isinstance(data, list) and len(data) > 0 and "generated_text" in data[0]: text = data[0]["generated_text"].strip() elif isinstance(data, dict) and "generated_text" in data: text = data["generated_text"].strip() if text: st.success("✅ Operator Advisory Generated:") st.info(text) else: st.warning("Operator advisory skipped: no text returned.") except Exception as e: st.warning(f"Operator advisory skipped: {e}") except Exception as e: st.warning(f"Operator advisory skipped: {e}") # ----- Business Impact tab with tabs[5]: st.subheader("Business Impact Metrics") target_table = pd.DataFrame([ ["EAF Data Intelligence", "furnace_temp / tap_temp", "Central control variable", "₹20–60 L/year"], ["Casting Optimization", "surface_temp / cooling_water_temp", "Controls billet quality", "₹50 L/year"], ["Rolling Mill", "energy_efficiency", "Energy optimization", "₹5–10 L/year"], ["Refractory Loss Prediction", "lining_thickness / heat_loss_rate", "Wear and downtime", "₹40 L/year"], ], columns=["Use Case","Target Variable","Why It’s Ideal","Business Leverage"]) st.dataframe(target_table, width="stretch") # ----- Bibliography tab with tabs[6]: st.subheader("Annotated Bibliography") refs = [ ("A Survey of Data-Driven Soft Sensing in Ironmaking Systems","Yan et al. (2024)","Soft sensors validate `furnace_temp` and `tap_temp`.","https://doi.org/10.1021/acsomega.4c01254"), ("Optimisation of Operator Support Systems","Ojeda Roldán et al. (2022)","Reinforcement learning for endpoint control.","https://doi.org/10.3390/jmmp6020034"), ("Analyzing the Energy Efficiency of Electric Arc Furnace Steelmaking","Zhuo et al. (2024)","Links arc power and energy KPIs.","https://doi.org/10.3390/met15010113"), ("Dynamic EAF Modeling and Slag Foaming Index Prediction","MacRosty et al.","Supports refractory wear modeling.","https://www.sciencedirect.com/science/article/pii/S0921883123004019") ] for t,a,n,u in refs: st.markdown(f"**[{t}]({u})** — *{a}* \n_{n}_") # ----- Download tab with tabs[7]: st.subheader("Download Saved Files") files = [f for f in os.listdir(LOG_DIR) if os.path.isfile(os.path.join(LOG_DIR, f))] if not files: st.info("No files yet — run AutoML first.") else: for f in sorted(files): path = os.path.join(LOG_DIR, f) with open(path,"rb") as fp: st.download_button(f"Download {f}", fp, file_name=f) # ----- Logs tab with tabs[8]: st.subheader("Master Log") if os.path.exists(LOG_PATH): txt = open(LOG_PATH).read() st.text_area("Log Output", txt, height=400) st.download_button("Download Log", txt, file_name="run_master.log") else: st.info("No logs yet — run AutoML once.") st.markdown("---") st.markdown("**Note:** Synthetic demo dataset for educational use only. Real deployment requires plant data, NDA, and safety validation.")