import os
import json
import time
from datetime import datetime
import numpy as np
import pandas as pd
import streamlit as st
import matplotlib.pyplot as plt
import seaborn as sns
import joblib
import zipfile
import io
# ML imports
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, ExtraTreesRegressor
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.metrics import mean_squared_error, r2_score
# SHAP
import shap
if "llm_result" not in st.session_state:
st.session_state["llm_result"] = None
if "automl_summary" not in st.session_state:
st.session_state["automl_summary"] = {}
if "shap_recommendations" not in st.session_state:
st.session_state["shap_recommendations"] = []
# -------------------------
# Config & paths
# -------------------------
st.set_page_config(page_title="Steel Authority of India Limited (MODEX)", layout="wide")
plt.style.use("seaborn-v0_8-muted")
sns.set_palette("muted")
sns.set_style("whitegrid")
LOG_DIR = "./logs"
os.makedirs(LOG_DIR, exist_ok=True)
# Permanent artifact filenames (never change)
CSV_PATH = os.path.join(LOG_DIR, "flatfile_universe_advanced.csv")
META_PATH = os.path.join(LOG_DIR, "feature_metadata_advanced.json")
ENSEMBLE_PATH = os.path.join(LOG_DIR, "ensemble_models.joblib")
LOG_PATH = os.path.join(LOG_DIR, "run_master.log")
# Simple logger that appends time-stamped entries to a single master log file
SESSION_STARTED = False
def log(msg: str):
global SESSION_STARTED
stamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
with open(LOG_PATH, "a", encoding="utf-8") as f:
if not SESSION_STARTED:
f.write("\n\n===== New Session Started at {} =====\n".format(stamp))
SESSION_STARTED = True
f.write(f"[{stamp}] {msg}\n")
print(msg)
log("=== Streamlit session started ===")
if os.path.exists("/data"):
st.sidebar.success(f" Using persistent storage | Logs directory: {LOG_DIR}")
else:
st.sidebar.warning(f" Using ephemeral storage | Logs directory: {LOG_DIR}. Data will be lost on rebuild.")
# -------------------------
# Utility: generate advanced dataset if missing
# -------------------------
def generate_advanced_flatfile(
n_rows=3000,
random_seed=42,
max_polynomial_new=60,
global_variance_multiplier=1.0,
variance_overrides=None,
):
"""
Generates a large synthetic, physics-aligned dataset with many engineered features.
Allows control of variability per feature (through variance_overrides) or globally
(via global_variance_multiplier).
Args:
n_rows: number of samples
random_seed: RNG seed
max_polynomial_new: limit on number of polynomial expansion features
global_variance_multiplier: multiplier applied to all default stddevs
        variance_overrides: dict mapping feature name or substring → absolute stddev
            (replaces the scaled default for matching features; note this is not a multiplier)
"""
np.random.seed(random_seed)
os.makedirs(LOG_DIR, exist_ok=True)
if variance_overrides is None:
variance_overrides = {}
# --- base natural features across 8 use cases (expanded)
natural_feats = [
"vibration_x","vibration_y","motor_current","rpm","bearing_temp","ambient_temp","lube_pressure","power_factor",
"furnace_temp","tap_temp","slag_temp","offgas_co","offgas_co2","o2_probe_pct","c_feed_rate","arc_power","furnace_pressure","feed_time",
"mold_temp","casting_speed","nozzle_pressure","cooling_water_temp","billet_length","chemical_C","chemical_Mn","chemical_Si","chemical_S",
"roll_speed","motor_load","coolant_flow","exit_temp","strip_thickness","line_tension","roller_vibration",
"lighting_intensity","surface_temp","image_entropy_proxy",
"spectro_Fe","spectro_C","spectro_Mn","spectro_Si","time_since_last_sample",
"batch_id_numeric","weight_input","weight_output","time_in_queue","conveyor_speed",
"shell_temp","lining_thickness","water_flow","cooling_out_temp","heat_flux"
]
natural_feats = list(dict.fromkeys(natural_feats)) # dedupe
# helper: compute adjusted stddev
def effective_sd(feature_name, base_sd):
# exact name override
if feature_name in variance_overrides:
return float(variance_overrides[feature_name])
# substring override
for key, val in variance_overrides.items():
if key in feature_name:
return float(val)
# fallback: scaled base
return float(base_sd) * float(global_variance_multiplier)
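    # Resolution order: an exact feature-name override wins, then the first substring match
    # (dict insertion order), otherwise base_sd * global_variance_multiplier. For example,
    # with variance_overrides={"temp": 2.0}, effective_sd("bearing_temp", 5) -> 2.0,
    # while effective_sd("rpm", 30) -> 30 * global_variance_multiplier.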
    # helper sampling heuristics
    def sample_col(name, n):
        name_l = name.lower()
        # Specific temperature sensors must be checked before the generic *_temp branch,
        # otherwise that branch shadows them and they all sample around 1550.
        if "bearing_temp" in name_l:
            sd = effective_sd("bearing_temp", 5)
            return np.random.normal(65, sd, n)
        if name_l in ("tap_temp","mold_temp","shell_temp","cooling_out_temp","exit_temp"):
            sd = effective_sd(name_l, 30)
            return np.random.normal(1500 if "tap" in name_l else 200, sd, n)
        if "furnace_temp" in name_l or name_l.endswith("_temp"):
            sd = effective_sd("furnace_temp", 50)
            return np.random.normal(1550, sd, n)
        if "offgas_co2" in name_l:
            sd = effective_sd("offgas_co2", 4)
            return np.abs(np.random.normal(15, sd, n))
        if "offgas_co" in name_l:
            sd = effective_sd("offgas_co", 5)
            return np.abs(np.random.normal(20, sd, n))
        if "o2" in name_l:
            sd = effective_sd("o2_probe_pct", 1)
            return np.clip(np.random.normal(5, sd, n), 0.01, 60)
        if "arc_power" in name_l or "motor_load" in name_l:
            sd = effective_sd("arc_power", 120)
            return np.abs(np.random.normal(600, sd, n))
        if "rpm" in name_l:
            sd = effective_sd("rpm", 30)
            return np.abs(np.random.normal(120, sd, n))
        if "vibration" in name_l:
            sd = effective_sd("vibration", 0.15)
            return np.abs(np.random.normal(0.4, sd, n))
if "chemical" in name_l or "spectro" in name_l:
sd = effective_sd("chemical", 0.15)
return np.random.normal(0.7, sd, n)
if "weight" in name_l:
sd = effective_sd("weight", 100)
return np.random.normal(1000, sd, n)
if "conveyor_speed" in name_l or "casting_speed" in name_l:
sd = effective_sd("casting_speed", 0.6)
return np.random.normal(2.5, sd, n)
if "power_factor" in name_l:
sd = effective_sd("power_factor", 0.03)
return np.clip(np.random.normal(0.92, sd, n), 0.6, 1.0)
if "image_entropy_proxy" in name_l:
sd = effective_sd("image_entropy_proxy", 0.25)
return np.abs(np.random.normal(0.5, sd, n))
if "batch_id" in name_l:
return np.random.randint(1000,9999,n)
if "time_since" in name_l or "time_in_queue" in name_l:
sd = effective_sd("time_since", 20)
return np.abs(np.random.normal(30, sd, n))
if "heat_flux" in name_l:
sd = effective_sd("heat_flux", 300)
return np.abs(np.random.normal(1000, sd, n))
return np.random.normal(0, effective_sd(name_l, 1), n)
# build DataFrame
df = pd.DataFrame({c: sample_col(c, n_rows) for c in natural_feats})
# timestamps & metadata
start = pd.Timestamp("2025-01-01T00:00:00")
df["timestamp"] = pd.date_range(start, periods=n_rows, freq="min")
df["cycle_minute"] = np.mod(np.arange(n_rows), 80)
df["meta_plant_name"] = np.random.choice(["Rourkela","Bhilai","Durgapur","Bokaro","Burnpur","Salem"], n_rows)
df["meta_country"] = "India"
# --- synthetic features: physics informed proxies
df["carbon_proxy"] = df["offgas_co"] / (df["offgas_co2"] + 1.0)
df["oxygen_utilization"] = df["offgas_co2"] / (df["offgas_co"] + 1.0)
df["power_density"] = df["arc_power"] / (df["weight_input"] + 1.0)
df["energy_efficiency"] = df["furnace_temp"] / (df["arc_power"] + 1.0)
df["slag_foaming_index"] = (df["slag_temp"] * df["offgas_co"]) / (df["o2_probe_pct"] + 1.0)
df["yield_ratio"] = df["weight_output"] / (df["weight_input"] + 1e-9)
# rolling stats, lags, rocs for a prioritized set
rolling_cols = ["arc_power","furnace_temp","offgas_co","offgas_co2","motor_current","vibration_x","weight_input"]
for rc in rolling_cols:
if rc in df.columns:
df[f"{rc}_roll_mean_3"] = df[rc].rolling(3, min_periods=1).mean()
df[f"{rc}_roll_std_5"] = df[rc].rolling(5, min_periods=1).std().fillna(0)
df[f"{rc}_lag1"] = df[rc].shift(1).bfill()
df[f"{rc}_roc_1"] = df[rc].diff().fillna(0)
# interaction & polynomial-lite
df["arc_o2_interaction"] = df["arc_power"] * df["o2_probe_pct"]
df["carbon_power_ratio"] = df["carbon_proxy"] / (df["arc_power"] + 1e-6)
df["temp_power_sqrt"] = df["furnace_temp"] * np.sqrt(np.abs(df["arc_power"]) + 1e-6)
# polynomial features limited to first 12 numeric columns
numeric = df.select_dtypes(include=[np.number]).fillna(0)
poly_source_cols = numeric.columns[:12].tolist()
poly = PolynomialFeatures(degree=2, interaction_only=False, include_bias=False)
poly_mat = poly.fit_transform(numeric[poly_source_cols])
poly_names = poly.get_feature_names_out(poly_source_cols)
poly_df = pd.DataFrame(poly_mat, columns=[f"poly__{n}" for n in poly_names], index=df.index)
keep_poly = [c for c in poly_df.columns if c.replace("poly__","") not in poly_source_cols]
poly_df = poly_df[keep_poly].iloc[:, :max_polynomial_new] if len(keep_poly) > 0 else poly_df.iloc[:, :0]
df = pd.concat([df, poly_df], axis=1)
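    # The kept columns are degree-2 terms built from the first 12 numeric features, e.g.
    # "poly__vibration_x^2" (squares) and "poly__vibration_x vibration_y" (pairwise
    # products); plain degree-1 copies are filtered out above since df already has them.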
# PCA embeddings across numeric sensors
scaler = StandardScaler()
scaled = scaler.fit_transform(numeric)
pca = PCA(n_components=6, random_state=42)
pca_cols = pca.fit_transform(scaled)
for i in range(pca_cols.shape[1]):
df[f"pca_{i+1}"] = pca_cols[:, i]
# KMeans cluster label for operating mode
kmeans = KMeans(n_clusters=6, random_state=42, n_init=10)
df["operating_mode"] = kmeans.fit_predict(scaled)
# surrogate models
surrogate_df = df.copy()
surrogate_df["furnace_temp_next"] = surrogate_df["furnace_temp"].shift(-1).ffill()
features_for_surrogate = [c for c in ["furnace_temp","arc_power","o2_probe_pct","offgas_co","offgas_co2"] if c in df.columns]
if len(features_for_surrogate) >= 2:
X = surrogate_df[features_for_surrogate].fillna(0)
y = surrogate_df["furnace_temp_next"]
rf = RandomForestRegressor(n_estimators=50, random_state=42, n_jobs=-1)
rf.fit(X, y)
df["pred_temp_30s"] = rf.predict(X)
else:
df["pred_temp_30s"] = df["furnace_temp"]
if all(c in df.columns for c in ["offgas_co","offgas_co2","o2_probe_pct"]):
X2 = df[["offgas_co","offgas_co2","o2_probe_pct"]].fillna(0)
rf2 = RandomForestRegressor(n_estimators=50, random_state=1, n_jobs=-1)
rf2.fit(X2, df["carbon_proxy"])
df["pred_carbon_5min"] = rf2.predict(X2)
else:
df["pred_carbon_5min"] = df["carbon_proxy"]
# safety indices & flags
df["refractory_limit_flag"] = (df["lining_thickness"] < 140).astype(int)
df["max_allowed_power_delta"] = np.clip(df["arc_power"].diff().abs().fillna(0), 0, 2000)
# rule-based target
df["ARC_ON"] = ((df["arc_power"] > df["arc_power"].median()) & (df["carbon_proxy"] < 1.0)).astype(int)
df["prediction_confidence"] = np.clip(np.random.beta(2,5, n_rows), 0.05, 0.99)
# clean NaN and infinite
df.replace([np.inf, -np.inf], np.nan, inplace=True)
df.bfill(inplace=True)
df.fillna(0, inplace=True)
# save CSV & metadata
df["run_timestamp"] = datetime.now().strftime("%Y%m%d_%H%M%S")
if os.path.exists(CSV_PATH):
df.to_csv(CSV_PATH, mode="a", index=False, header=False)
else:
df.to_csv(CSV_PATH, index=False)
# append run-summary entry to metadata JSON
meta_entry = {
"timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
"features": len(df.columns),
"rows_added": len(df),
"note": "auto-generated block appended"
}
    if os.path.exists(META_PATH):
        with open(META_PATH, "r", encoding="utf-8") as f:
            existing = json.load(f)
        existing.append(meta_entry)
    else:
        existing = [meta_entry]
    with open(META_PATH, "w", encoding="utf-8") as f:
        json.dump(existing, f, indent=2)
PDF_PATH = None
# annotated bibliography
# try:
# from fpdf import FPDF
# pdf = FPDF('P','mm','A4')
# pdf.add_page()
# pdf.set_font("Helvetica","B",14)
# pdf.cell(0,8,"Annotated Bibliography - Metallurgical AI (Selected Papers)", ln=True)
# pdf.ln(2)
# pdf.set_font("Helvetica","",10)
# pdf.cell(0,6,"Generated: " + datetime.utcnow().strftime("%Y-%m-%d %H:%M UTC"), ln=True)
# pdf.ln(4)
# bib_items = [
# ("A Survey of Data-Driven Soft Sensing in Ironmaking Systems","Yan et al. (2024)","Review of soft-sensors; supports gas proxies, lags, PCA."),
# ("Optimisation of Oxygen Blowing Process using RL","Ojeda Roldan et al. (2022)","RL for oxygen control; motivates surrogate predicted states & safety indices."),
# ("Analyzing the Energy Efficiency of Electric Arc Furnace","Zhuo et al. (2024)","Energy KPIs (kWh/t) motivate power_density & energy_efficiency features."),
# ("BOF/Endpoint prediction techniques","Springer (2024)","Endpoint prediction; supports temporal lags and cycle encoding."),
# ("Dynamic EAF modeling & slag foaming","MacRosty et al.","Physics priors for slag_foaming_index and refractory health modeling.")
# ]
# for title, auth, note in bib_items:
# pdf.set_font("Helvetica","B",11)
# pdf.multi_cell(0,6, f"{title} — {auth}")
# pdf.set_font("Helvetica","",10)
# pdf.multi_cell(0,5, f"Notes: {note}")
# pdf.ln(2)
# pdf.output(PDF_PATH)
# except Exception as e:
# with open(PDF_PATH.replace(".pdf",".txt"), "w") as tf:
# tf.write("Annotated bibliography generated. Install fpdf for PDF output.\n")
return CSV_PATH, META_PATH, PDF_PATH
# -------------------------
# Ensure dataset exists
# -------------------------
if not os.path.exists(CSV_PATH) or not os.path.exists(META_PATH):
with st.spinner("Generating synthetic features (this may take ~20-60s)..."):
CSV_PATH, META_PATH, PDF_PATH = generate_advanced_flatfile(n_rows=3000, random_seed=42, max_polynomial_new=80)
st.success(f"Generated dataset and metadata: {CSV_PATH}")
# -------------------------
# Load data & metadata (cached)
# -------------------------
@st.cache_data
def load_data(csv_path=CSV_PATH, meta_path=META_PATH):
df_local = pd.read_csv(csv_path)
with open(meta_path, "r") as f:
meta_local = json.load(f)
return df_local, pd.DataFrame(meta_local)
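# Note: st.cache_data memoizes load_data by its arguments, so rows appended to the CSV
# later in the session are not re-read until the cache is cleared (e.g. load_data.clear()).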
df, meta_df = load_data()
# -------------------------
# Sidebar filters & UI (FINAL ROBUST VERSION)
# -------------------------
st.sidebar.title("Feature Explorer - Advanced + SHAP")
def ensure_feature_metadata(df: pd.DataFrame, meta_df: pd.DataFrame) -> pd.DataFrame:
"""Ensure metadata dataframe matches feature count & has required columns."""
required_cols = ["feature_name", "source_type", "formula", "remarks"]
# If metadata missing or too short, rebuild it entirely
if meta_df is None or len(meta_df) < len(df.columns):
meta_df = pd.DataFrame({
"feature_name": df.columns,
"source_type": [
"engineered" if any(x in c for x in ["poly", "pca", "roll", "lag"]) else "measured"
for c in df.columns
],
"formula": ["" for _ in df.columns],
"remarks": ["auto-inferred synthetic feature metadata" for _ in df.columns],
})
st.sidebar.warning("Metadata was summary-only — rebuilt feature-level metadata.")
else:
# Ensure required columns exist
for col in required_cols:
if col not in meta_df.columns:
meta_df[col] = None
        # Clip to the same number of features first (safety), so the fill below
        # cannot hit a length mismatch
        if len(meta_df) > len(df.columns):
            meta_df = meta_df.iloc[: len(df.columns)]
        # Fill feature_name if blank or NaN
        if meta_df["feature_name"].isna().all():
            meta_df["feature_name"] = df.columns
return meta_df
meta_df = ensure_feature_metadata(df, meta_df)
# Build sidebar safely: guard before the first use of source_type
if "source_type" not in meta_df.columns or meta_df["source_type"].dropna().empty:
    feat_types = []
else:
    feat_types = sorted(meta_df["source_type"].dropna().unique().tolist())
selected_types = st.sidebar.multiselect("Feature type", feat_types, default=feat_types)
if not feat_types:
    filtered_meta = meta_df.copy()
else:
    filtered_meta = meta_df[meta_df["source_type"].isin(selected_types)]
numeric_cols = df.select_dtypes(include=[np.number]).columns.tolist()
# -------------------------
# Features tab (robust)
# -------------------------
tabs = st.tabs([
"Features",
"Visualization",
"Correlations",
"Statistics",
"AutoML + SHAP",
"Business Impact",
"Bibliography",
"Download Saved Files",
"View Logs"
])
with tabs[0]:
st.subheader("Feature metadata")
st.dataframe(
filtered_meta[["feature_name", "source_type", "formula", "remarks"]]
.rename(columns={"feature_name": "Feature"}),
height=400
)
st.markdown(f"Total features loaded: **{df.shape[1]}** | Rows: **{df.shape[0]}**")
# ----- Visualize tab
with tabs[1]:
st.subheader("Feature Visualization")
col = st.selectbox("Choose numeric feature", numeric_cols, index=0)
bins = st.slider("Histogram bins", 10, 200, 50)
# --- Improved Histogram with style ---
fig, ax = plt.subplots(figsize=(8, 4))
sns.histplot(df[col], bins=bins, kde=True, ax=ax, color="#2C6E91", alpha=0.8)
ax.set_title(f"Distribution of {col.replace('_', ' ').title()}", fontsize=12)
ax.set_xlabel(col.replace("_", " ").title(), fontsize=10)
ax.set_ylabel("Frequency", fontsize=10)
sns.despine()
st.pyplot(fig, clear_figure=True)
st.write(df[col].describe().to_frame().T)
# --- Add PCA scatter visualization ---
if all(x in df.columns for x in ["pca_1", "pca_2", "operating_mode"]):
st.markdown("### PCA Feature Space — Colored by Operating Mode")
fig2, ax2 = plt.subplots(figsize=(6, 5))
sns.scatterplot(
data=df.sample(min(1000, len(df)), random_state=42),
x="pca_1", y="pca_2", hue="operating_mode",
palette="tab10", alpha=0.7, s=40, ax=ax2
)
ax2.set_title("Operating Mode Clusters (PCA Projection)", fontsize=12)
ax2.set_xlabel("PCA 1")
ax2.set_ylabel("PCA 2")
ax2.legend(title="Operating Mode", bbox_to_anchor=(1.05, 1), loc="upper left")
sns.despine()
st.pyplot(fig2, clear_figure=True)
# ----- Correlations tab
with tabs[2]:
st.subheader("Correlation explorer")
default_corr = numeric_cols[:20] if len(numeric_cols) >= 20 else numeric_cols
corr_sel = st.multiselect("Select features (min 2)", numeric_cols, default=default_corr)
if len(corr_sel) >= 2:
corr = df[corr_sel].corr()
fig, ax = plt.subplots(figsize=(10,8))
sns.heatmap(
corr, cmap="RdBu_r", center=0, annot=True, fmt=".2f",
linewidths=0.5, cbar_kws={"shrink": 0.7}, ax=ax
)
ax.set_title("Feature Correlation Matrix", fontsize=12)
sns.despine()
st.pyplot(fig, clear_figure=True)
else:
st.info("Choose at least 2 numeric features to compute correlation.")
# ----- Stats tab
with tabs[3]:
st.subheader("Summary statistics (numeric features)")
st.dataframe(df.describe().T.style.format("{:.3f}"), height=500)
# ----- Ensemble + SHAP tab (Expanded AutoML + Stacking + Multi-Family) -----
with tabs[4]:
st.subheader(" AutoML Ensemble — Expanded Families + Stacking + SHAP")
# --- Step 0: High-level Use Case (keeps previous defaults) ---
st.markdown("### Choose Industrial Use Case ")
use_case = st.selectbox(
"Select Use Case",
[
"Predictive Maintenance",
"EAF Data Intelligence",
"Casting Quality Optimization",
"Rolling Mill Energy Optimization",
"Surface Defect Detection (Vision AI)",
"Material Composition & Alloy Mix AI",
"Inventory & Yield Optimization",
"Refractory & Cooling Loss Prediction"
],
index=1
)
# Map use-case -> defaults (same as before)
use_case_config = {
"Predictive Maintenance": {"target": "bearing_temp", "model_hint": "RandomForest"},
"EAF Data Intelligence": {"target": "furnace_temp", "model_hint": "GradientBoosting"},
"Casting Quality Optimization": {"target": "surface_temp" if "surface_temp" in numeric_cols else "furnace_temp", "model_hint": "GradientBoosting"},
"Rolling Mill Energy Optimization": {"target": "energy_efficiency", "model_hint": "ExtraTrees"},
"Surface Defect Detection (Vision AI)": {"target": "image_entropy_proxy", "model_hint": "GradientBoosting"},
"Material Composition & Alloy Mix AI": {"target": "chemical_C", "model_hint": "RandomForest"},
"Inventory & Yield Optimization": {"target": "yield_ratio", "model_hint": "GradientBoosting"},
"Refractory & Cooling Loss Prediction": {"target": "lining_thickness", "model_hint": "ExtraTrees"},
}
cfg = use_case_config.get(use_case, {"target": numeric_cols[0], "model_hint": "RandomForest"})
target = cfg["target"]
model_hint = cfg["model_hint"]
# --- Feature auto-suggestion (keeps your earlier heuristic) ---
suggested = [c for c in numeric_cols if any(k in c for k in target.split('_'))]
if len(suggested) < 6:
suggested = [c for c in numeric_cols if any(k in c for k in ["temp", "power", "energy", "pressure", "yield"])]
if len(suggested) < 6:
suggested = numeric_cols[:50]
features = st.multiselect("Model input features (auto-suggested)", numeric_cols, default=suggested)
st.markdown(f"Auto target: `{target}` · Suggested family hint: `{model_hint}`")
# --- Data sampling controls ---
max_rows = min(df.shape[0], 20000)
sample_size = st.slider("Sample rows (train speed vs fidelity)", 500, max_rows, min(1500, max_rows), step=100)
    # Guard against the target leaking into the model inputs (and being duplicated in sub_df)
    features = [c for c in features if c != target]
    sub_df = df[features + [target]].sample(n=sample_size, random_state=42).reset_index(drop=True)
    X = sub_df[features].fillna(0)
    y = sub_df[target].fillna(0)
# --- Ensemble control UI ---
st.markdown("### Ensemble & AutoML Settings")
max_trials = st.slider("Optuna trials per family (total trials grow with families)", 5, 80, 20, step=5)
top_k = st.slider("Max base models to keep in final ensemble", 2, 8, 5)
allow_advanced = st.checkbox("Include advanced families (XGBoost, LightGBM, CatBoost, TabPFN if installed)", value=True)
# --- Conditional imports (graceful fallbacks) ---
available_models = ["RandomForest", "ExtraTrees"] # always available (sklearn)
optional_families = {}
if allow_advanced:
try:
import xgboost as xgb
optional_families["XGBoost"] = True
available_models.append("XGBoost")
except Exception:
optional_families["XGBoost"] = False
try:
import lightgbm as lgb
optional_families["LightGBM"] = True
available_models.append("LightGBM")
except Exception:
optional_families["LightGBM"] = False
try:
import catboost as cb
optional_families["CatBoost"] = True
available_models.append("CatBoost")
except Exception:
optional_families["CatBoost"] = False
try:
# TabPFN is often packaged differently; attempt import but it's optional
import tabpfn
optional_families["TabPFN"] = True
available_models.append("TabPFN")
except Exception:
optional_families["TabPFN"] = False
try:
# FT-Transformer optional
from pytorch_tabular.models import transformers # may not be installed
optional_families["FTTransformer"] = True
available_models.append("FTTransformer")
except Exception:
optional_families["FTTransformer"] = False
st.markdown(f"Available model families: {', '.join(available_models)}")
# --- Optuna tuning routine per family ---
import optuna
from sklearn.model_selection import cross_val_score, KFold
from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor
from sklearn.linear_model import Ridge
from sklearn.neural_network import MLPRegressor
from sklearn.metrics import r2_score, mean_squared_error
def tune_family(family_name, X_local, y_local, n_trials=20, random_state=42):
"""Tune one model family using Optuna; returns best (model_obj, cv_score, best_params)."""
def obj(trial):
# sample hyperparams per family
if family_name == "RandomForest":
n_estimators = trial.suggest_int("n_estimators", 100, 800)
max_depth = trial.suggest_int("max_depth", 4, 30)
m = RandomForestRegressor(n_estimators=n_estimators, max_depth=max_depth, n_jobs=-1, random_state=random_state)
elif family_name == "ExtraTrees":
n_estimators = trial.suggest_int("n_estimators", 100, 800)
max_depth = trial.suggest_int("max_depth", 4, 30)
m = ExtraTreesRegressor(n_estimators=n_estimators, max_depth=max_depth, n_jobs=-1, random_state=random_state)
elif family_name == "XGBoost" and optional_families.get("XGBoost"):
n_estimators = trial.suggest_int("n_estimators", 100, 1000)
max_depth = trial.suggest_int("max_depth", 3, 12)
lr = trial.suggest_float("learning_rate", 0.01, 0.3, log=True)
m = xgb.XGBRegressor(n_estimators=n_estimators, max_depth=max_depth, learning_rate=lr, tree_method="hist", verbosity=0, random_state=random_state, n_jobs=1)
elif family_name == "LightGBM" and optional_families.get("LightGBM"):
n_estimators = trial.suggest_int("n_estimators", 100, 1000)
max_depth = trial.suggest_int("max_depth", 3, 16)
lr = trial.suggest_float("learning_rate", 0.01, 0.3, log=True)
m = lgb.LGBMRegressor(n_estimators=n_estimators, max_depth=max_depth, learning_rate=lr, n_jobs=1, random_state=random_state)
elif family_name == "CatBoost" and optional_families.get("CatBoost"):
iterations = trial.suggest_int("iterations", 200, 1000)
depth = trial.suggest_int("depth", 4, 10)
lr = trial.suggest_float("learning_rate", 0.01, 0.3, log=True)
m = cb.CatBoostRegressor(iterations=iterations, depth=depth, learning_rate=lr, verbose=0, random_state=random_state)
elif family_name == "MLP":
hidden = trial.suggest_int("hidden_layer_sizes", 32, 512, log=True)
lr = trial.suggest_float("learning_rate_init", 1e-4, 1e-1, log=True)
m = MLPRegressor(hidden_layer_sizes=(hidden,), learning_rate_init=lr, max_iter=500, random_state=random_state)
elif family_name == "TabPFN" and optional_families.get("TabPFN"):
# TabPFN often works without hyperparams exposure; return a surrogate score using quick fit
# We'll call its predict_proba style API if available; as fallback use a mean score to let stacking consider it.
# For tuning, just return a placeholder; we'll build model object later.
return 0.0
else:
# fallback to a small RandomForest to avoid crashing
m = RandomForestRegressor(n_estimators=200, max_depth=8, random_state=random_state, n_jobs=-1)
            # R² is kept as the CV objective for generality; negative RMSE would also work
try:
scores = cross_val_score(m, X_local, y_local, scoring="r2", cv=3, n_jobs=1)
return float(np.mean(scores))
except Exception:
return -999.0
study = optuna.create_study(direction="maximize")
study.optimize(obj, n_trials=n_trials, show_progress_bar=False)
best = study.best_trial.params if study.trials else {}
# instantiate best model
try:
if family_name == "RandomForest":
model = RandomForestRegressor(n_estimators=best.get("n_estimators",200), max_depth=best.get("max_depth",8), n_jobs=-1, random_state=42)
elif family_name == "ExtraTrees":
model = ExtraTreesRegressor(n_estimators=best.get("n_estimators",200), max_depth=best.get("max_depth",8), n_jobs=-1, random_state=42)
elif family_name == "XGBoost" and optional_families.get("XGBoost"):
model = xgb.XGBRegressor(n_estimators=best.get("n_estimators",200), max_depth=best.get("max_depth",6), learning_rate=best.get("learning_rate",0.1), tree_method="hist", verbosity=0, random_state=42, n_jobs=1)
elif family_name == "LightGBM" and optional_families.get("LightGBM"):
model = lgb.LGBMRegressor(n_estimators=best.get("n_estimators",200), max_depth=best.get("max_depth",8), learning_rate=best.get("learning_rate",0.1), n_jobs=1, random_state=42)
elif family_name == "CatBoost" and optional_families.get("CatBoost"):
model = cb.CatBoostRegressor(iterations=best.get("iterations",200), depth=best.get("depth",6), learning_rate=best.get("learning_rate",0.1), verbose=0, random_state=42)
elif family_name == "MLP":
model = MLPRegressor(hidden_layer_sizes=(best.get("hidden_layer_sizes",128),), learning_rate_init=best.get("learning_rate_init",0.001), max_iter=500, random_state=42)
elif family_name == "TabPFN" and optional_families.get("TabPFN"):
# We'll create a small wrapper for TabPFN later on train time
model = "TabPFN_placeholder"
else:
model = RandomForestRegressor(n_estimators=200, max_depth=8, random_state=42, n_jobs=-1)
except Exception:
model = RandomForestRegressor(n_estimators=200, max_depth=8, random_state=42, n_jobs=-1)
# compute cross-validated score for the best model
try:
score = float(np.mean(cross_val_score(model, X_local, y_local, scoring="r2", cv=3, n_jobs=1)))
except Exception:
score = -999.0
return {"model_obj": model, "cv_score": score, "best_params": best, "family": family_name, "study": study}
# --- Run tuning across available families (user triggered) ---
    run_btn = st.button("Run expanded AutoML + Stacking")
if run_btn:
log("AutoML + Stacking initiated.")
with st.spinner("Tuning multiple families (this may take a while depending on choices)..."):
families_to_try = ["RandomForest", "ExtraTrees", "MLP"]
if allow_advanced:
if optional_families.get("XGBoost"): families_to_try.append("XGBoost")
if optional_families.get("LightGBM"): families_to_try.append("LightGBM")
if optional_families.get("CatBoost"): families_to_try.append("CatBoost")
if optional_families.get("TabPFN"): families_to_try.append("TabPFN")
if optional_families.get("FTTransformer"): families_to_try.append("FTTransformer")
tuned_results = []
for fam in families_to_try:
log(f"Tuning family: {fam}")
st.caption(f"Tuning family: {fam}")
res = tune_family(fam, X, y, n_trials=max_trials)
# res can be dict or single-run result; ensure consistent format
if isinstance(res, dict) and "model_obj" in res:
tuned_results.append(res)
else:
st.warning(f"Family {fam} returned unexpected tune result: {res}")
log("All families tuned successfully.")
# build leaderboard DataFrame
lb = pd.DataFrame([{"family": r["family"], "cv_r2": r["cv_score"], "params": r["best_params"]} for r in tuned_results])
lb = lb.sort_values("cv_r2", ascending=False).reset_index(drop=True)
st.markdown("### Tuning Leaderboard (by CV R²)")
st.dataframe(lb[["family","cv_r2"]].round(4))
# --- Bonus Visualization: Model Performance Summary ---
if not lb.empty:
st.markdown("#### Model Performance Summary (CV R²)")
fig_perf, ax_perf = plt.subplots(figsize=(7, 4))
colors = ["#2C6E91" if fam != lb.iloc[0]["family"] else "#C65F00" for fam in lb["family"]]
ax_perf.barh(lb["family"], lb["cv_r2"], color=colors, alpha=0.85)
ax_perf.set_xlabel("Cross-Validated R² Score", fontsize=10)
ax_perf.set_ylabel("Model Family", fontsize=10)
ax_perf.set_title("Performance Comparison Across Model Families", fontsize=12)
ax_perf.invert_yaxis()
for i, v in enumerate(lb["cv_r2"]):
ax_perf.text(v + 0.005, i, f"{v:.3f}", va="center", fontsize=9)
sns.despine()
st.pyplot(fig_perf, clear_figure=True)
# --- Build base-models and collect out-of-fold preds for stacking ---
st.markdown("### Building base models & out-of-fold predictions for stacking")
kf = KFold(n_splits=5, shuffle=True, random_state=42)
base_models = []
oof_preds = pd.DataFrame(index=X.index)
for idx, row in lb.iterrows():
fam = row["family"]
model_entry = next((r for r in tuned_results if r["family"] == fam), None)
if model_entry is None:
continue
model_obj = model_entry["model_obj"]
# train out-of-fold predictions
oof = np.zeros(X.shape[0])
for tr_idx, val_idx in kf.split(X):
X_tr, X_val = X.iloc[tr_idx], X.iloc[val_idx]
y_tr = y.iloc[tr_idx]
# fit family-specific wrapper (TabPFN/FTTransformer special-case)
if model_obj == "TabPFN_placeholder":
try:
# TabPFN expects specific API; create a simple fallback: use RandomForest to approximate
tmp = RandomForestRegressor(n_estimators=200, max_depth=8, random_state=42, n_jobs=-1)
tmp.fit(X_tr, y_tr)
oof[val_idx] = tmp.predict(X_val)
except Exception:
oof[val_idx] = np.mean(y_tr)
else:
try:
model_obj.fit(X_tr, y_tr)
oof[val_idx] = model_obj.predict(X_val)
except Exception:
# fallback to mean
oof[val_idx] = np.mean(y_tr)
oof_preds[f"{fam}_oof"] = oof
# finally fit model on full data
try:
if model_entry["model_obj"] == "TabPFN_placeholder":
# fallback full-model: RandomForest
fitted = RandomForestRegressor(n_estimators=200, max_depth=8, random_state=42, n_jobs=-1)
fitted.fit(X, y)
else:
model_entry["model_obj"].fit(X, y)
fitted = model_entry["model_obj"]
except Exception:
fitted = RandomForestRegressor(n_estimators=200, max_depth=8, random_state=42, n_jobs=-1)
fitted.fit(X, y)
base_models.append({"family": fam, "model": fitted, "cv_r2": model_entry["cv_score"]})
# --- prune highly correlated OOF preds and keep top_k diverse models ---
if oof_preds.shape[1] == 0:
st.error("No base models created — aborting stacking.")
else:
corr_matrix = oof_preds.corr().abs()
# compute diversity score = (1 - mean correlation with others)
diversity = {col: 1 - corr_matrix[col].drop(col).mean() for col in corr_matrix.columns}
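            # diversity ranges 0..1: 1 - mean |correlation| with the other models' OOF
            # predictions, so ~0 means redundant with the pool and ~1 means uncorrelated.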
summary = []
for bm in base_models:
col = f"{bm['family']}_oof"
summary.append({"family": bm["family"], "cv_r2": bm["cv_r2"], "diversity": diversity.get(col, 0.0)})
summary_df = pd.DataFrame(summary).sort_values(["cv_r2", "diversity"], ascending=[False, False]).reset_index(drop=True)
st.markdown("### Base Model Summary (cv_r2, diversity)")
st.dataframe(summary_df.round(4))
# select top_k by cv_r2 and diversity combined
selected = summary_df.sort_values(["cv_r2","diversity"], ascending=[False, False]).head(top_k)["family"].tolist()
st.markdown(f"Selected for stacking (top {top_k}): {selected}")
# build stacking training data (OOF preds for selected)
selected_cols = [f"{s}_oof" for s in selected]
X_stack = oof_preds[selected_cols].fillna(0)
meta = Ridge(alpha=1.0)
meta.fit(X_stack, y)
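            # Stacking sketch: the Ridge meta-learner sees one column per selected base model
            # (its out-of-fold predictions) and learns blend weights, i.e.
            # y_hat ≈ intercept + sum_k(w_k * base_pred_k), with the w_k in meta.coef_.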
# --- Robust holdout evaluation & SHAP (safe for deployment) ---
# Split for holdout
X_tr, X_val, y_tr, y_val = train_test_split(X, y, test_size=0.2, random_state=42)
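            # Caveat: the base models above were already fit on all of X (including these
            # holdout rows), so the holdout R²/RMSE below is optimistic; a stricter protocol
            # would refit the base models on X_tr only before scoring.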
# Helper to always produce scalar-safe mean
def scalar_mean(arr):
try:
return float(np.mean(arr))
except Exception:
return float(np.mean(np.ravel(arr)))
# Build family → model map
base_model_map = {bm["family"]: bm["model"] for bm in base_models}
meta_inputs = []
missing_families = []
n_meta_features_trained = X_stack.shape[1]
# Collect predictions from each selected model
for fam in selected:
bm = base_model_map.get(fam)
if bm is None:
missing_families.append(fam)
safe_mean = scalar_mean(y_tr)
meta_inputs.append(np.full(len(X_val), safe_mean))
continue
try:
preds = bm.predict(X_val)
preds = np.asarray(preds)
# Collapse multi-output predictions to 1D
if preds.ndim == 2:
preds = preds.mean(axis=1)
preds = preds.reshape(-1)
if preds.shape[0] != len(X_val):
preds = np.full(len(X_val), scalar_mean(y_tr))
meta_inputs.append(preds)
except Exception as e:
safe_mean = scalar_mean(y_tr)
meta_inputs.append(np.full(len(X_val), safe_mean))
if missing_families:
st.warning(f"Missing base models: {missing_families}. Using mean predictions.")
# Stack meta features
if not meta_inputs:
st.error("No meta features to predict — aborting.")
st.stop()
X_meta_val = np.column_stack(meta_inputs)
n_meta_features_val = X_meta_val.shape[1]
# Align meta features between training and validation
if n_meta_features_val < n_meta_features_trained:
pad_cols = n_meta_features_trained - n_meta_features_val
safe_mean = scalar_mean(y_tr)
pad = np.tile(np.full((len(X_val), 1), safe_mean), (1, pad_cols))
X_meta_val = np.hstack([X_meta_val, pad])
elif n_meta_features_val > n_meta_features_trained:
X_meta_val = X_meta_val[:, :n_meta_features_trained]
if X_meta_val.shape[1] != n_meta_features_trained:
st.error(f"Stack alignment failed: {X_meta_val.shape[1]} != {n_meta_features_trained}")
st.stop()
# Meta prediction
y_meta_pred = meta.predict(pd.DataFrame(X_meta_val, columns=X_stack.columns))
# Final evaluation
final_r2 = r2_score(y_val, y_meta_pred)
final_rmse = float(np.sqrt(mean_squared_error(y_val, y_meta_pred)))
st.success("AutoML + Stacking complete — metrics, artifacts, and SHAP ready.")
log(f"Completed stacking. Final R2={final_r2:.4f}, RMSE={final_rmse:.4f}")
# ===============================
# OPERATOR ADVISORY SYSTEM
# ===============================
st.markdown("---")
st.subheader("Operator Advisory System — Real-Time Shift Recommendations")
try:
# Use top base model already identified
top_base = next((b for b in base_models if b["family"] == selected[0]), None)
if top_base and hasattr(top_base["model"], "predict"):
sample_X = X_val.sample(min(300, len(X_val)), random_state=42)
model = top_base["model"]
                    # SHAP direction analysis: TreeExplainer assumes a tree-based top model;
                    # non-tree families fall through to the outer exception handler
                    expl = shap.TreeExplainer(model)
shap_vals = expl.shap_values(sample_X)
# --- Normalize SHAP output structure (handles list, ndarray, or multi-dim cases) ---
if isinstance(shap_vals, list): # e.g., for multiclass models
shap_vals = shap_vals[0]
shap_vals = np.array(shap_vals)
# If SHAP output has >2 dims, reduce to (n_samples, n_features)
if shap_vals.ndim > 2:
shap_vals = shap_vals.reshape(shap_vals.shape[0], -1)
# Align SHAP features to DataFrame
if shap_vals.shape[1] != sample_X.shape[1]:
min_feats = min(shap_vals.shape[1], sample_X.shape[1])
shap_vals = shap_vals[:, :min_feats]
sample_X = sample_X.iloc[:, :min_feats]
# Compute robust means
mean_abs = np.abs(shap_vals).mean(axis=0)
mean_sign = np.sign(shap_vals).mean(axis=0)
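                    # mean_abs ranks features by overall influence; mean_sign > 0 means the
                    # feature's SHAP contributions were mostly positive across samples. Note
                    # this is contribution direction, not a strict "raise the setpoint" rule:
                    # the recommendations below treat it as a first-order heuristic.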
importance = pd.DataFrame({
"Feature": sample_X.columns,
"Mean |SHAP|": mean_abs,
"Mean SHAP Sign": mean_sign
}).sort_values("Mean |SHAP|", ascending=False)
# Display Top 5 Drivers
st.markdown("### Top 5 Operational Drivers Influencing Target")
st.dataframe(importance.head(5).style.format({"Mean |SHAP|": "{:.3f}", "Mean SHAP Sign": "{:.3f}"}))
# Direction-based recommendations
recommendations = []
for _, row in importance.head(5).iterrows():
f = row["Feature"]
s = row["Mean SHAP Sign"]
if s > 0.05:
recommendations.append(f"Increase `{f}` likely increases `{target}`")
elif s < -0.05:
recommendations.append(f"Decrease `{f}` likely increases `{target}`")
else:
recommendations.append(f" `{f}` is neutral or nonlinear for `{target}`")
st.markdown("### Suggested Operator Adjustments (Model-Inferred)")
st.write("\n".join(recommendations))
# Delta recommendations vs previous shift
prev_shift = df.tail(200).mean(numeric_only=True)
recommended_shift = prev_shift.copy()
for rec in recommendations:
if "Increase" in rec:
name = rec.split('`')[1]
if name in recommended_shift:
recommended_shift[name] *= 1.03 # +3%
elif "Decrease" in rec:
name = rec.split('`')[1]
if name in recommended_shift:
recommended_shift[name] *= 0.97 # -3%
# Delta table
st.markdown("### 🧾 Shift Adjustment Summary (vs Previous 200 Samples)")
deltas = pd.DataFrame({
"Current Avg": prev_shift,
"Suggested": recommended_shift,
"Δ (%)": ((recommended_shift - prev_shift) / prev_shift * 100)
}).loc[[r.split('`')[1] for r in recommendations if '`' in r]].round(2)
st.dataframe(deltas.fillna(0).style.format("{:.2f}"))
log("Operator advisory system executed successfully.")
# Optional: LLM-generated human-friendly summary
st.markdown("### Natural Language Operator Note")
try:
import importlib.util
if importlib.util.find_spec("transformers"):
from transformers import pipeline
tiny_llm_path = os.path.join(LOG_DIR, "cached_tiny_llm")
if os.path.exists(os.path.join(tiny_llm_path, "config.json")):
from transformers import AutoModelForCausalLM, AutoTokenizer
model = AutoModelForCausalLM.from_pretrained(tiny_llm_path)
tokenizer = AutoTokenizer.from_pretrained(tiny_llm_path)
assistant = pipeline("text-generation", model=model, tokenizer=tokenizer)
else:
assistant = pipeline("text2text-generation", model="google/flan-t5-small")
llm_prompt = f"""
You are a metallurgical process advisor working in a steel manufacturing unit.
Based on these recommendations:
{recommendations}
and these shift averages:
{deltas.to_dict(orient='index')}
Write a concise 3-line message to the operator suggesting what to adjust this shift.
"""
resp = assistant(llm_prompt, max_new_tokens=120)[0]["generated_text"]
st.info(resp)
log("Operator LLM advisory note generated successfully.")
else:
st.warning("Transformers not available — install it for text generation.")
except Exception as e:
st.warning(f"LLM advisory generation skipped: {e}")
else:
st.info("No suitable model found for operator advisory system.")
except Exception as e:
st.error(f"Operator advisory system failed: {e}")
log(f"Operator advisory error: {e}")
c1, c2 = st.columns(2)
c1.metric("Stacked Ensemble R² (holdout)", f"{final_r2:.4f}")
c2.metric("Stacked Ensemble RMSE (holdout)", f"{final_rmse:.4f}")
# Scatter comparison
fig, ax = plt.subplots(figsize=(7, 4))
ax.scatter(y_val, y_meta_pred, alpha=0.6)
ax.plot([y_val.min(), y_val.max()], [y_val.min(), y_val.max()], "r--")
ax.set_xlabel("Actual")
ax.set_ylabel("Stacked Predicted")
st.pyplot(fig)
            # Save trained stack artifacts (meta-learner plus fitted base models and selection)
            joblib.dump({"meta_learner": meta, "base_models": base_models, "selected_families": selected}, ENSEMBLE_PATH)
st.caption(f"Stacked ensemble snapshot updated → {ENSEMBLE_PATH}")
log(f"Ensemble model updated for use case: {use_case}")
# Explainability
st.markdown("### Explainability (approximate)")
try:
top_base = next((b for b in base_models if b["family"] == selected[0]), None)
if top_base and hasattr(top_base["model"], "predict"):
sample_X = X_val.sample(min(300, len(X_val)), random_state=42)
if any(k in top_base["family"] for k in ["XGBoost", "LightGBM", "RandomForest", "ExtraTrees", "CatBoost"]):
expl = shap.TreeExplainer(top_base["model"])
shap_vals = expl.shap_values(sample_X)
fig_sh = plt.figure(figsize=(8, 6))
shap.summary_plot(shap_vals, sample_X, show=False)
st.pyplot(fig_sh)
else:
st.info("Top model not tree-based; skipping SHAP summary.")
else:
st.info("No suitable base model for SHAP explanation.")
except Exception as e:
st.warning(f"SHAP computation skipped: {e}")
st.success(" AutoML + Stacking complete — metrics, artifacts, and SHAP ready.")
            # --- Store AutoML summary for the optional LLM advisory (refreshed on every run) ---
            st.session_state["automl_summary"] = {
                "leaderboard": lb[["family", "cv_r2"]].round(4).to_dict(orient="records"),
                "final_r2": float(final_r2),
                "final_rmse": float(final_rmse),
                "target": target,
                "use_case": use_case,
            }
            # Persist SHAP-based recommendations (when the advisory step produced them) for reuse across reruns
            if "recommendations" in locals():
                st.session_state["shap_recommendations"] = recommendations
# --- AI Recommendation Assistant (in-memory safe for Hugging Face) ---
st.markdown("---")
st.subheader("AI Recommendation Assistant (in-memory mode)")
st.caption("Generates quick local AI suggestions — no file writes required.")
if "llm_result" not in st.session_state:
st.session_state["llm_result"] = None
if st.button("Get AI Recommendation (via HF API)", key="ai_reco"):
summary = st.session_state.get("automl_summary", {})
if not summary:
st.warning("Please run AutoML first to generate context.")
st.stop()
try:
import requests, json
st.info("Contacting Hugging Face Inference API (Mixtral-8x7B-Instruct)…")
API_URL = "https://api-inference.huggingface.co/models/mistralai/Mixtral-8x7B-Instruct-v0.1"
headers = {"Authorization": f"Bearer {st.secrets['HF_TOKEN']}"}
prompt = f"""
You are an ML model tuning advisor.
Based on this AutoML summary, suggest 3 concise, actionable steps
to improve model performance if overfitting, underfitting, or data-quality issues are observed.
Use case: {summary.get('use_case')}
Target: {summary.get('target')}
Final R²: {summary.get('final_r2')}
Final RMSE: {summary.get('final_rmse')}
Leaderboard: {summary.get('leaderboard')}
"""
payload = {
"inputs": prompt,
"parameters": {"max_new_tokens": 200, "temperature": 0.7}
}
response = requests.post(API_URL, headers=headers, json=payload, timeout=60)
response.raise_for_status()
result = response.json()
            if isinstance(result, list) and result and "generated_text" in result[0]:
text = result[0]["generated_text"]
elif isinstance(result, dict) and "generated_text" in result:
text = result["generated_text"]
else:
text = json.dumps(result, indent=2)
st.session_state["llm_result"] = text.strip()
log("HF API recommendation generated successfully.")
st.success("AI Recommendation (Mixtral-8x7B-Instruct):")
st.markdown(st.session_state["llm_result"])
except Exception as e:
err_msg = f"HF Inference API call failed: {e}"
st.error(err_msg)
log(err_msg)
# Persist output even after rerun
if st.session_state["llm_result"]:
st.success("AI Recommendation (cached):")
st.markdown(st.session_state["llm_result"])
# ----- Target & Business Impact tab
with tabs[5]:
st.subheader("Recommended Target Variables by Use Case")
st.markdown("Each use case maps to a practical target variable that drives measurable business impact.")
target_table = pd.DataFrame([
["Predictive Maintenance (Mills, Motors, Compressors)", "bearing_temp / time_to_failure", "Rises before mechanical failure; early warning", "₹10–30 L per asset/year"],
["Blast Furnace / EAF Data Intelligence", "furnace_temp / tap_temp", "Central control variable, linked to energy and quality", "₹20–60 L/year"],
["Casting Quality Optimization", "defect_probability / solidification_rate", "Determines billet quality; control nozzle & cooling", "₹50 L/year yield gain"],
["Rolling Mill Energy Optimization", "energy_per_ton / exit_temp", "Directly tied to energy efficiency", "₹5–10 L/year per kWh/t"],
["Surface Defect Detection (Vision AI)", "defect_probability", "Quality metric from CNN", "1–2 % yield gain"],
["Material Composition & Alloy Mix AI", "deviation_from_target_grade", "Predict deviation, suggest corrections", "₹20 L/year raw material savings"],
["Inventory & Yield Optimization", "yield_ratio (output/input)", "Linked to WIP and process yield", "₹1 Cr+/year"],
["Refractory & Cooling Loss Prediction", "lining_thickness / heat_loss_rate", "Predict wear for planned maintenance", "₹40 L/year downtime savings"]], columns=["Use Case", "Target Variable", "Why It’s Ideal", "Business Leverage"])
st.dataframe(target_table, width="stretch")
st.markdown("---")
st.subheader("Business Framing for Clients")
st.markdown("These metrics show approximate annual benefits from small process improvements.")
business_table = pd.DataFrame([
["Energy consumption", "400 kWh/ton", "₹35–60 L"],
["Electrode wear", "1.8 kg/ton", "₹10 L"],
["Refractory wear", "3 mm/heat", "₹15 L"],
["Oxygen usage", "40 Nm³/ton", "₹20 L"],
["Yield loss", "2 %", "₹50 L – ₹1 Cr"],
], columns=["Metric", "Typical Value (EAF India)", "5 % Improvement → Annual ₹ Value"])
st.dataframe(business_table, width="stretch")
st.info("These numbers are indicative averages; actual benefits depend on plant capacity and process efficiency.")
# ----- Bibliography tab
with tabs[6]:
st.subheader("Annotated Bibliography — Justification for Target Variables")
st.markdown("""
These papers justify the chosen target variables (temperature, yield, efficiency, refractory wear)
in metallurgical AI modeling. Click any title to open the official paper.
""")
bib_data = [
{
"title": "A Survey of Data-Driven Soft Sensing in Ironmaking Systems",
"authors": "Yan et al. (2024)",
"notes": "Soft sensors for furnace and tap temperature; validates `furnace_temp` and `tap_temp` targets.",
"url": "https://doi.org/10.1021/acsomega.4c01254"
},
{
"title": "Optimisation of Operator Support Systems through Artificial Intelligence for the Cast Steel Industry",
"authors": "Ojeda Roldán et al. (2022)",
"notes": "Reinforcement learning for oxygen blowing and endpoint control; supports temperature and carbon targets.",
"url": "https://doi.org/10.3390/jmmp6020034"
},
{
"title": "Analyzing the Energy Efficiency of Electric Arc Furnace Steelmaking",
"authors": "Zhuo et al. (2024)",
"notes": "Links arc power, temperature, and energy KPIs — validates `energy_efficiency` and `power_density`.",
"url": "https://doi.org/10.3390/met15010113"
},
{
"title": "Dynamic EAF Modeling and Slag Foaming Index Prediction",
"authors": "MacRosty et al.",
"notes": "Supports refractory and heat-flux-based wear prediction — validates `lining_thickness` target.",
"url": "https://www.sciencedirect.com/science/article/pii/S0921883123004019"
},
{
"title": "Machine Learning for Yield Optimization in Continuous Casting",
"authors": "Springer (2023)",
"notes": "ML for yield ratio and defect minimization; supports `yield_ratio` target.",
"url": "https://link.springer.com/article/10.1007/s40964-023-00592-7"
}
]
bib_df = pd.DataFrame(bib_data)
bib_df["Paper Title"] = bib_df.apply(lambda x: f"[{x['title']}]({x['url']})", axis=1)
st.markdown("### Annotated Bibliography — Justification for Target Variables")
for _, row in bib_df.iterrows():
st.markdown(
f"**[{row['title']}]({row['url']})** \n"
f"*{row['authors']}* \n"
f" _{row['notes']}_ \n",
unsafe_allow_html=True
)
st.info("Click any paper title above to open it in a new tab.")
st.markdown("""
**Feature ↔ Target Justification**
- `furnace_temp`, `tap_temp` → Process temperature (Yan 2024, Ojeda 2022)
- `yield_ratio` → Production yield (Springer 2023)
- `energy_efficiency`, `power_density` → Energy KPIs (Zhuo 2024)
- `lining_thickness`, `slag_foaming_index` → Refractory & process health (MacRosty et al.)
""")
st.info("Click any paper title above to open it in a new tab.")
log("Bibliography tab rendered successfully.")
# -------------------------
# Footer / Notes
# -------------------------
st.markdown("---")
st.markdown("**Notes:** This dataset is synthetic and for demo/prototyping. Real plant integration requires NDA, data on-boarding, sensor mapping, and plant safety checks before any control actions.")
# ----- Download tab
with tabs[-2]:
st.subheader(" Download Saved Files (Flat Log Mode)")
available_files = [f for f in os.listdir(LOG_DIR) if os.path.isfile(os.path.join(LOG_DIR, f))]
if not available_files:
st.info("No files found yet — run AutoML once to generate outputs.")
else:
for f in sorted(available_files):
path = os.path.join(LOG_DIR, f)
with open(path, "rb") as fp:
st.download_button(
label=f" Download {f}",
data=fp,
file_name=f,
mime="application/octet-stream"
)
# ----- Logs tab
with tabs[-1]:
st.subheader(" Master Log (append-in-place)")
if os.path.exists(LOG_PATH):
with open(LOG_PATH, "r", encoding="utf-8") as f:
content = f.read()
st.text_area("Master Log Output", content, height=400)
st.download_button("Download Log", content, file_name="run_master.log")
else:
st.info("No log file yet — run AutoML once to start logging.")