import os
import json
import time
from datetime import datetime

import numpy as np
import pandas as pd
import streamlit as st
import matplotlib.pyplot as plt
import seaborn as sns
import joblib

from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, ExtraTreesRegressor
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.metrics import mean_squared_error, r2_score

import shap

# --- App configuration and artifact paths ---
st.set_page_config(page_title="AI Feature Universe Explorer — Advanced + SHAP", layout="wide")

DATA_DIR = os.getenv("DATA_DIR", "./data")
os.makedirs(DATA_DIR, exist_ok=True)
CSV_PATH = os.path.join(DATA_DIR, "flatfile_universe_advanced.csv")
META_PATH = os.path.join(DATA_DIR, "feature_metadata_advanced.json")
PDF_PATH = os.path.join(DATA_DIR, "annotated_bibliography.pdf")
ENSEMBLE_ARTIFACT = os.path.join(DATA_DIR, "ensemble_models.joblib")


# --- Synthetic flat-file generator ---
def generate_advanced_flatfile(
    n_rows=3000,
    random_seed=42,
    max_polynomial_new=60,
    global_variance_multiplier=1.0,
    variance_overrides=None,
):
    """
    Generates a large synthetic, physics-aligned dataset with many engineered features.
    Variability can be controlled per feature (through variance_overrides) or globally
    (via global_variance_multiplier).

    Args:
        n_rows: number of samples
        random_seed: RNG seed
        max_polynomial_new: limit on the number of polynomial expansion features
        global_variance_multiplier: multiplier applied to all default stddevs
        variance_overrides: dict mapping a feature name or substring to an absolute
            stddev that replaces the default for matching features
    """
    np.random.seed(random_seed)
    os.makedirs(DATA_DIR, exist_ok=True)
    if variance_overrides is None:
        variance_overrides = {}

    # Natural (sensor-level) features; each line groups signals from a related plant area
    natural_feats = [
        "vibration_x","vibration_y","motor_current","rpm","bearing_temp","ambient_temp","lube_pressure","power_factor",
        "furnace_temp","tap_temp","slag_temp","offgas_co","offgas_co2","o2_probe_pct","c_feed_rate","arc_power","furnace_pressure","feed_time",
        "mold_temp","casting_speed","nozzle_pressure","cooling_water_temp","billet_length","chemical_C","chemical_Mn","chemical_Si","chemical_S",
        "roll_speed","motor_load","coolant_flow","exit_temp","strip_thickness","line_tension","roller_vibration",
        "lighting_intensity","surface_temp","image_entropy_proxy",
        "spectro_Fe","spectro_C","spectro_Mn","spectro_Si","time_since_last_sample",
        "batch_id_numeric","weight_input","weight_output","time_in_queue","conveyor_speed",
        "shell_temp","lining_thickness","water_flow","cooling_out_temp","heat_flux"
    ]
    natural_feats = list(dict.fromkeys(natural_feats))

    def effective_sd(feature_name, base_sd):
        # Exact-name override takes precedence,
        if feature_name in variance_overrides:
            return float(variance_overrides[feature_name])
        # then the first substring match,
        for key, val in variance_overrides.items():
            if key in feature_name:
                return float(val)
        # otherwise the default stddev scaled by the global multiplier.
        return float(base_sd) * float(global_variance_multiplier)

    # Per-column samplers. More specific names are matched before the generic
    # "*_temp" rule so that branches like mold_temp or bearing_temp are reachable.
    def sample_col(name, n):
        name_l = name.lower()
        if name_l in ("mold_temp", "shell_temp", "cooling_out_temp", "exit_temp"):
            sd = effective_sd(name_l, 30)
            return np.random.normal(200 if "mold" not in name_l else 1500, sd, n)
        if "bearing_temp" in name_l:
            sd = effective_sd("bearing_temp", 5)
            return np.random.normal(65, sd, n)
        if "furnace_temp" in name_l or "tap_temp" in name_l or name_l.endswith("_temp"):
            sd = effective_sd("furnace_temp", 50)
            return np.random.normal(1550, sd, n)
        if "offgas_co2" in name_l:
            sd = effective_sd("offgas_co2", 4)
            return np.abs(np.random.normal(15, sd, n))
        if "offgas_co" in name_l:
            sd = effective_sd("offgas_co", 5)
            return np.abs(np.random.normal(20, sd, n))
        if "o2" in name_l:
            sd = effective_sd("o2_probe_pct", 1)
            return np.clip(np.random.normal(5, sd, n), 0.01, 60)
        if "arc_power" in name_l or "motor_load" in name_l:
            sd = effective_sd("arc_power", 120)
            return np.abs(np.random.normal(600, sd, n))
        if "rpm" in name_l:
            sd = effective_sd("rpm", 30)
            return np.abs(np.random.normal(120, sd, n))
        if "vibration" in name_l:
            sd = effective_sd("vibration", 0.15)
            return np.abs(np.random.normal(0.4, sd, n))
        if "chemical" in name_l or "spectro" in name_l:
            sd = effective_sd("chemical", 0.15)
            return np.random.normal(0.7, sd, n)
        if "weight" in name_l:
            sd = effective_sd("weight", 100)
            return np.random.normal(1000, sd, n)
        if "conveyor_speed" in name_l or "casting_speed" in name_l:
            sd = effective_sd("casting_speed", 0.6)
            return np.random.normal(2.5, sd, n)
        if "power_factor" in name_l:
            sd = effective_sd("power_factor", 0.03)
            return np.clip(np.random.normal(0.92, sd, n), 0.6, 1.0)
        if "image_entropy_proxy" in name_l:
            sd = effective_sd("image_entropy_proxy", 0.25)
            return np.abs(np.random.normal(0.5, sd, n))
        if "batch_id" in name_l:
            return np.random.randint(1000, 9999, n)
        if "time_since" in name_l or "time_in_queue" in name_l:
            sd = effective_sd("time_since", 20)
            return np.abs(np.random.normal(30, sd, n))
        if "heat_flux" in name_l:
            sd = effective_sd("heat_flux", 300)
            return np.abs(np.random.normal(1000, sd, n))
        return np.random.normal(0, effective_sd(name_l, 1), n)

    df = pd.DataFrame({c: sample_col(c, n_rows) for c in natural_feats})

    # Timestamps and plant metadata
    start = pd.Timestamp("2025-01-01T00:00:00")
    df["timestamp"] = pd.date_range(start, periods=n_rows, freq="min")
    df["cycle_minute"] = np.mod(np.arange(n_rows), 80)
    df["meta_plant_name"] = np.random.choice(["Rourkela","Jamshedpur","VSP","Bokaro","Kalinganagar","Salem"], n_rows)
    df["meta_country"] = "India"

    # Physics-aligned proxy features
    df["carbon_proxy"] = df["offgas_co"] / (df["offgas_co2"] + 1.0)
    df["oxygen_utilization"] = df["offgas_co2"] / (df["offgas_co"] + 1.0)
    df["power_density"] = df["arc_power"] / (df["weight_input"] + 1.0)
    df["energy_efficiency"] = df["furnace_temp"] / (df["arc_power"] + 1.0)
    df["slag_foaming_index"] = (df["slag_temp"] * df["offgas_co"]) / (df["o2_probe_pct"] + 1.0)
    df["yield_ratio"] = df["weight_output"] / (df["weight_input"] + 1e-9)

    # Rolling statistics, lags and rates of change for key signals
    rolling_cols = ["arc_power","furnace_temp","offgas_co","offgas_co2","motor_current","vibration_x","weight_input"]
    for rc in rolling_cols:
        if rc in df.columns:
            df[f"{rc}_roll_mean_3"] = df[rc].rolling(3, min_periods=1).mean()
            df[f"{rc}_roll_std_5"] = df[rc].rolling(5, min_periods=1).std().fillna(0)
            df[f"{rc}_lag1"] = df[rc].shift(1).bfill()
            df[f"{rc}_roc_1"] = df[rc].diff().fillna(0)

    # Domain interaction terms
    df["arc_o2_interaction"] = df["arc_power"] * df["o2_probe_pct"]
    df["carbon_power_ratio"] = df["carbon_proxy"] / (df["arc_power"] + 1e-6)
    df["temp_power_sqrt"] = df["furnace_temp"] * np.sqrt(np.abs(df["arc_power"]) + 1e-6)

    # Degree-2 polynomial expansion of the first 12 numeric columns,
    # keeping only the new (non-original) terms, capped at max_polynomial_new
    numeric = df.select_dtypes(include=[np.number]).fillna(0)
    poly_source_cols = numeric.columns[:12].tolist()
    poly = PolynomialFeatures(degree=2, interaction_only=False, include_bias=False)
    poly_mat = poly.fit_transform(numeric[poly_source_cols])
    poly_names = poly.get_feature_names_out(poly_source_cols)
    poly_df = pd.DataFrame(poly_mat, columns=[f"poly__{n}" for n in poly_names], index=df.index)
    keep_poly = [c for c in poly_df.columns if c.replace("poly__","") not in poly_source_cols]
    poly_df = poly_df[keep_poly].iloc[:, :max_polynomial_new] if len(keep_poly) > 0 else poly_df.iloc[:, :0]
    df = pd.concat([df, poly_df], axis=1)

    # PCA compression of the scaled numeric block
    scaler = StandardScaler()
    scaled = scaler.fit_transform(numeric)
    pca = PCA(n_components=6, random_state=42)
    pca_cols = pca.fit_transform(scaled)
    for i in range(pca_cols.shape[1]):
        df[f"pca_{i+1}"] = pca_cols[:, i]
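
    # Sketch: pca.explained_variance_ratio_ could be logged here to check how much
    # of the variance the six retained components actually capture.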

    # Unsupervised operating-mode labels
    kmeans = KMeans(n_clusters=6, random_state=42, n_init=10)
    df["operating_mode"] = kmeans.fit_predict(scaled)

    # Surrogate "predicted state" features from quick random-forest fits
    surrogate_df = df.copy()
    surrogate_df["furnace_temp_next"] = surrogate_df["furnace_temp"].shift(-1).ffill()
    features_for_surrogate = [c for c in ["furnace_temp","arc_power","o2_probe_pct","offgas_co","offgas_co2"] if c in df.columns]
    if len(features_for_surrogate) >= 2:
        X = surrogate_df[features_for_surrogate].fillna(0)
        y = surrogate_df["furnace_temp_next"]
        rf = RandomForestRegressor(n_estimators=50, random_state=42, n_jobs=-1)
        rf.fit(X, y)
        df["pred_temp_30s"] = rf.predict(X)
    else:
        df["pred_temp_30s"] = df["furnace_temp"]

    if all(c in df.columns for c in ["offgas_co","offgas_co2","o2_probe_pct"]):
        X2 = df[["offgas_co","offgas_co2","o2_probe_pct"]].fillna(0)
        rf2 = RandomForestRegressor(n_estimators=50, random_state=1, n_jobs=-1)
        rf2.fit(X2, df["carbon_proxy"])
        df["pred_carbon_5min"] = rf2.predict(X2)
    else:
        df["pred_carbon_5min"] = df["carbon_proxy"]

    # Safety / constraint indicators
    df["refractory_limit_flag"] = (df["lining_thickness"] < 140).astype(int)
    df["max_allowed_power_delta"] = np.clip(df["arc_power"].diff().abs().fillna(0), 0, 2000)

    # Operational flags and a simulated prediction-confidence score
    df["ARC_ON"] = ((df["arc_power"] > df["arc_power"].median()) & (df["carbon_proxy"] < 1.0)).astype(int)
    df["prediction_confidence"] = np.clip(np.random.beta(2, 5, n_rows), 0.05, 0.99)

    # Final cleanup and persistence
    df.replace([np.inf, -np.inf], np.nan, inplace=True)
    df.bfill(inplace=True)
    df.fillna(0, inplace=True)

    df.to_csv(CSV_PATH, index=False)

    # Feature metadata: tag each column by how it was produced
    meta = []
    for col in df.columns:
        if col in natural_feats:
            source = "natural"
        elif col.startswith("poly__") or col.startswith("pca_") or col in ["operating_mode"]:
            source = "advanced_synthetic"
        else:
            source = "synthetic"
        meta.append({
            "feature_name": col,
            "source_type": source,
            "linked_use_cases": ["All" if source != "natural" else "Mapped"],
            "units": "-",
            "formula": "see generator logic",
            "remarks": "auto-generated or simulated"
        })
    with open(META_PATH, "w") as f:
        json.dump(meta, f, indent=2)

    # Annotated bibliography PDF (falls back to a plain-text note if fpdf is unavailable)
    try:
        from fpdf import FPDF
        pdf = FPDF('P', 'mm', 'A4')
        pdf.add_page()
        pdf.set_font("Helvetica", "B", 14)
        pdf.cell(0, 8, "Annotated Bibliography - Metallurgical AI (Selected Papers)", ln=True)
        pdf.ln(2)
        pdf.set_font("Helvetica", "", 10)
        pdf.cell(0, 6, "Generated: " + datetime.utcnow().strftime("%Y-%m-%d %H:%M UTC"), ln=True)
        pdf.ln(4)
        bib_items = [
            ("A Survey of Data-Driven Soft Sensing in Ironmaking Systems", "Yan et al. (2024)", "Review of soft-sensors; supports gas proxies, lags, PCA."),
            ("Optimisation of Oxygen Blowing Process using RL", "Ojeda Roldan et al. (2022)", "RL for oxygen control; motivates surrogate predicted states & safety indices."),
            ("Analyzing the Energy Efficiency of Electric Arc Furnace", "Zhuo et al. (2024)", "Energy KPIs (kWh/t) motivate power_density & energy_efficiency features."),
            ("BOF/Endpoint prediction techniques", "Springer (2024)", "Endpoint prediction; supports temporal lags and cycle encoding."),
            ("Dynamic EAF modeling & slag foaming", "MacRosty et al.", "Physics priors for slag_foaming_index and refractory health modeling.")
        ]
        for title, auth, note in bib_items:
            pdf.set_font("Helvetica", "B", 11)
            pdf.multi_cell(0, 6, f"{title} - {auth}")
            pdf.set_font("Helvetica", "", 10)
            pdf.multi_cell(0, 5, f"Notes: {note}")
            pdf.ln(2)
        pdf.output(PDF_PATH)
    except Exception:
        with open(PDF_PATH.replace(".pdf", ".txt"), "w") as tf:
            tf.write("Annotated bibliography could not be rendered as PDF. Install fpdf (or fpdf2) for PDF output.\n")

    return CSV_PATH, META_PATH, PDF_PATH
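

# Example (hypothetical values): the variance controls let the file be regenerated
# with a different spread per signal, e.g.
#   generate_advanced_flatfile(n_rows=1000, global_variance_multiplier=0.5,
#                              variance_overrides={"furnace_temp": 25.0, "vibration": 0.05})
# where each override is the absolute stddev applied to matching feature names.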

# --- Bootstrap: generate the flat file on first run ---
if not os.path.exists(CSV_PATH) or not os.path.exists(META_PATH):
    with st.spinner("Generating synthetic features (this may take ~20-60s)..."):
        CSV_PATH, META_PATH, PDF_PATH = generate_advanced_flatfile(n_rows=3000, random_seed=42, max_polynomial_new=80)
    st.success(f"Generated dataset and metadata: {CSV_PATH}")

# --- Data loading (cached) ---
@st.cache_data
def load_data(csv_path=CSV_PATH, meta_path=META_PATH):
    df_local = pd.read_csv(csv_path)
    with open(meta_path, "r") as f:
        meta_local = json.load(f)
    return df_local, pd.DataFrame(meta_local)
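
# Note: st.cache_data keys on the function arguments, so if the flat file is
# regenerated at the same path during a session, clear the cache to re-read it:
#   st.cache_data.clear()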

df, meta_df = load_data()

# --- Sidebar filters ---
st.sidebar.title("Feature Explorer - Advanced + SHAP")
feat_types = sorted(meta_df["source_type"].unique().tolist())
selected_types = st.sidebar.multiselect("Feature type", feat_types, default=feat_types)
numeric_cols = df.select_dtypes(include=[np.number]).columns.tolist()

# --- Layout ---
st.title("Steel Authority of India Limited (SHAP-enabled)")
tabs = st.tabs([
    "Features",
    "Visualize",
    "Correlations",
    "Stats",
    "Ensemble + SHAP",
    "Target & Business Impact",
    "Bibliography"
])

with tabs[0]:
    st.subheader("Feature metadata")
    filtered_meta = meta_df[meta_df["source_type"].isin(selected_types)]
    st.dataframe(filtered_meta[["feature_name","source_type","formula","remarks"]].rename(columns={"feature_name":"Feature"}), height=400)
    st.markdown(f"Total features loaded: **{df.shape[1]}** | Rows: **{df.shape[0]}**")

with tabs[1]:
    st.subheader("Feature visualization")
    col = st.selectbox("Choose numeric feature", numeric_cols, index=0)
    bins = st.slider("Histogram bins", 10, 200, 50)
    fig, ax = plt.subplots(figsize=(8, 4))
    sns.histplot(df[col], bins=bins, kde=True, ax=ax)
    ax.set_title(col)
    st.pyplot(fig)
    st.write(df[col].describe().to_frame().T)

with tabs[2]:
    st.subheader("Correlation explorer")
    default_corr = numeric_cols[:20] if len(numeric_cols) >= 20 else numeric_cols
    corr_sel = st.multiselect("Select features (min 2)", numeric_cols, default=default_corr)
    if len(corr_sel) >= 2:
        corr = df[corr_sel].corr()
        fig, ax = plt.subplots(figsize=(10, 8))
        sns.heatmap(corr, cmap="coolwarm", center=0, ax=ax)
        st.pyplot(fig)
    else:
        st.info("Choose at least 2 numeric features to compute correlation.")

with tabs[3]:
    st.subheader("Summary statistics (numeric features)")
    st.dataframe(df.describe().T.style.format("{:.3f}"), height=500)

with tabs[4]:
    st.subheader("Autonomous Ensemble Modeling + SHAP Explainability")

    target = st.selectbox("Target variable", numeric_cols, index=numeric_cols.index("furnace_temp") if "furnace_temp" in numeric_cols else 0)
    default_features = [c for c in numeric_cols if c != target][:60]
    features = st.multiselect("Model input features", numeric_cols, default=default_features)
    sample_size = st.slider("Sample rows for training", 500, min(4000, df.shape[0]), 1000, step=100)
    sub_df = df[features + [target]].sample(n=sample_size, random_state=42)
    X = sub_df[features].fillna(0)
    y = sub_df[target].fillna(0)

    st.markdown("### 🎯 Select Operational Objective")
    objective = st.selectbox(
        "Optimization Objective",
        [
            "Maximize Accuracy (R²)",
            "Minimize RMSE (Stable Control)",
            "Maximize Yield Ratio (EAF/Inventory)",
            "Minimize Energy Consumption (Efficiency)",
            "Balanced (Accuracy + Efficiency)"
        ],
        index=0
    )

    import optuna
    from sklearn.model_selection import cross_val_score

    st.markdown("### ⚙️ Auto Tuning in Progress")

    def objective_fn(trial):
        model_name = trial.suggest_categorical("model", ["RandomForest", "GradientBoosting", "ExtraTrees"])
        n_estimators = trial.suggest_int("n_estimators", 100, 600)
        max_depth = trial.suggest_int("max_depth", 3, 20)

        if model_name == "RandomForest":
            model = RandomForestRegressor(n_estimators=n_estimators, max_depth=max_depth, n_jobs=-1)
        elif model_name == "GradientBoosting":
            # learning_rate only applies to boosting; suggesting it inside this branch
            # keeps study.best_params valid for the other estimators' constructors.
            learning_rate = trial.suggest_float("learning_rate", 0.01, 0.3, log=True)
            model = GradientBoostingRegressor(n_estimators=n_estimators, learning_rate=learning_rate, max_depth=max_depth)
        else:
            model = ExtraTreesRegressor(n_estimators=n_estimators, max_depth=max_depth, n_jobs=-1)

        scoring_metric = "r2"
        if "RMSE" in objective:
            scoring_metric = "neg_root_mean_squared_error"

        score = cross_val_score(model, X, y, cv=3, scoring=scoring_metric).mean()
        return score
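
    # Note: only the R² and RMSE objectives are wired into cross-validation scoring here;
    # the yield/energy/balanced options could be mapped to custom scorers with
    # sklearn.metrics.make_scorer once plant-specific KPIs are defined.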

    if st.button("Run Auto Ensemble Optimization"):
        with st.spinner("Optimizing models... please wait (~20–60s)"):
            study = optuna.create_study(direction="maximize")
            study.optimize(objective_fn, n_trials=20)

        best_params = study.best_params
        st.success("✅ Best Auto-Tuned Model Found")
        st.json(best_params)

        # Rebuild the winning estimator from the tuned hyperparameters
        model_name = best_params.pop("model")
        if model_name == "RandomForest":
            model = RandomForestRegressor(**best_params)
        elif model_name == "GradientBoosting":
            model = GradientBoostingRegressor(**best_params)
        else:
            model = ExtraTreesRegressor(**best_params)

        # Hold out a test split before fitting so the metrics below are not
        # computed on rows the model has already seen.
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
        model.fit(X_train, y_train)

        joblib.dump(model, ENSEMBLE_ARTIFACT)
        st.caption(f"Model saved: {ENSEMBLE_ARTIFACT}")

        st.markdown("### 📈 Optimization History")
        # Optuna's matplotlib backend returns an Axes, so pass its parent figure to Streamlit.
        ax_hist = optuna.visualization.matplotlib.plot_optimization_history(study)
        st.pyplot(ax_hist.figure)

        y_pred = model.predict(X_test)
        r2 = r2_score(y_test, y_pred)
        rmse = np.sqrt(mean_squared_error(y_test, y_pred))

        st.metric("R² Score", f"{r2:.3f}")
        st.metric("RMSE", f"{rmse:.3f}")

        fig, ax = plt.subplots(figsize=(7, 4))
        ax.scatter(y_test, y_pred, alpha=0.6)
        ax.plot([y_test.min(), y_test.max()], [y_test.min(), y_test.max()], "r--")
        ax.set_xlabel("Actual"); ax.set_ylabel("Predicted")
        st.pyplot(fig)

        st.markdown("### 🔍 SHAP Explainability (Auto Model)")
        explainer = shap.TreeExplainer(model)
        # Sample once so the SHAP values and the plotted feature matrix line up row for row
        shap_sample = X_test.sample(min(300, len(X_test)), random_state=42)
        shap_values = explainer.shap_values(shap_sample)
        fig_shap = plt.figure(figsize=(8, 6))
        shap.summary_plot(shap_values, shap_sample, show=False)
        st.pyplot(fig_shap)

        st.info("Auto tuning complete. Model performance and SHAP summary shown above.")
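
# Sketch (assumes the artifact above exists and matching feature columns are supplied):
# the persisted model can be reloaded outside the app for batch scoring, e.g.
#   model = joblib.load(ENSEMBLE_ARTIFACT)
#   preds = model.predict(new_data[feature_columns].fillna(0))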

with tabs[5]:
    st.subheader("Recommended Target Variables by Use Case")
    st.markdown("Each use case maps to a practical target variable that drives measurable business impact.")

    target_table = pd.DataFrame([
        ["Predictive Maintenance (Mills, Motors, Compressors)", "bearing_temp / time_to_failure", "Rises before mechanical failure; early warning", "₹10–30 L per asset/year"],
        ["Blast Furnace / EAF Data Intelligence", "furnace_temp / tap_temp", "Central control variable, linked to energy and quality", "₹20–60 L/year"],
        ["Casting Quality Optimization", "defect_probability / solidification_rate", "Determines billet quality; control nozzle & cooling", "₹50 L/year yield gain"],
        ["Rolling Mill Energy Optimization", "energy_per_ton / exit_temp", "Directly tied to energy efficiency", "₹5–10 L/year per kWh/t"],
        ["Surface Defect Detection (Vision AI)", "defect_probability", "Quality metric from CNN", "1–2 % yield gain"],
        ["Material Composition & Alloy Mix AI", "deviation_from_target_grade", "Predict deviation, suggest corrections", "₹20 L/year raw material savings"],
        ["Inventory & Yield Optimization", "yield_ratio (output/input)", "Linked to WIP and process yield", "₹1 Cr+/year"],
        ["Refractory & Cooling Loss Prediction", "lining_thickness / heat_loss_rate", "Predict wear for planned maintenance", "₹40 L/year downtime savings"],
    ], columns=["Use Case", "Target Variable", "Why It’s Ideal", "Business Leverage"])

    st.dataframe(target_table, use_container_width=True)

    st.markdown("---")
    st.subheader("Business Framing for Clients")
    st.markdown("These metrics show approximate annual benefits from small process improvements.")

    business_table = pd.DataFrame([
        ["Energy consumption", "400 kWh/ton", "₹35–60 L"],
        ["Electrode wear", "1.8 kg/ton", "₹10 L"],
        ["Refractory wear", "3 mm/heat", "₹15 L"],
        ["Oxygen usage", "40 Nm³/ton", "₹20 L"],
        ["Yield loss", "2 %", "₹50 L – ₹1 Cr"],
    ], columns=["Metric", "Typical Value (EAF India)", "5 % Improvement → Annual ₹ Value"])

    st.dataframe(business_table, use_container_width=True)
    st.info("These numbers are indicative averages; actual benefits depend on plant capacity and process efficiency.")

with tabs[6]:
    st.subheader("Annotated Bibliography & Feature Justification")
    st.markdown("""
    This section summarizes published research supporting the feature design and modeling choices.
    """)

    bib_data = [
        ("A Survey of Data-Driven Soft Sensing in Ironmaking Systems", "Yan et al. (2024)", "Supports gas proxies, lags, PCA for off-gas and temperature correlation."),
        ("Optimisation of Oxygen Blowing Process using RL", "Ojeda Roldan et al. (2022)", "Reinforcement learning for oxygen control; motivates surrogate predicted states & safety indices."),
        ("Analyzing the Energy Efficiency of Electric Arc Furnace", "Zhuo et al. (2024)", "Energy KPIs (kWh/t) motivate power_density & energy_efficiency features."),
        ("BOF/Endpoint Prediction Techniques", "Springer (2024)", "Endpoint prediction; supports temporal lags and cycle encoding."),
        ("Dynamic EAF Modeling & Slag Foaming", "MacRosty et al.", "Physics priors for slag_foaming_index and refractory health modeling."),
    ]

    bib_df = pd.DataFrame(bib_data, columns=["Paper Title", "Authors / Year", "Relevance to Feature Engineering"])
    st.dataframe(bib_df, use_container_width=True)

    st.markdown("""
    **Feature-to-Research Mapping Summary:**
    - Gas probes & soft-sensing → `carbon_proxy`, `oxygen_utilization`
    - Power & energy proxies → `power_density`, `energy_efficiency`
    - Temporal features → rolling means, lags, cycle progress indicators
    - Surrogate features → `pred_temp_30s`, `pred_carbon_5min`
    - PCA / clustering → operating mode compression
    """)

st.markdown("---")
st.markdown("**Notes:** This dataset is synthetic and intended for demo/prototyping only. Real plant integration requires an NDA, data on-boarding, sensor mapping, and plant safety checks before any control actions.")
|