# NOTE: "Spaces: Sleeping" banner text captured when this file was scraped from
# a Hugging Face Space; kept as a comment so the module parses.
# app_simulation_multi.py
# - Direct input: single material / single bead type
# - Range input: multiple materials / bead types -> generate every combination,
#   filter by rule sheets and sweep limits -> predict both MAX_FAILURE & THINNING (Blend)
from dashboard_theme.theme import inject

inject("graphite_gold")

import os
import itertools
import warnings
from pathlib import Path
from datetime import datetime
from decimal import Decimal
from typing import Dict, List, Tuple, Union

import numpy as np
import pandas as pd
import streamlit as st

st.title("์๋ฎฌ๋ ์ด์ ์คํ")
warnings.filterwarnings("ignore", category=FutureWarning)

# Authentication gate: the main page sets st.session_state["authenticated"]
# after a successful password entry; without it this page refuses to render.
if "authenticated" not in st.session_state or not st.session_state["authenticated"]:
    st.error("โ ์ ๊ทผ ๋ถ๊ฐ: ๋จผ์ ๋ฉ์ธ ํ๋ฉด์์ ๋น๋ฐ๋ฒํธ๋ฅผ ์ ๋ ฅํ์ธ์.")
    st.stop()
| # ========================================= | |
| # ํ์ด์ง ์ค์ & ์คํ์ผ | |
| # ========================================= | |
def _set_env_from_secrets(key: str):
    """Mirror a Streamlit secret into os.environ; no-op when absent or empty."""
    secret_value = None
    try:
        secret_value = st.secrets[key]
    except Exception:
        # Secret store unavailable or key missing -> leave the environment untouched.
        pass
    if secret_value:
        os.environ[key] = str(secret_value)
# Pull artifact-directory overrides from Streamlit secrets into the environment
# so get_predictor() below can pick them up via os.environ.
_set_env_from_secrets("FS_THIN_ART_DIR")
_set_env_from_secrets("FS_MF_ART_DIR")

# ---- Compact mode: shrink page-wide margins and padding ----
st.markdown("""
<style>
/* ์ ์ฒด ์ปจํ ์ด๋ ์ํ ํจ๋ฉ ์ค์ด๊ธฐ */
.block-container{padding-top:0.6rem !important; padding-bottom:1.25rem !important;}
/* ์ ๋ชฉ/์์ ๋ชฉ ๊ฐ๊ฒฉ */
h1, h2, h3{margin-top:0.4rem !important; margin-bottom:0.6rem !important;}
/* ํจ๋๊ณผ metric ์นด๋ ๊ฐ๊ฒฉ/๋์ด ์กฐ๊ธ ์ถ์ */
.panel{margin:8px 0 12px !important; padding:16px 16px 12px !important;}
.metric{min-height:128px !important; padding:18px !important;}
.metric .value{margin:4px 0 6px !important;}
/* ๊ตฌ๋ถ์ ๊ฐ๊ฒฉ */
hr, .stDivider{margin:10px 0 !important;}
/* ์น์ ์ฌ์ด ๋ง์ง ์กฐ๊ธ์ฉ ์ค์ด๊ธฐ */
.stMarkdown, [data-testid="stMarkdownContainer"]{margin:0 !important;}
/* ์บก์ -์ต์คํฌ๋ ํ์ดํธ ๋ฌถ๊ธฐ */
.tight-block .stCaption, .tight-block small{margin-top:0 !important; display:block;}
.tight-block [data-testid="stExpander"] > details{margin-top:6px !important;}
</style>
""", unsafe_allow_html=True)
# =========================================
# Defaults & global state
# =========================================
# Placeholder prediction shown before any simulation has run.
DEFAULT_RESULT = {"THINNING": 0.65, "MAX_FAILURE": 1.02}
# UI material labels and their corresponding model-side labels.
DISPLAY_LABELS = ["440", "590", "780"]
DISPLAY_TO_MODEL = {"440": "440.0", "590": "590.0", "780": "780.0"}
# Allowed thinning cap per material (used as the default post-filter threshold).
MATERIAL_THICKNESS_CAP = {"440": 0.17, "590": 0.16, "780": 0.10}

st.session_state.setdefault("history", [])
st.session_state.setdefault("input_mode", "์ง์ ์ ๋ ฅ")
st.session_state.setdefault("material", "590")  # default material for direct input
| # ========================================= | |
| # ํ์ผ ํ์ | |
| # ========================================= | |
| def _find_file(name: str): | |
| here = Path(__file__).parent | |
| for p in [here / name, here / "assets" / name, Path.cwd() / name, Path("/mnt/data") / name]: | |
| if p.exists(): | |
| return str(p) | |
| return "" | |
# If the lookup fails, replace these with absolute paths as needed.
RULES_XLSX = _find_file("ํ์๋ณ_ํ์ฉ๋ฒ์์ ๋ฆฌํ.xlsx")
SWEEP_XLSX = _find_file("์ง๊ฒฝ๋ณ_์ค๊ณ๋ณ๊ฒฝํ์ฉ๋ฒ์.xlsx")
| # ========================================= | |
| # ์กฐํฉ ์์ฑ & ํํฐ ์ ํธ | |
| # ========================================= | |
def dseq(start: float, stop: float, step: float, q="0.001") -> List[float]:
    """Inclusive arithmetic sequence built with Decimal to avoid float drift.

    Each value is quantized to *q* (e.g. "0.001" -> three decimals) and
    returned as a float; a 1e-12 tolerance keeps *stop* included.
    """
    lo = Decimal(str(start))
    hi = Decimal(str(stop)) + Decimal("1e-12")
    inc = Decimal(str(step))
    quantum = Decimal(q)
    out: List[float] = []
    current = lo
    while current <= hi:
        out.append(float(current.quantize(quantum)))
        current += inc
    return out
def bead_to_lr(bead_value: Union[str, None]) -> Tuple[int, int]:
    """Map a bead keyword to (LB, RB) flags; None -> (2, 2), unknown -> (0, 0)."""
    lookup = {
        None: (2, 2),
        "none": (0, 0),
        "right": (0, 1),
        "left": (1, 0),
        "double": (1, 1),
    }
    normalized = bead_value.lower() if isinstance(bead_value, str) else bead_value
    return lookup.get(normalized, (0, 0))
def make_all_combinations(cfg: Dict) -> pd.DataFrame:
    """Expand a range config into the full Cartesian grid of candidate designs.

    Returns one row per (material, thickness, upper_radius, lower_radius,
    diameter, degree, bead) combination with the LB/RB flags resolved.
    """
    beads = cfg.get("beads") or [None]
    bead_triples = [(name, *bead_to_lr(name)) for name in beads]
    thickness_vals = dseq(cfg["min_thickness"], cfg["max_thickness"], cfg["thickness_step"])
    diameter_vals = [int(v) for v in dseq(cfg["min_diameter"], cfg["max_diameter"], cfg["diameter_step"], q="1")]
    upper_vals = dseq(cfg["upper_min"], cfg["upper_max"], cfg["upper_step"])
    lower_vals = dseq(cfg["lower_min"], cfg["lower_max"], cfg["lower_step"])
    degree_vals = [int(v) for v in dseq(cfg["min_degree"], cfg["max_degree"], cfg["degree_step"], q="1")]

    records = [
        (mat, th, ur, lr, dia, deg, bead_name, lb, rb)
        for mat, th, ur, lr, dia, deg, (bead_name, lb, rb) in itertools.product(
            cfg["materials"], thickness_vals, upper_vals, lower_vals,
            diameter_vals, degree_vals, bead_triples,
        )
    ]
    return pd.DataFrame(
        records,
        columns=["material", "thickness", "upper_radius", "lower_radius",
                 "diameter", "degree", "bead", "LB", "RB"],
    )
| # ----- Sweep ํ๊ณ ----- | |
def build_limit_dicts(df_sweep: pd.DataFrame):
    """Convert the sweep-limit sheet into {(diameter, degree): limit} lookups.

    The sheet carries one row per "<diameter>_upper_radius" /
    "<diameter>_lower_radius" label and one column per degree; "F" cells mean
    "not allowed" and become NaN.

    Returns:
        (upper_dict, lower_dict): radius limits keyed by (diameter, degree).
    """
    t = df_sweep.copy().replace("F", np.nan)
    if "Sweep" in t.columns:
        t = t.set_index("Sweep")
    # Degree columns may arrive as strings; coerce to int where possible.
    new_cols = []
    for c in t.columns:
        try:
            new_cols.append(int(c))
        except (TypeError, ValueError):
            # FIX: was a bare `except:` which also swallowed KeyboardInterrupt etc.
            new_cols.append(c)
    t.columns = new_cols
    # Wide -> long: one record per (row label, degree); keep NaN limits so
    # forbidden combinations stay visible in the dicts.
    long = t.stack(dropna=False).reset_index()
    long.columns = ["row", "degree", "limit"]
    tmp = long["row"].str.extract(r"(?P<diameter>\d+)_(?P<which>upper|lower)_radius")
    long = pd.concat([long, tmp], axis=1)
    long["diameter"] = pd.to_numeric(long["diameter"], errors="coerce")
    long["degree"] = pd.to_numeric(long["degree"], errors="coerce")
    long["limit"] = pd.to_numeric(long["limit"], errors="coerce")
    upper = long[long["which"] == "upper"].dropna(subset=["diameter", "degree"])
    lower = long[long["which"] == "lower"].dropna(subset=["diameter", "degree"])
    upper_dict = {(int(d), int(g)): v for d, g, v in zip(upper["diameter"], upper["degree"], upper["limit"])}
    lower_dict = {(int(d), int(g)): v for d, g, v in zip(lower["diameter"], lower["degree"], lower["limit"])}
    return upper_dict, lower_dict
def filter_grid_by_sweep_limits(df_grid: pd.DataFrame, df_sweep: pd.DataFrame) -> pd.DataFrame:
    """Keep only grid rows whose upper/lower radii respect the sweep limits.

    Rows with no limit entry for their (diameter, degree) pair are dropped.
    The returned frame gains limit_upper / limit_lower columns.
    """
    upper_dict, lower_dict = build_limit_dicts(df_sweep)
    result = df_grid.copy()
    keys = list(zip(result["diameter"].astype(int), result["degree"].astype(int)))
    result["limit_upper"] = [upper_dict.get(k, np.nan) for k in keys]
    result["limit_lower"] = [lower_dict.get(k, np.nan) for k in keys]
    has_limits = result["limit_upper"].notna() & result["limit_lower"].notna()
    in_range = (
        (result["upper_radius"] <= result["limit_upper"])
        & (result["lower_radius"] <= result["limit_lower"])
    )
    return result[has_limits & in_range].reset_index(drop=True)
# ----- Rule sheets -----
# Excel sheet name to read for each bead keyword ("double" lives on sheet "both").
SHEET_BY_BEAD = {"left":"left", "right":"right", "double":"both", "none":"none"}
| def _normalize_input_df(df: pd.DataFrame) -> pd.DataFrame: | |
| df2 = df.copy() | |
| df2.columns = [str(c).strip() for c in df2.columns] | |
| rename = {} | |
| for c in df2.columns: | |
| lc = c.lower().strip() | |
| if lc == "diamater": rename[c] = "diameter" | |
| elif lc in ("material","diameter","degree","bead"): rename[c] = lc | |
| df2 = df2.rename(columns=rename) | |
| need = {"material","diameter","degree","bead"} | |
| missing = need - set(df2.columns) | |
| if missing: | |
| raise ValueError(f"์ ๋ ฅ ๋ฐ์ดํฐํ๋ ์์ ํ์ํ ์ปฌ๋ผ์ด ์์ต๋๋ค: {missing}") | |
| df2["bead"] = df2["bead"].astype(str).str.strip().str.lower() | |
| for c in ["material","diameter","degree"]: | |
| df2[c] = pd.to_numeric(df2[c], errors="coerce") | |
| return df2.dropna(subset=["material","diameter","degree"]).copy() | |
def _read_rule_sheet(xlsx_path: str, sheet_name: str) -> pd.DataFrame:
    """Load one bead rule sheet: allowed degree range per (material, diameter).

    Raises:
        ValueError: if the sheet lacks any required column.
    """
    sheet = pd.read_excel(xlsx_path, sheet_name=sheet_name)
    sheet.columns = sheet.columns.str.strip().str.lower()
    sheet = sheet.rename(columns={"diamater": "diameter"})  # fix sheet typo
    need = {"material", "diameter", "min_degree", "max_degree"}
    missing = need - set(sheet.columns)
    if missing:
        raise ValueError(f"๊ท์น ์ํธ '{sheet_name}'์ ํ์ํ ์ปฌ๋ผ์ด ์์ต๋๋ค: {missing}")
    for col in need:
        sheet[col] = pd.to_numeric(sheet[col], errors="coerce")
    sheet = sheet.dropna(subset=list(need)).copy()
    sheet = sheet.astype({"material": "int64", "diameter": "int64"})
    return sheet[["material", "diameter", "min_degree", "max_degree"]]
| def _apply_rules(df_part: pd.DataFrame, rule: pd.DataFrame) -> pd.DataFrame: | |
| if df_part.empty: return df_part.copy() | |
| df_part = df_part[df_part["material"].isin(rule["material"].unique())].copy() | |
| if df_part.empty: return df_part | |
| merged = df_part.merge(rule, on=["material","diameter"], how="left") | |
| mask = ( | |
| merged["min_degree"].notna() | |
| & merged["max_degree"].notna() | |
| & (merged["degree"] >= merged["min_degree"]) | |
| & (merged["degree"] <= merged["max_degree"]) | |
| ) | |
| return merged.loc[mask, df_part.columns].reset_index(drop=True) | |
def filter_all_by_bead(df: pd.DataFrame, rules_xlsx: str) -> pd.DataFrame:
    """Apply the matching per-bead rule sheet to each bead subset and recombine."""
    normalized = _normalize_input_df(df)
    kept = []
    for bead_value, sheet in SHEET_BY_BEAD.items():
        subset = normalized[normalized["bead"] == bead_value].copy()
        if subset.empty:
            continue
        rule = _read_rule_sheet(rules_xlsx, sheet)
        kept.append(_apply_rules(subset, rule))
    if not kept:
        # No bead subset survived: empty frame with the normalized schema.
        return normalized.iloc[0:0].copy()
    return pd.concat(kept, axis=0, ignore_index=True).reset_index(drop=True)
# =========================================
# Predictors (two targets)
# =========================================
import torch
import torch.nn as nn
import lightgbm as lgb

# Default artifact directories; overridable via FS_MF_ART_DIR / FS_THIN_ART_DIR.
ART_DIR_MF_DEFAULT = "artifacts_blend"
ART_DIR_THIN_DEFAULT = "artifacts_blend_thinning"
# Feature layout shared by both models.
CAT_COL_DEFAULT = "material"
NUM_COLS_DEFAULT = ["thickness","diameter","degree","upper_radius","lower_radius","LB","RB"]
class FTTransformer(nn.Module):
    """FT-Transformer-style tabular regressor.

    Each numeric feature becomes one token via its own Linear(1, d_model),
    the categorical material becomes an embedding token, a learned [CLS]
    token is prepended, and the encoder output at the [CLS] position feeds
    the regression head.
    """
    def __init__(self, n_materials:int, n_num:int, d_model:int=192, nhead:int=8,
                 num_layers:int=4, dim_ff:int=768, dropout:float=0.15):
        super().__init__()
        self.mat_emb = nn.Embedding(n_materials, d_model)
        # One projection per numeric column so each feature gets its own token.
        self.num_linears = nn.ModuleList([nn.Linear(1, d_model) for _ in range(n_num)])
        self.cls = nn.Parameter(torch.zeros(1, 1, d_model))
        nn.init.trunc_normal_(self.cls, std=0.02)
        enc_layer = nn.TransformerEncoderLayer(
            d_model=d_model, nhead=nhead, dim_feedforward=dim_ff,
            dropout=dropout, batch_first=True, activation='gelu', norm_first=True
        )
        self.encoder = nn.TransformerEncoder(enc_layer, num_layers=num_layers)
        self.head = nn.Sequential(nn.LayerNorm(d_model), nn.Linear(d_model, d_model), nn.GELU(), nn.Dropout(dropout), nn.Linear(d_model, 1))
    def forward(self, mat_ids, x_num):
        # mat_ids: (B,) int64 material ids; x_num: (B, n_num) scaled features.
        B = x_num.size(0)
        mat_tok = self.mat_emb(mat_ids).unsqueeze(1)
        num_tok = torch.cat([lin(x_num[:, i:i+1]).unsqueeze(1) for i, lin in enumerate(self.num_linears)], dim=1)
        tokens = torch.cat([self.cls.expand(B, -1, -1), mat_tok, num_tok], dim=1)
        h = self.encoder(tokens)
        # Regress from the [CLS] position only -> shape (B, 1).
        return self.head(h[:, 0, :])
| def _scale_like_fold(X_num: np.ndarray, mean: np.ndarray, scale: np.ndarray) -> np.ndarray: | |
| return ((X_num - mean) / scale).astype(np.float32) | |
| def _canonize_list(materials): return [str(m).strip() for m in materials] | |
| def _build_alias2canon(canon_list): | |
| alias2canon = {} | |
| for c in canon_list: | |
| alias2canon[c] = c | |
| s = c.strip(); alias2canon[s] = c | |
| if "." in s: alias2canon[s.rstrip("0").rstrip(".")] = c | |
| try: | |
| v = float(s); alias2canon[str(v)] = c | |
| if v.is_integer(): alias2canon[str(int(v))] = c | |
| except: pass | |
| return alias2canon | |
| def _first_existing(*paths): | |
| for p in paths: | |
| if os.path.exists(p): return p | |
| return None | |
def _load_json_like(art_dir: str, basename: str) -> dict:
    """Load ``<art_dir>/<basename>.json`` (or the extension-less file) as a dict.

    Raises:
        FileNotFoundError: when neither candidate file exists.
    """
    with_ext = os.path.join(art_dir, f"{basename}.json")
    without_ext = os.path.join(art_dir, basename)
    p = _first_existing(with_ext, without_ext)
    if p is None:
        # FIX: the original raised with `self.art_dir`, but this is a module-level
        # function with no `self` -> NameError masked the intended FileNotFoundError.
        raise FileNotFoundError(f"Missing {basename}(.json) in {art_dir}")
    import json
    # FIX: close the file handle (original used json.load(open(...)) and leaked it).
    with open(p, "r", encoding="utf-8") as fh:
        return json.load(fh)
def _load_columns_meta(art_dir: str):
    """Read the columns metadata if present (thinning variant preferred), else None."""
    path = _first_existing(
        os.path.join(art_dir, "columns_thinning.json"),
        os.path.join(art_dir, "columns.json"),
    )
    if not path:
        return None
    import json
    return json.load(open(path, "r", encoding="utf-8"))
class _SingleTargetBlendPredictor:
    """CV-ensembled blend (FT-Transformer + LightGBM) predictor for one target.

    Loads up to 10 fold checkpoints of each model family from ``art_dir``,
    averages per-family fold predictions, and blends the two families as
    ``alpha * FT + (1 - alpha) * LightGBM`` with ``best_alpha`` read from the
    alpha JSON artifact.
    """
    def __init__(self, art_dir:str, lgbm_prefix:str, ftt_prefix:str, alpha_json:str,
                 cat_col_default:str=CAT_COL_DEFAULT, num_cols_default:List[str]=None,
                 allow_columns_meta:bool=False, unknown_policy:str="error"):
        self.art_dir = art_dir; self.lgbm_prefix=lgbm_prefix; self.ftt_prefix=ftt_prefix
        self.alpha_json=alpha_json; self.unknown_policy=unknown_policy
        self.cat_col = cat_col_default; self.num_cols = list(num_cols_default or NUM_COLS_DEFAULT)
        if allow_columns_meta:
            # An optional columns(.json) artifact can override the feature layout.
            meta = _load_columns_meta(art_dir)
            if meta: self.cat_col = meta.get("cat_col", self.cat_col); self.num_cols = meta.get("num_cols", self.num_cols)
        self.folds_ft = self._load_ft_folds()
        self.boosters = self._load_lgbm_folds()
        self.materials = self._load_materials()
        self.best_alpha = float(_load_json_like(art_dir, self.alpha_json)["best_alpha"])
        # Canonical material labels plus an alias table ("590" -> "590.0", ...).
        self.materials_canon = _canonize_list(self.materials)
        self.alias2canon = _build_alias2canon(self.materials_canon)
        self.mat2id = {m:i for i,m in enumerate(self.materials_canon)}
    def _load_ft_folds(self):
        """Load consecutive FT-Transformer fold checkpoints (fold 1..10)."""
        folds=[]
        for fold in range(1,11):
            p = os.path.join(self.art_dir, f"{self.ftt_prefix}{fold}.pt")
            if not os.path.exists(p):
                # Stop at the first gap once at least one fold has loaded.
                if folds: break
                continue
            ckpt = torch.load(p, map_location="cpu", weights_only=False)
            model = FTTransformer(len(ckpt["materials"]), len(ckpt["num_cols"]))
            model.load_state_dict(ckpt["state_dict"]); model.eval()
            folds.append({"model":model,"materials":ckpt["materials"],"num_cols":ckpt["num_cols"],
                          "scaler_mean":np.array(ckpt["scaler_mean"],dtype=np.float32),
                          "scaler_scale":np.array(ckpt["scaler_scale"],dtype=np.float32)})
        if not folds: raise FileNotFoundError(f"No FT checkpoints found in {self.art_dir} (prefix={self.ftt_prefix})")
        return folds
    def _load_lgbm_folds(self):
        """Load consecutive LightGBM fold model files (with or without .txt)."""
        boosters=[]
        for fold in range(1,11):
            p = _first_existing(os.path.join(self.art_dir,f"{self.lgbm_prefix}{fold}.txt"),
                                os.path.join(self.art_dir,f"{self.lgbm_prefix}{fold}"))
            if p is None:
                if boosters: break
                continue
            boosters.append(lgb.Booster(model_file=p))
        if not boosters: raise FileNotFoundError(f"No LightGBM model files found in {self.art_dir} (prefix={self.lgbm_prefix})")
        return boosters
    def _load_materials(self):
        """Material labels from the materials artifact, else from fold 1's checkpoint."""
        try: return _load_json_like(self.art_dir,"materials")["materials"]
        except FileNotFoundError: return self.folds_ft[0]["materials"]
    def _prep_df(self, df_new: pd.DataFrame) -> pd.DataFrame:
        """Validate/normalize input rows; adds _mat_canon and _mat_id columns.

        Raises:
            ValueError: on missing columns, non-numeric feature values, or
                (when unknown_policy == "error") unknown material labels.
        """
        df = df_new.copy()
        need = [self.cat_col] + self.num_cols
        missing = [c for c in need if c not in df.columns]
        if missing: raise ValueError(f"Missing columns in input: {missing}")
        df[self.cat_col] = df[self.cat_col].astype(str).str.strip()
        df["_mat_canon"] = df[self.cat_col].map(self.alias2canon)
        if self.unknown_policy == "error":
            unknown = df.loc[df["_mat_canon"].isna(), self.cat_col].unique().tolist()
            if unknown: raise ValueError(f"Unknown materials in input {unknown}. Known materials: {self.materials_canon[:10]}{' ...' if len(self.materials_canon)>10 else ''}")
            df["_mat_id"] = df["_mat_canon"].map(self.mat2id).astype(int)
        else:
            # Fallback policy: map unknown materials to the first known label.
            df["_mat_canon"] = df["_mat_canon"].fillna(self.materials_canon[0])
            df["_mat_id"] = df["_mat_canon"].map(self.mat2id).astype(int)
        df[self.num_cols] = df[self.num_cols].apply(pd.to_numeric, errors="coerce")
        if df[self.num_cols].isnull().any().any():
            bad = df[self.num_cols].columns[df[self.num_cols].isnull().any()].tolist()
            raise ValueError(f"Non-numeric values detected in columns: {bad}")
        return df
    def predict_ft(self, df_new: pd.DataFrame) -> np.ndarray:
        """Fold-averaged FT-Transformer prediction (each fold uses its own scaler)."""
        df = self._prep_df(df_new); mids = torch.tensor(df["_mat_id"].values, dtype=torch.long)
        preds=[]
        for f in self.folds_ft:
            Xn = df[f["num_cols"]].values.astype(np.float32)
            x_scaled = _scale_like_fold(Xn, f["scaler_mean"], f["scaler_scale"])
            with torch.no_grad():
                p = f["model"](mids, torch.tensor(x_scaled, dtype=torch.float32)).cpu().numpy().ravel()
            preds.append(p)
        return np.mean(preds, axis=0)
    def predict_lgbm(self, df_new: pd.DataFrame) -> np.ndarray:
        """Fold-averaged LightGBM prediction with a categorical material column."""
        df = self._prep_df(df_new)
        X = df[[self.cat_col] + self.num_cols].copy()
        X[self.cat_col] = pd.Categorical(df["_mat_canon"], categories=self.materials_canon)
        preds = [bst.predict(X, num_iteration=getattr(bst,"best_iteration",None)) for bst in self.boosters]
        return np.mean(preds, axis=0)
    def predict_blend(self, df_new: pd.DataFrame, alpha: float|None=None) -> np.ndarray:
        """Blend: alpha * FT + (1 - alpha) * LightGBM; alpha defaults to best_alpha."""
        alpha = self.best_alpha if alpha is None else alpha
        return alpha * self.predict_ft(df_new) + (1 - alpha) * self.predict_lgbm(df_new)
class MultiTargetBlendPredictor:
    """Pairs the MAX_FAILURE and THINNING blend predictors behind one call."""
    def __init__(self, art_dir_mf:str, art_dir_thin:str, unknown_policy:str="error"):
        self.mf = _SingleTargetBlendPredictor(
            art_dir=art_dir_mf,
            lgbm_prefix="lgbm_fold",
            ftt_prefix="ftt_fold",
            alpha_json="blend_alpha",
            allow_columns_meta=False,
            unknown_policy=unknown_policy,
        )
        self.thin = _SingleTargetBlendPredictor(
            art_dir=art_dir_thin,
            lgbm_prefix="lgbm_thinning_fold",
            ftt_prefix="ftt_thinning_fold",
            alpha_json="blend_alpha_thinning",
            allow_columns_meta=True,
            unknown_policy=unknown_policy,
        )
    def predict_both(self, df_new: pd.DataFrame, alpha_mf: float|None=None, alpha_th: float|None=None):
        """Predict both targets; alpha=0 is pure LightGBM, alpha=1 pure FT."""
        results = {}
        results["blend_max_failure"] = self.mf.predict_blend(df_new, alpha_mf)
        results["blend_thinning"] = self.thin.predict_blend(df_new, alpha_th)
        results["lgbm_max_failure"] = self.mf.predict_blend(df_new, 0.0)
        results["dl_max_failure"] = self.mf.predict_blend(df_new, 1.0)
        results["lgbm_thinning"] = self.thin.predict_blend(df_new, 0.0)
        results["dl_thinning"] = self.thin.predict_blend(df_new, 1.0)
        return results
def get_predictor():
    """Build the two-target predictor, honouring env-var artifact-dir overrides."""
    mf_dir = os.environ.get("FS_MF_ART_DIR", ART_DIR_MF_DEFAULT)
    thin_dir = os.environ.get("FS_THIN_ART_DIR", ART_DIR_THIN_DEFAULT)
    # "fallback0": unknown material labels map to the first known one instead of raising.
    return MultiTargetBlendPredictor(art_dir_mf=mf_dir, art_dir_thin=thin_dir, unknown_policy="fallback0")
def predict_both_blend(df: pd.DataFrame):
    """Convenience wrapper: (max_failure, thinning) blend predictions for *df*.

    Note: this constructs a fresh predictor (and reloads artifacts) per call,
    matching the original behavior.
    """
    predictions = get_predictor().predict_both(df)
    return predictions["blend_max_failure"], predictions["blend_thinning"]
| # ========================================= | |
| # UI ์ ํธ | |
| # ========================================= | |
def bead_to_flags_ui(bead: str):
    """Translate a UI bead label into (LB, RB) flags; unknown labels -> (0, 0)."""
    flags = {
        "Left Bead": (1, 0),
        "Right Bead": (0, 1),
        "Double Bead": (1, 1),
    }
    return flags.get(bead, (0, 0))
| def _bead_key_from_label(label: str) -> str: | |
| return {"No Bead":"none","Left Bead":"left","Right Bead":"right","Double Bead":"double"}[label] | |
# (Upgraded) metric card with an icon slot
def metric_card(label: str, value: float, lo: float, hi: float, icon: str = "๐"):
    """Render one HTML metric card; styled 'ok' iff lo <= value <= hi."""
    ok = lo <= float(value) <= hi
    cls = "ok" if ok else "bad"
    status_text = "์ ์" if ok else "๋ฒ์ ๋ฐ"
    st.markdown(
        f"""
<div class="metric {cls}" style="text-align:center;">
<div class="label" style="font-weight:600; margin-bottom:6px;">{icon} {label}</div>
<div class="value" style="font-size:2rem; font-weight:bold;">{float(value):.3f}</div>
<div class="chip" style="margin-top:4px; margin-bottom:4px;">{status_text}</div>
<div class="range" style="font-size:0.85rem; color:gray;">ํ์ฉ๋ฒ์: {lo:.2f} ~ {hi:.2f}</div>
</div>
""",
        unsafe_allow_html=True
    )
def val_or_range(single_key, range_key, unit=""):
    """Format the current input as "v<unit>" or "lo<unit> ~ hi<unit>" per mode.

    Returns "-" when the relevant session-state key is absent.
    """
    mode = st.session_state.get("input_mode", "์ง์ ์ ๋ ฅ")
    if mode == "๋ฒ์ ๊ฐ ์ ๋ ฅ" and range_key in st.session_state:
        lo, hi = st.session_state[range_key]
        return f"{lo}{unit} ~ {hi}{unit}"
    if mode == "์ง์ ์ ๋ ฅ" and single_key in st.session_state:
        return f"{st.session_state[single_key]}{unit}"
    return "-"
def render_cap_table():
    """Show the per-material thinning-cap reference table inside an expander."""
    with st.expander("์ํ ๊ธฐ์คํ ๋ณด๊ธฐ", expanded=False):
        st.markdown("""
<table style="width:100%; border-collapse: collapse;" border="1">
<tr><th>์ฌ์ง(ํ๊ธฐ)</th><th>๋ชจ๋ธ ๋ผ๋ฒจ</th><th>๋๊ป ๊ฐ์์จ(ํ์ฉ ์ํ)</th></tr>
<tr><td>440</td><td>440.0</td><td style="color:red">0.17</td></tr>
<tr><td>590</td><td>590.0</td><td style="color:red">0.16</td></tr>
<tr><td>780</td><td>780.0</td><td style="color:red">0.10</td></tr>
</table>
""", unsafe_allow_html=True)
def build_df_single(material_display, thickness, diameter, degree, upperR, lowerR, beadType):
    """Assemble the one-row model-input frame for the direct-input path."""
    lb, rb = bead_to_flags_ui(beadType)
    record = {
        "material": DISPLAY_TO_MODEL[material_display],
        "thickness": float(thickness),
        "diameter": int(diameter),
        "degree": int(degree),
        "upper_radius": float(upperR),
        "lower_radius": float(lowerR),
        "LB": int(lb),
        "RB": int(rb),
    }
    return pd.DataFrame([record])
def get_sweep_df():
    """Load the sweep-limit sheet, or None when the file was not found."""
    if not SWEEP_XLSX:
        return None
    return pd.read_excel(SWEEP_XLSX, sheet_name=0)
def get_sweep_dicts():
    """Sweep limits as ({(dia, deg): upper}, {(dia, deg): lower}); empty dicts if no file."""
    sweep = get_sweep_df()
    if sweep is None:
        return {}, {}
    return build_limit_dicts(sweep)
def validate_direct_input(material, diameter, degree, bead_label, upperR, lowerR):
    """Validate a direct-input design against the rule sheet and sweep limits.

    Returns:
        (True, None) when valid, otherwise (False, reason-string).
    """
    if RULES_XLSX:
        bead_key = _bead_key_from_label(bead_label)
        rule = _read_rule_sheet(RULES_XLSX, SHEET_BY_BEAD[bead_key])
        row = rule[(rule["material"] == int(material)) & (rule["diameter"] == int(diameter))]
        if not row.empty:
            r = row.iloc[0]; mn, mx = int(r["min_degree"]), int(r["max_degree"])
            if not (mn <= int(degree) <= mx):
                return False, f"๊ฐ๋ {degree}ยฐ๋ ๊ท์น ๋ฒ์({mn}~{mx}ยฐ) ๋ฐ์ ๋๋ค."
        else:
            # NOTE(review): when no rule row matches, this accepts immediately and
            # skips the sweep-limit check below — confirm that is intended.
            return True, None
    # Sweep-limit check (also the only check when the rules file is missing).
    upper_dict, lower_dict = get_sweep_dicts()
    key = (int(diameter), int(degree))
    u = upper_dict.get(key, np.nan); l = lower_dict.get(key, np.nan)
    if np.isnan(u) or np.isnan(l): return False, "์ค์ํ์ ์๋ ์ง๊ฒฝ/๊ฐ๋ ์กฐํฉ์ ๋๋ค."
    if float(upperR) > float(u) or float(lowerR) > float(l):
        return False, f"R ํ๊ณ ์ด๊ณผ: ์๋จR โค {u}, ํ๋จR โค {l} ์ด์ด์ผ ํฉ๋๋ค."
    return True, None
def _reset_optimum_summary():
    """Drop cached best-result entries so stale optimum cards are not shown."""
    stale_keys = (
        "best_filter_thin",
        "best_filter_mf",
        "best_all_thin",
        "best_all_mf",
        "topcard_source",
    )
    for key in stale_keys:
        st.session_state.pop(key, None)
| # ========================================= | |
| # ํญ UI | |
| # ========================================= | |
| tabs = st.tabs(["์กฐ๊ฑด ์ค์ & ์คํ", "๊ฒฐ๊ณผ ์๊ฐํ", "๊ธฐ๋ก ์กฐํ"]) | |
| # ----------------------------------------- | |
| # 1) ์กฐ๊ฑด ์ค์ & ์คํ | |
| # ----------------------------------------- | |
| with tabs[0]: | |
| st.header("์กฐ๊ฑด ์ค์ ") | |
| st.markdown("---") | |
| st.subheader("์ ๋ ฅ ๋ชจ๋") | |
| st.session_state["input_mode"] = st.radio("์ ๋ ฅ ๋ฐฉ์", ["์ง์ ์ ๋ ฅ", "๋ฒ์ ๊ฐ ์ ๋ ฅ"], horizontal=True, label_visibility="collapsed", key="mode_radio") | |
| _prev_mode = st.session_state.get("_prev_input_mode") | |
| if _prev_mode is not None and _prev_mode != st.session_state["input_mode"]: | |
| _reset_optimum_summary() | |
| st.session_state["_prev_input_mode"] = st.session_state["input_mode"] | |
| col_b1, col_b2 = st.columns(2) | |
| if st.session_state["input_mode"] == "์ง์ ์ ๋ ฅ": | |
| with col_b1: | |
| st.session_state["beadType"] = st.selectbox("๋น๋ ํ์ ์ ํ", ["No Bead","Double Bead","Left Bead","Right Bead"]) | |
| with col_b2: | |
| cur = st.session_state.get("material", "590") | |
| idx = DISPLAY_LABELS.index(cur) if cur in DISPLAY_LABELS else 1 | |
| st.session_state["material"] = st.selectbox("์ฌ์ง", DISPLAY_LABELS, index=idx) | |
| else: | |
| with col_b1: | |
| st.session_state["beadTypes_multi"] = st.multiselect( | |
| "๋น๋ ํ์ ์ ํ (๋ณต์ ๊ฐ๋ฅ)", ["No Bead","Double Bead","Left Bead","Right Bead"], | |
| default=["Right Bead"]) | |
| with col_b2: | |
| default_materials = [st.session_state.get("material","590")] | |
| st.session_state["materials_multi"] = st.multiselect( | |
| "์ฌ์ง (๋ณต์ ๊ฐ๋ฅ)", DISPLAY_LABELS, default=default_materials) | |
| st.subheader("์ฑํ ์กฐ๊ฑด") | |
| st.markdown("<div style='height:6px'></div>", unsafe_allow_html=True) | |
| if st.session_state["input_mode"] == "์ง์ ์ ๋ ฅ": | |
| for key in ["diameterRange","degreeRange","upperRRange","lowerRRange","thicknessRange"]: | |
| st.session_state.pop(key, None) | |
| col_t, col_d = st.columns(2) | |
| with col_t: | |
| st.session_state["thickness"] = st.selectbox("์์ฌ ๋๊ป (mm)", [0.7,0.8,0.9,1.0,1.1,1.2], index=2) | |
| with col_d: | |
| st.session_state["diameter"] = st.number_input("์ง๊ฒฝ (mm)", 10, 1000, 20) | |
| col1, col2 = st.columns(2) | |
| with col1: | |
| st.session_state["upperR"] = st.number_input("์๋จ R", 1, 100, 4) | |
| with col2: | |
| st.session_state["lowerR"] = st.number_input("ํ๋จ R", 1, 100, 3) | |
| st.session_state["degree"] = st.number_input("๊ฐ๋ (ยฐ)", 0, 90, 75) | |
| if st.button("์๋ฎฌ๋ ์ด์ ์คํํ๊ธฐ", use_container_width=True, type="primary"): | |
| _reset_optimum_summary() | |
| ok, err = validate_direct_input( | |
| st.session_state["material"], st.session_state["diameter"], st.session_state["degree"], | |
| st.session_state["beadType"], st.session_state["upperR"], st.session_state["lowerR"] | |
| ) | |
| if not ok: | |
| st.error(f"๊ท์น ์๋ฐ: {err}") | |
| st.stop() | |
| df = build_df_single( | |
| st.session_state["material"], st.session_state["thickness"], st.session_state["diameter"], | |
| st.session_state["degree"], st.session_state["upperR"], st.session_state["lowerR"], | |
| st.session_state["beadType"], | |
| ) | |
| try: | |
| with st.spinner("๋ชจ๋ธ ์์ธก ์ค์ ๋๋คโฆ"): | |
| mf_arr, th_arr = predict_both_blend(df) | |
| mf = float(mf_arr[0]); th = float(th_arr[0]) | |
| except Exception as e: | |
| st.error(f"๋ชจ๋ธ ์์ธก ์คํจ: {e}"); st.stop() | |
| st.session_state.sim_result = {"THINNING": th, "MAX_FAILURE": mf} | |
| st.session_state.topcard_source = "single" | |
| st.success("โ ์๋ฎฌ๋ ์ด์ ์๋ฃ! (Blend ๋ชจ๋ธ ์ฌ์ฉ)") | |
| else: | |
| for key in ["diameter","degree","upperR","lowerR","thickness"]: | |
| st.session_state.pop(key, None) | |
| col_t, col_d = st.columns(2) | |
| with col_t: | |
| st.session_state["thicknessRange"] = st.slider("์์ฌ ๋๊ป ๋ฒ์ (mm)", 0.7, 1.2, (0.7, 1.0), step=0.1) | |
| with col_d: | |
| st.session_state["diameterRange"] = st.slider("์ง๊ฒฝ ๋ฒ์ (mm)", 10, 50, (15, 30)) | |
| col1, col2 = st.columns(2) | |
| with col1: | |
| st.session_state["upperRRange"] = st.slider("์๋จ R ๋ฒ์", 1, 15, (3, 7)) | |
| with col2: | |
| st.session_state["lowerRRange"] = st.slider("ํ๋จ R ๋ฒ์", 1, 10, (2, 4)) | |
| st.session_state["degreeRange"] = st.slider("๊ฐ๋ ๋ฒ์ (ยฐ)", 60, 90, (72, 87)) | |
| st.divider() | |
| st.subheader("๊ฒฐ๊ณผ ํํฐ ์กฐ๊ฑด (์ ํ)") | |
| st.session_state["apply_post_filter"] = st.checkbox("๊ฒฐ๊ณผ ํํฐ ์ ์ฉ (THINNING โค, MAX FAILURE โค)", value=False) | |
| selected_mats = st.session_state.get("materials_multi", []) or DISPLAY_LABELS | |
| caps = [MATERIAL_THICKNESS_CAP[m] for m in selected_mats] | |
| default_thin_cap = float(min(caps)) if len(caps) else 0.16 | |
| f2, f3 = st.columns([1,1]) | |
| with f2: | |
| st.session_state.setdefault("filter_thinning_max", default_thin_cap) | |
| st.session_state["filter_thinning_max"] = st.number_input( | |
| "THINNING โค", 0.0, 1.0, float(st.session_state["filter_thinning_max"]), | |
| step=0.01, format="%.2f", disabled=not st.session_state["apply_post_filter"] | |
| ) | |
| with f3: | |
| st.session_state.setdefault("filter_max_failure_max", 1.0) | |
| st.session_state["filter_max_failure_max"] = st.number_input( | |
| "MAX FAILURE โค", 0.0, 2.0, float(st.session_state["filter_max_failure_max"]), | |
| step=0.01, format="%.2f", disabled=not st.session_state["apply_post_filter"] | |
| ) | |
| if st.button("์๋ฎฌ๋ ์ด์ ์คํํ๊ธฐ", use_container_width=True, type="primary"): | |
| _reset_optimum_summary() | |
| bead_keys = [_bead_key_from_label(b) for b in (st.session_state.get("beadTypes_multi") or ["Right Bead"])] | |
| mats_disp = st.session_state.get("materials_multi") or DISPLAY_LABELS | |
| mats_int = [int(m) for m in mats_disp] | |
| cfg = { | |
| "materials": mats_int, | |
| "min_thickness": st.session_state["thicknessRange"][0], | |
| "max_thickness": st.session_state["thicknessRange"][1], | |
| "thickness_step": 0.1, | |
| "min_diameter": st.session_state["diameterRange"][0], | |
| "max_diameter": st.session_state["diameterRange"][1], | |
| "diameter_step": 1, | |
| "upper_min": st.session_state["upperRRange"][0], | |
| "upper_max": st.session_state["upperRRange"][1], | |
| "upper_step": 1.0, | |
| "lower_min": st.session_state["lowerRRange"][0], | |
| "lower_max": st.session_state["lowerRRange"][1], | |
| "lower_step": 1.0, | |
| "min_degree": st.session_state["degreeRange"][0], | |
| "max_degree": st.session_state["degreeRange"][1], | |
| "degree_step": 1, | |
| "beads": bead_keys, | |
| } | |
| df_all = make_all_combinations(cfg) | |
| if RULES_XLSX: | |
| df_all = filter_all_by_bead(df_all, RULES_XLSX) | |
| else: | |
| st.warning("๊ท์น ํ์ผ์ ์ฐพ์ง ๋ชปํ์ต๋๋ค. (ํ์๋ณ_ํ์ฉ๋ฒ์์ ๋ฆฌํ.xlsx)") | |
| sweep_df = get_sweep_df() | |
| if sweep_df is not None and not df_all.empty: | |
| df_all = filter_grid_by_sweep_limits(df_all, sweep_df) | |
| elif sweep_df is None: | |
| st.warning("์ค์ ํ๊ณ ํ์ผ์ ์ฐพ์ง ๋ชปํ์ต๋๋ค. (์ง๊ฒฝ๋ณ_์ค๊ณ๋ณ๊ฒฝํ์ฉ๋ฒ์.xlsx)") | |
| if df_all.empty: | |
| st.warning("๊ท์น/์ค์ ํ๊ณ ์ ์ฉ ํ ๋จ๋ ์กฐํฉ์ด ์์ต๋๋ค.") | |
| st.stop() | |
| pred_df = df_all.copy() | |
| pred_df["material"] = pred_df["material"].astype(int).astype(str).map(DISPLAY_TO_MODEL) | |
| try: | |
| with st.spinner("๋ชจ๋ธ ์์ธก ์ค์ ๋๋คโฆ"): | |
| mf_pred, th_pred = predict_both_blend(pred_df) | |
| except Exception as e: | |
| st.error(f"๋ชจ๋ธ ์์ธก ์คํจ: {e}"); st.stop() | |
| df_all["THINNING"] = th_pred | |
| df_all["MAX_FAILURE"] = mf_pred | |
| if st.session_state.get("apply_post_filter", False): | |
| thin_thr = float(st.session_state["filter_thinning_max"]) | |
| mf_thr = float(st.session_state["filter_max_failure_max"]) | |
| ok_mask = (df_all["THINNING"] <= thin_thr) & (df_all["MAX_FAILURE"] <= mf_thr) | |
| matched = df_all.loc[ok_mask].copy() | |
| else: | |
| matched = df_all.copy() | |
| total = len(df_all) | |
| def _best_rows(df_ok: pd.DataFrame, df_all: pd.DataFrame): | |
| best = {"filter_thin": None, "filter_mf": None, "all_thin": None, "all_mf": None} | |
| if len(df_ok): | |
| best["filter_thin"] = df_ok.loc[df_ok["THINNING"].idxmin()] | |
| best["filter_mf"] = df_ok.loc[df_ok["MAX_FAILURE"].idxmin()] | |
| best["all_thin"] = df_all.loc[df_all["THINNING"].idxmin()] | |
| best["all_mf"] = df_all.loc[df_all["MAX_FAILURE"].idxmin()] | |
| return best | |
# Pick winners from the filtered set when it is non-empty, else from all rows.
best = _best_rows(matched if len(matched) else df_all, df_all)
# Persist the per-criterion winners for the results tab; the "filter" winners
# are None whenever nothing passed the post-filter.
st.session_state.best_filter_thin = None if matched.empty else best["filter_thin"].to_dict()
st.session_state.best_filter_mf = None if matched.empty else best["filter_mf"].to_dict()
st.session_state.best_all_thin = best["all_thin"].to_dict()
st.session_state.best_all_mf = best["all_mf"].to_dict()
# Top-card values: use filtered optima when the filter was applied and matched
# something; otherwise fall back to the overall optima.
if len(matched) and st.session_state.get("apply_post_filter", False):
    st.session_state.sim_result = {
        "THINNING": float(best["filter_thin"]["THINNING"]),
        "MAX_FAILURE": float(best["filter_mf"]["MAX_FAILURE"]),
    }
    st.session_state.topcard_source = "filter"
else:
    st.session_state.sim_result = {
        "THINNING": float(best["all_thin"]["THINNING"]),
        "MAX_FAILURE": float(best["all_mf"]["MAX_FAILURE"]),
    }
    st.session_state.topcard_source = "overall"
if len(matched):
    # Append every surviving combination to the in-session history table.
    now_str = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    matched = matched.copy()
    matched["์ ํ"] = False  # checkbox column consumed by the history editor
    matched["์ ์ฅ์๊ฐ"] = now_str
    matched["์ฌ์ง"] = matched["material"].astype(int).astype(str)
    # Continue 1-based numbering after the rows already stored in history.
    # Single vectorized assignment replaces the former per-row .at[] loop
    # (and the dead "Index = None" placeholder column).
    start_idx = len(st.session_state.history)
    matched["Index"] = [start_idx + i + 1 for i in range(len(matched))]
    matched = matched[[
        "์ ํ","Index","์ ์ฅ์๊ฐ","bead",
        "thickness","์ฌ์ง","diameter","degree","upper_radius","lower_radius",
        "THINNING","MAX_FAILURE"
    ]].rename(columns={
        "bead":"๋น๋ ํ์ ",
        "thickness":"์์ฌ ๋๊ป (mm)",
        "upper_radius":"์๋จ R",
        "lower_radius":"ํ๋จ R",
    })
    st.session_state.history.extend(matched.to_dict("records"))
    if st.session_state.get("apply_post_filter", False):
        st.success(f"โ ์ด {total}๊ฐ ์กฐํฉ ์ค **ํํฐ๋ฅผ ๋ง์กฑํ {len(matched)}๊ฐ**๋ฅผ ๊ธฐ๋ก์ ์ถ๊ฐํ์ต๋๋ค.")
    else:
        st.success(f"โ ํํฐ ๋ฏธ์ ์ฉ: **์ด {len(matched)}๊ฐ ์ ์ฒด ์กฐํฉ**์ ๊ธฐ๋ก์ ์ถ๊ฐํ์ต๋๋ค.")
else:
    if st.session_state.get("apply_post_filter", False):
        st.warning(f"ํํฐ ์กฐ๊ฑด์ ๋ง์กฑํ๋ ์กฐํฉ์ด ์์ต๋๋ค. (์ด {total}๊ฐ ์กฐํฉ)")
    else:
        st.warning("๋จ๋ ์กฐํฉ์ด ์์ต๋๋ค.")
| # ----------------------------------------- | |
| # 2) ๊ฒฐ๊ณผ ์๊ฐํ | |
| # ----------------------------------------- | |
| with tabs[1]: | |
| st.header("๊ฒฐ๊ณผ ์๊ฐํ") | |
| # (์ ํ) ์ด ํญ์์ hr/st.divider๋ฅผ ์จ๊ธฐ๊ณ ์ถ์ผ๋ฉด ์ฃผ์ ํด์ | |
| # st.markdown(""" | |
| # <style>#results-tab hr, #results-tab .stDivider{display:none!important}</style> | |
| # <div id="results-tab">""", unsafe_allow_html=True) | |
| result_data = st.session_state.get("sim_result", DEFAULT_RESULT) | |
| src = st.session_state.get("topcard_source", "") | |
| if src == "filter": st.caption("์นด๋ ๊ฐ: **ํํฐ ๋ด ์ต์ ** (THINNING ์ต์ / MAX FAILURE ์ต์)") | |
| elif src == "overall": st.caption("์นด๋ ๊ฐ: **์ ์ฒด ํ์ ์ต์ ** (THINNING ์ต์ / MAX FAILURE ์ต์)") | |
| elif src == "single": st.caption("์นด๋ ๊ฐ: **๋จ์ผ ์ ๋ ฅ ๊ฒฐ๊ณผ**") | |
| col1, col2 = st.columns(2, gap="small") | |
| with col1: | |
| st.session_state.setdefault("material", "590") | |
| cur_mat = st.session_state["material"] | |
| thin_cap = MATERIAL_THICKNESS_CAP.get(cur_mat, 0.16) | |
| st.session_state["thinning_min"] = 0.0 | |
| st.session_state["thinning_max"] = thin_cap | |
| metric_card("THINNING (๋๊ป ๊ฐ์์จ)", | |
| result_data.get("THINNING", 0.0), | |
| float(st.session_state["thinning_min"]), | |
| float(st.session_state["thinning_max"])) | |
| with col2: | |
| st.session_state.setdefault("max_failure_min", 0.00) | |
| st.session_state.setdefault("max_failure_max", 0.97) | |
| metric_card("MAX FAILURE", | |
| result_data.get("MAX_FAILURE", 0.0), | |
| float(st.session_state["max_failure_min"]), | |
| float(st.session_state["max_failure_max"])) | |
| cL, cR = st.columns(2, gap="small") | |
def _summary_block(title: str, row_thin: pd.Series, row_mf: pd.Series):
    """Render one summary panel: the THINNING-optimal row and the
    MAX_FAILURE-optimal row, each as a markdown bullet list, or a
    placeholder caption when the corresponding row is absent.
    """
    def _bead_label(row: pd.Series) -> str:
        # LB/RB flags (0/1) encode which side(s) carry a bead; previously this
        # mapping was duplicated verbatim for both rows.
        lb, rb = int(row.get('LB', 0)), int(row.get('RB', 0))
        if lb == 0 and rb == 0:
            return "No Bead"
        if lb == 1 and rb == 0:
            return "Left Bead"
        if lb == 0 and rb == 1:
            return "Right Bead"
        return "Double Bead"

    st.subheader(title)
    st.markdown("**THINNING ์ต์**")
    if row_thin is not None and len(row_thin):
        st.markdown(
            f"- ์ฌ์ง: **{row_thin.get('material','-')}** (๋น๋: **{_bead_label(row_thin)}**)<br>"
            f"- ๋๊ป: **{row_thin.get('thickness','-')} mm**, ์ง๊ฒฝ: **{row_thin.get('diameter','-')} mm**, ๊ฐ๋: **{row_thin.get('degree','-')}ยฐ**<br>"
            f"- ์๋จ R: **{row_thin.get('upper_radius','-')}**, ํ๋จ R: **{row_thin.get('lower_radius','-')}**<br>"
            f"- **THINNING: {row_thin.get('THINNING',0):.3f}**, MAX_FAILURE: {row_thin.get('MAX_FAILURE',0):.3f}",
            unsafe_allow_html=True)
    else:
        st.caption("ํด๋น ์์")
    st.markdown("---")
    st.markdown("**MAX_FAILURE ์ต์**")
    if row_mf is not None and len(row_mf):
        st.markdown(
            f"- ์ฌ์ง: **{row_mf.get('material','-')}** (๋น๋: **{_bead_label(row_mf)}**)<br>"
            f"- ๋๊ป: **{row_mf.get('thickness','-')} mm**, ์ง๊ฒฝ: **{row_mf.get('diameter','-')} mm**, ๊ฐ๋: **{row_mf.get('degree','-')}ยฐ**<br>"
            f"- ์๋จ R: **{row_mf.get('upper_radius','-')}**, ํ๋จ R: **{row_mf.get('lower_radius','-')}**<br>"
            f"- THINNING: {row_mf.get('THINNING',0):.3f}, **MAX_FAILURE: {row_mf.get('MAX_FAILURE',0):.3f}**",
            unsafe_allow_html=True)
    else:
        st.caption("ํด๋น ์์")
with cL:
    # Left panel: optimum among rows that passed the post-filter
    # (session keys are None when the filter matched nothing).
    _summary_block(
        "ํํฐ ๋ด ์ต์ ",
        None if st.session_state.get("best_filter_thin") is None else pd.Series(st.session_state["best_filter_thin"]),
        None if st.session_state.get("best_filter_mf") is None else pd.Series(st.session_state["best_filter_mf"]),
    )
with cR:
    # Right panel: optimum over all evaluated rows.
    _summary_block(
        "์ ์ฒด ํ์ ์ต์ ",
        pd.Series(st.session_state.get("best_all_thin", {})) if st.session_state.get("best_all_thin") else None,
        pd.Series(st.session_state.get("best_all_mf", {})) if st.session_state.get("best_all_mf") else None,
    )
# Reference-material selector and its cap table, stacked vertically in one column.
with st.container():
    material_list = DISPLAY_LABELS
    cur = st.session_state.get("material", "590")
    default_idx = material_list.index(cur) if cur in material_list else 1
    sel = st.selectbox("๊ธฐ์ค ์ฌ์ง", material_list, index=default_idx, key="material_for_result")
    st.session_state["material"] = sel
    # Use .get with the same 0.16 fallback used for the THINNING metric card,
    # so an unmapped label cannot raise KeyError here.
    cap = MATERIAL_THICKNESS_CAP.get(sel, 0.16)
    st.session_state["thinning_min"] = 0.0
    st.session_state["thinning_max"] = cap
    st.caption(f"ํ์ฌ ๊ธฐ์ค ์ฌ์ง: {sel} (๋๊ป ๊ฐ์์จ ์ํ {cap:.2f})")
    # Show the per-material cap reference table right below, in a tight block.
    st.markdown('<div class="tight-block">', unsafe_allow_html=True)
    st.caption("์ฌ์ง๋ณ ๋๊ป ๊ฐ์์จ ์ํ")
    render_cap_table()
    st.markdown('</div>', unsafe_allow_html=True)
    # (Optional) close the wrapper div opened above, if it was enabled
    # st.markdown("</div>", unsafe_allow_html=True)
| # ----------------------------------------- | |
| # 3) ๊ธฐ๋ก ์กฐํ โ ์ ์ฒด ๊ต์ฒด | |
| # ----------------------------------------- | |
| with tabs[2]: | |
| st.header("๊ธฐ๋ก ์กฐํ") | |
| col1, col2, col3 = st.columns([1, 1, 1]) | |
| with col1: | |
| save_btn = st.button("ํ์ฌ ๊ฒฐ๊ณผ ์ ์ฅ", type="primary", use_container_width=True) | |
| with col2: | |
| select_all_btn = st.button("์ ์ฒด ์ ํ", use_container_width=True) | |
| with col3: | |
| delete_btn = st.button("์ ํ ํญ๋ชฉ ์ญ์ ", use_container_width=True) | |
# ===== Save-current-result handling =====
if save_btn:
    if "sim_result" not in st.session_state:
        st.warning("๋จผ์ ์๋ฎฌ๋ ์ด์ ์ ์คํํ์ธ์.")
    else:
        # Next 1-based Index: one past the largest existing Index in history.
        try:
            next_idx = max([r.get("Index", 0) for r in st.session_state.history]) + 1 if st.session_state.history else 1
        except Exception:
            next_idx = len(st.session_state.history) + 1
        # Input mode / display labels (single direct input vs multi selection).
        input_mode = st.session_state.get("input_mode", "์ง์ ์ ๋ ฅ")
        if input_mode == "์ง์ ์ ๋ ฅ":
            bead_label = st.session_state.get("beadType", "-")
            material_label = st.session_state.get("material", "-")
        else:
            bead_label = ", ".join(st.session_state.get("beadTypes_multi", [])) or "-"
            material_label = ", ".join(st.session_state.get("materials_multi", [])) or "-"
        # Also keep raw single values (when present) for later filtering/sorting.
        diameter_val = st.session_state.get("diameter")
        degree_val = st.session_state.get("degree")
        new_row = {
            "์ ํ": False,
            "Index": next_idx,
            "์ ์ฅ์๊ฐ": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
            "๋น๋ ํ์ ": bead_label,
            "์ ๋ ฅ ๋ฐฉ์": input_mode,
            "์์ฌ ๋๊ป (mm)": val_or_range("thickness", "thicknessRange", " mm"),
            "์ฌ์ง": material_label,
            "์ง๊ฒฝ": val_or_range("diameter", "diameterRange", " mm"),
            "๊ฐ๋": val_or_range("degree", "degreeRange", "ยฐ"),
            "์๋จ R": val_or_range("upperR", "upperRRange"),
            "ํ๋จ R": val_or_range("lowerR", "lowerRRange"),
            # NOTE(review): .get() without a default returns None, and
            # float(None) raises TypeError — confirm both keys are always set
            # whenever sim_result exists in session state.
            "THINNING": float(st.session_state.sim_result.get("THINNING")),
            "MAX_FAILURE": float(st.session_state.sim_result.get("MAX_FAILURE")),
            # Raw reference numbers (None when absent)
            "diameter": diameter_val,
            "degree": degree_val,
        }
        st.session_state.history.append(new_row)
        # Reflect immediately in the table below.
        st.success("ํ์ฌ ๊ฒฐ๊ณผ๊ฐ ๊ธฐ๋ก์ ์ ์ฅ๋์์ต๋๋ค.")
        st.rerun()
# ===== Table / button behavior =====
if st.session_state.history:
    df = pd.DataFrame(st.session_state.history)
    # Keep the checkbox column first; preserve the rest of the column order.
    cols = ["์ ํ"] + [c for c in df.columns if c != "์ ํ"]
    df = df[cols]
    if select_all_btn:
        # Mark every stored record as selected, then rerun so the rebuilt
        # frame reflects it.
        # NOTE(review): st.data_editor keyed widget state may lag one rerun —
        # verify the checkboxes actually refresh after this.
        for r in st.session_state.history:
            r["์ ํ"] = True
        st.rerun()
    st.subheader(f"๊ธฐ๋ก ํ ์ด๋ธ (์ด {len(df)}๊ฑด, ์ฒดํฌ ํ ์ญ์ ๊ฐ๋ฅ)")
    edited_df = st.data_editor(
        df, hide_index=True, use_container_width=True, key="history_editor"
    )
    if delete_btn:
        # Drop history records whose Index is checked in the edited table.
        selected_index = edited_df[edited_df["์ ํ"] == True]["Index"].tolist()
        st.session_state.history = [
            rec for rec in st.session_state.history if rec.get("Index") not in selected_index
        ]
        st.success(f"{len(selected_index)}๊ฐ ํญ๋ชฉ ์ญ์ ์๋ฃ!")
        st.rerun()
    # CSV downloads (full table / checked rows only), BOM-prefixed for Excel.
    sel_df = edited_df[edited_df["์ ํ"] == True].copy()
    c1, c2 = st.columns(2)
    with c1:
        csv_all = edited_df.to_csv(index=False).encode("utf-8-sig")
        st.download_button("CSV (์ ์ฒด ๋ค์ด๋ก๋)", csv_all, "simulation_history_all.csv", "text/csv", use_container_width=True)
    with c2:
        if len(sel_df):
            csv_sel = sel_df.to_csv(index=False).encode("utf-8-sig")
            st.download_button("CSV (์ ํ๋ง ๋ค์ด๋ก๋)", csv_sel, "simulation_history_selected.csv", "text/csv", use_container_width=True)
        else:
            st.caption("์ ํ๋ ํ์ด ์์ต๋๋ค. ํ์์ ์ฒดํฌ ํ ๋ค์ด๋ก๋ํ์ธ์.")
else:
    st.info("์์ง ์ ์ฅ๋ ๊ธฐ๋ก์ด ์์ต๋๋ค. ๋ฒ์ ์ ๋ ฅ์ผ๋ก ์คํํ๋ฉด ์กฐ๊ฑด์ ๋ง์กฑํ ๋ชจ๋ ์กฐํฉ์ด ์๋ ์ ์ฅ๋ฉ๋๋ค.")