# inference.py
import os
import json

import numpy as np
import pandas as pd
import torch
import lightgbm as lgb
from sklearn.preprocessing import StandardScaler
from torch import nn
def make_input(material, thickness, diameter, degree, upperR, lowerR, beadType):
    # Convert the bead type into LB / RB indicator values
    lb, rb = 0, 0
    if beadType == "Left Bead":
        lb = 1
    elif beadType == "Right Bead":
        rb = 1
    elif beadType == "Double Bead":
        lb, rb = 1, 1
    data = {
        "material": [material],
        "thickness": [thickness],
        "diameter": [diameter],
        "degree": [degree],
        "upper_radius": [upperR],
        "lower_radius": [lowerR],
        "LB": [lb],
        "RB": [rb],
    }
    return pd.DataFrame(data)
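
# Minimal usage sketch for make_input (illustrative values only; "SUS304" is a
# placeholder material, not necessarily one of the trained categories):
#   df = make_input("SUS304", thickness=1.2, diameter=60.0, degree=90.0,
#                   upperR=5.0, lowerR=5.0, beadType="Double Bead")
#   df.columns -> material, thickness, diameter, degree, upper_radius,
#                 lower_radius, LB, RB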

# =========================
# Configuration
# =========================
ART_DIR = "artifacts_blend"

with open(os.path.join(ART_DIR, "columns.json"), "r", encoding="utf-8") as f:
    meta = json.load(f)
NUM_COLS = meta["num_cols"]
CAT_COL = meta["cat_col"]
TARGET = meta["target"]

with open(os.path.join(ART_DIR, "materials.json"), "r", encoding="utf-8") as f:
    materials = json.load(f)["materials"]
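
# The two metadata files are assumed to look roughly like this, inferred from
# the keys read above (the concrete values are hypothetical):
#   columns.json   -> {"num_cols": ["thickness", "diameter", ...],
#                      "cat_col": "material", "target": "max_failure"}
#   materials.json -> {"materials": ["SUS304", ...]}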

# =========================
# FT-Transformer definition
# =========================
class FTTransformer(nn.Module):
    def __init__(self, n_materials: int, n_num: int, d_model: int = 128, nhead: int = 8,
                 num_layers: int = 4, dim_ff: int = 256, dropout: float = 0.2):
        super().__init__()
        # One embedding token for the categorical material, one linear "feature
        # tokenizer" per numeric column, plus a learnable [CLS] token.
        self.mat_emb = nn.Embedding(n_materials, d_model)
        self.num_linears = nn.ModuleList([nn.Linear(1, d_model) for _ in range(n_num)])
        self.cls = nn.Parameter(torch.zeros(1, 1, d_model))
        nn.init.trunc_normal_(self.cls, std=0.02)
        enc_layer = nn.TransformerEncoderLayer(
            d_model=d_model, nhead=nhead,
            dim_feedforward=dim_ff, dropout=dropout,
            batch_first=True, activation='gelu', norm_first=True
        )
        self.encoder = nn.TransformerEncoder(enc_layer, num_layers=num_layers)
        self.head = nn.Sequential(
            nn.LayerNorm(d_model),
            nn.Linear(d_model, d_model),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(d_model, 1)
        )

    def forward(self, mat_ids: torch.LongTensor, x_num: torch.FloatTensor):
        B = x_num.size(0)
        mat_tok = self.mat_emb(mat_ids).unsqueeze(1)  # (B, 1, d)
        # Tokenize each numeric feature independently: (B, n_num, d)
        num_tok = torch.cat([lin(x_num[:, i:i+1]).unsqueeze(1)
                             for i, lin in enumerate(self.num_linears)], dim=1)
        tokens = torch.cat([self.cls.expand(B, -1, -1), mat_tok, num_tok], dim=1)
        h = self.encoder(tokens)
        # The regression head reads only the [CLS] position.
        return self.head(h[:, 0, :])  # (B, 1)
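
# Shape sanity check (standalone sketch; sizes are arbitrary and unrelated to
# the trained checkpoints loaded below):
#   m = FTTransformer(n_materials=4, n_num=6)
#   y = m(torch.zeros(8, dtype=torch.long), torch.randn(8, 6))
#   y.shape -> torch.Size([8, 1])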

# =========================
# Load models
# =========================
# LightGBM folds
lgbm_models = []
for file in os.listdir(ART_DIR):
    if file.startswith("lgbm_fold") and file.endswith(".txt"):
        model = lgb.Booster(model_file=os.path.join(ART_DIR, file))
        lgbm_models.append(model)

# FT-Transformer folds (optional; currently only the max_failure target)
ftt_models, ftt_scalers = [], []
for file in os.listdir(ART_DIR):
    if file.startswith("ftt_fold") and file.endswith(".pt"):
        ckpt = torch.load(os.path.join(ART_DIR, file), map_location="cpu", weights_only=False)
        model = FTTransformer(
            n_materials=len(materials), n_num=len(NUM_COLS),
            d_model=192, nhead=8, num_layers=4, dim_ff=768, dropout=0.15
        )
        model.load_state_dict(ckpt["state_dict"])
        model.eval()
        ftt_models.append(model)
        # Rebuild the fitted scaler from the statistics stored in the checkpoint.
        scaler = StandardScaler()
        scaler.mean_ = np.asarray(ckpt["scaler_mean"])
        scaler.scale_ = np.asarray(ckpt["scaler_scale"])
        ftt_scalers.append(scaler)
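
# Expected layout of artifacts_blend/, inferred from the loading code above
# (fold counts are illustrative):
#   columns.json, materials.json, blend_alpha.json
#   lgbm_fold0.txt, lgbm_fold1.txt, ...
#   ftt_fold0.pt, ftt_fold1.pt, ...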

# =========================
# Prediction functions
# =========================
def predict_lgbm_ensemble(df_new: pd.DataFrame) -> np.ndarray:
    """LightGBM ensemble prediction."""
    df_new = df_new.copy()
    # Cast material to the exact category set used during training so LightGBM
    # sees consistent category codes.
    df_new[CAT_COL] = pd.Categorical(
        df_new[CAT_COL].astype(str),
        categories=materials
    )
    preds_list = []
    for model in lgbm_models:
        preds_list.append(model.predict(df_new[[CAT_COL] + NUM_COLS]))
    # Average the per-fold predictions.
    return np.mean(preds_list, axis=0)

def predict_dl_ensemble(df_new: pd.DataFrame) -> np.ndarray:
    """FT-Transformer ensemble prediction."""
    if not ftt_models:
        raise RuntimeError("No FT-Transformer models were loaded.")
    df_new = df_new.copy()
    # Map material names to integer ids; unknown materials fall back to id 0.
    df_new["_mat_id"] = (
        df_new[CAT_COL].astype(str)
        .map({m: i for i, m in enumerate(materials)})
        .fillna(0)
        .astype(int)
    )
    Xn = df_new[NUM_COLS].values.astype(np.float32)
    preds = []
    for mdl, sc in zip(ftt_models, ftt_scalers):
        # Each fold has its own scaler; normalize with the matching statistics.
        x = sc.transform(Xn).astype(np.float32)
        with torch.no_grad():
            m_ids = torch.tensor(df_new["_mat_id"].values, dtype=torch.long)
            x_t = torch.tensor(x, dtype=torch.float32)
            p = mdl(m_ids, x_t).cpu().numpy().ravel()
        preds.append(p)
    return np.mean(preds, axis=0)

def predict_blend(df_new: pd.DataFrame, alpha_path=os.path.join(ART_DIR, "blend_alpha.json")) -> np.ndarray:
    """FTT + LGBM blending."""
    with open(alpha_path, "r") as f:
        alpha = json.load(f)["best_alpha"]
    lgbm_pred = predict_lgbm_ensemble(df_new)
    # If no FT-Transformer folds were found, the blend degenerates to LightGBM only.
    dl_pred = predict_dl_ensemble(df_new) if ftt_models else lgbm_pred
    return alpha * dl_pred + (1 - alpha) * lgbm_pred
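
# End-to-end smoke test (hypothetical: assumes the artifacts directory exists
# and that "SUS304" is one of the trained materials):
if __name__ == "__main__":
    sample = make_input(
        material="SUS304", thickness=1.2, diameter=60.0, degree=90.0,
        upperR=5.0, lowerR=5.0, beadType="Double Bead",
    )
    print("blend prediction:", predict_blend(sample))  # shape (1,)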