Enhance Gradio app with integrated threshold diagnosis, trend detection, and anomaly analysis features. Added file loading functionality, improved data processing, and refined UI components for better user experience.
Browse files
app.py
CHANGED
|
@@ -1,12 +1,21 @@
|
|
| 1 |
-
# 統合版 Gradio アプリ (
|
| 2 |
import gradio as gr
|
| 3 |
import pandas as pd
|
| 4 |
import numpy as np
|
| 5 |
-
from sklearn.linear_model import LinearRegression
|
| 6 |
import json
|
| 7 |
import os
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 8 |
|
| 9 |
-
# ========= 共通ユーティリティ =========
|
| 10 |
def normalize(s):
|
| 11 |
return str(s).replace("\u3000", " ").replace("\n", "").replace("\r", "").strip()
|
| 12 |
|
|
@@ -21,255 +30,315 @@ def find_matching_column(df, col_id, item_name, process_name):
|
|
| 21 |
]
|
| 22 |
return candidates[0] if candidates else None
|
| 23 |
|
| 24 |
-
#
|
| 25 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 26 |
try:
|
| 27 |
-
df = pd.read_csv(csv_file.name, header=[0,1,2])
|
| 28 |
-
|
| 29 |
df = df.drop(df.columns[0], axis=1)
|
| 30 |
-
df.insert(0,"timestamp",
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 31 |
|
| 32 |
thresholds_df = pd.read_excel(excel_file.name)
|
| 33 |
-
thresholds_df["Important"] = thresholds_df["Important"].astype(str).str.upper().map({"TRUE":True,"FALSE":False})
|
| 34 |
-
for col in ["LL","L","H","HH"]:
|
| 35 |
if col in thresholds_df.columns:
|
| 36 |
thresholds_df[col] = pd.to_numeric(thresholds_df[col], errors="coerce")
|
|
|
|
|
|
|
|
|
|
|
|
|
| 37 |
except Exception as e:
|
| 38 |
-
return
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 39 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 40 |
try:
|
| 41 |
target_time = pd.to_datetime(datetime_str)
|
| 42 |
-
except:
|
| 43 |
-
return None,None,None,
|
| 44 |
|
| 45 |
start_time = target_time - pd.Timedelta(minutes=window_minutes)
|
| 46 |
-
df_window = df[(df["timestamp"]>=start_time)&(df["timestamp"]<=target_time)]
|
| 47 |
if df_window.empty:
|
| 48 |
-
return None,None,None,"⚠ 指定範囲にデータなし",None
|
| 49 |
|
| 50 |
-
proc_thresholds = thresholds_df[thresholds_df["ProcessNo_ProcessName"]==process_name]
|
| 51 |
if proc_thresholds.empty:
|
| 52 |
-
return None,None,None,f"⚠ {process_name} の閾値なし",None
|
| 53 |
-
|
| 54 |
-
all_results=[]
|
| 55 |
-
for _,row in df_window.iterrows():
|
| 56 |
-
for _,thr in proc_thresholds.iterrows():
|
| 57 |
-
col_tuple=
|
| 58 |
-
if col_tuple not in df.columns:
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
elif pd.notna(thr.get("L")) and value<thr["L"]: status="LOW"
|
| 63 |
-
elif pd.notna(thr.get("HH")) and value>thr["HH"]: status="HIGH-HIGH"
|
| 64 |
-
elif pd.notna(thr.get("H")) and value>thr["H"]: status="HIGH"
|
| 65 |
all_results.append({
|
| 66 |
-
"ItemName":thr["ItemName"],
|
| 67 |
-
"
|
|
|
|
|
|
|
| 68 |
})
|
| 69 |
|
| 70 |
-
|
| 71 |
-
|
| 72 |
-
|
| 73 |
-
|
| 74 |
-
result_df_all=pd.DataFrame({"状態":status_counts.index,"件数":status_counts.values,"割合(%)":ratio.values})
|
| 75 |
-
|
| 76 |
-
imp=[r for r in all_results if r["重要項目"]]
|
| 77 |
-
if imp:
|
| 78 |
-
total_imp=len(imp)
|
| 79 |
-
c=pd.Series([r["判定"] for r in imp]).value_counts().reindex(["LOW-LOW","LOW","OK","HIGH","HIGH-HIGH"],fill_value=0)
|
| 80 |
-
ratio_imp=(c/total_imp*100).round(1)
|
| 81 |
-
result_df_imp=pd.DataFrame({"状態":c.index,"件数":c.values,"割合(%)":ratio_imp.values})
|
| 82 |
-
else:
|
| 83 |
-
result_df_imp=pd.DataFrame(columns=["状態","件数","割合(%)"])
|
| 84 |
-
ratio_imp=pd.Series(dtype=float)
|
| 85 |
-
|
| 86 |
-
per_item=[]
|
| 87 |
-
for item in [r["ItemName"] for r in imp]:
|
| 88 |
-
item_results=[r for r in imp if r["ItemName"]==item]
|
| 89 |
-
c=pd.Series([r["判定"] for r in item_results]).value_counts().reindex(["LOW-LOW","LOW","OK","HIGH","HIGH-HIGH"],fill_value=0)
|
| 90 |
-
ratio_item=(c/len(item_results)*100).round(1)
|
| 91 |
-
for s,v,r in zip(c.index,c.values,ratio_item.values):
|
| 92 |
-
per_item.append({"ItemName":item,"状態":s,"件数":v,"割合(%)":r})
|
| 93 |
-
result_df_imp_items=pd.DataFrame(per_item)
|
| 94 |
-
|
| 95 |
-
summary=f"✅ {process_name} の診断完了\n[全項目] "+ " / ".join([f"{s}:{r:.1f}%" for s,r in ratio.items()]) +"\n[重要項目全体] "+(" / ".join([f"{s}:{r:.1f}%" for s,r in ratio_imp.items()]) if not result_df_imp.empty else "対象なし")
|
| 96 |
-
|
| 97 |
-
json_data={"集計結果":{"全項目割合":ratio.to_dict(),"重要項目全体割合":ratio_imp.to_dict(),"重要項目ごと割合":per_item}}
|
| 98 |
-
result_json=json.dumps(json_data, ensure_ascii=False, indent=2)
|
| 99 |
-
|
| 100 |
-
return result_df_all,result_df_imp,result_df_imp_items,summary,result_json
|
| 101 |
-
|
| 102 |
-
# ========= 傾向検出 =========
|
| 103 |
-
def detect_trends_with_forecast(csv_file, excel_file, process_name, datetime_str, window_minutes, forecast_minutes):
|
| 104 |
-
try:
|
| 105 |
-
df = pd.read_csv(csv_file.name, header=[0,1,2])
|
| 106 |
-
ts=df.iloc[:,0]; df=df.drop(df.columns[0],axis=1); df.insert(0,"timestamp",pd.to_datetime(ts,errors="coerce"))
|
| 107 |
-
thresholds_df=pd.read_excel(excel_file.name)
|
| 108 |
-
thresholds_df["Important"]=thresholds_df["Important"].astype(str).str.upper().map({"TRUE":True,"FALSE":False})
|
| 109 |
-
for col in ["LL","L","H","HH"]:
|
| 110 |
-
if col in thresholds_df.columns: thresholds_df[col]=pd.to_numeric(thresholds_df[col], errors="coerce")
|
| 111 |
-
except Exception as e:
|
| 112 |
-
return None,f"❌ 入力エラー: {e}",None
|
| 113 |
-
|
| 114 |
-
target_time=pd.to_datetime(datetime_str)
|
| 115 |
-
start_time=target_time-pd.Timedelta(minutes=window_minutes)
|
| 116 |
-
df_window=df[(df["timestamp"]>=start_time)&(df["timestamp"]<=target_time)]
|
| 117 |
-
if df_window.empty: return None,"⚠ データなし",None
|
| 118 |
-
|
| 119 |
-
interval=df_window["timestamp"].diff().median()
|
| 120 |
-
if pd.isna(interval): return None,"⚠ サンプリング間隔不明",None
|
| 121 |
-
interval_minutes=interval.total_seconds()/60
|
| 122 |
-
|
| 123 |
-
proc_thresholds=thresholds_df[(thresholds_df["ProcessNo_ProcessName"]==process_name)&(thresholds_df["Important"]==True)]
|
| 124 |
-
if proc_thresholds.empty: return None,f"⚠ {process_name} の重要項目なし",None
|
| 125 |
-
|
| 126 |
-
results=[]
|
| 127 |
-
for _,thr in proc_thresholds.iterrows():
|
| 128 |
-
col=(thr["ColumnID"],thr["ItemName"],thr["ProcessNo_ProcessName"])
|
| 129 |
-
if col not in df.columns: continue
|
| 130 |
-
series=df_window[col].dropna()
|
| 131 |
-
if len(series)<3: continue
|
| 132 |
-
|
| 133 |
-
x=np.arange(len(series)).reshape(-1,1); y=series.values.reshape(-1,1)
|
| 134 |
-
model=LinearRegression().fit(x,y); slope=model.coef_[0][0]
|
| 135 |
-
last_val=series.iloc[-1]
|
| 136 |
-
forecast_steps=int(forecast_minutes/interval_minutes)
|
| 137 |
-
forecast_val=model.predict([[len(series)+forecast_steps]])[0][0]
|
| 138 |
-
forecast_time=target_time+pd.Timedelta(minutes=forecast_minutes)
|
| 139 |
-
|
| 140 |
-
status="安定"
|
| 141 |
-
if slope<0 and pd.notna(thr.get("LL")):
|
| 142 |
-
if last_val>thr["LL"]: status="LL接近下降傾向"
|
| 143 |
-
elif last_val<=thr["LL"]: status="LL逸脱下降傾向"
|
| 144 |
-
if slope>0 and pd.notna(thr.get("HH")):
|
| 145 |
-
if last_val<thr["HH"]: status="HH接近上昇傾向"
|
| 146 |
-
elif last_val>=thr["HH"]: status="HH逸脱上昇傾向"
|
| 147 |
-
|
| 148 |
-
forecast_status="安定"
|
| 149 |
-
if pd.notna(thr.get("LL")) and forecast_val<=thr["LL"]: forecast_status="LL逸脱予測"
|
| 150 |
-
elif pd.notna(thr.get("HH")) and forecast_val>=thr["HH"]: forecast_status="HH逸脱予測"
|
| 151 |
-
|
| 152 |
-
results.append({"ItemName":thr["ItemName"],"傾向":status,"傾き":round(slope,4),
|
| 153 |
-
"最終値":round(float(last_val),3),"予測値":round(float(forecast_val),3),
|
| 154 |
-
"予測時刻":str(forecast_time),"予測傾向":forecast_status})
|
| 155 |
-
result_df=pd.DataFrame(results)
|
| 156 |
-
return result_df,"✅ 傾向検出+未来予測完了",json.dumps(results, ensure_ascii=False, indent=2)
|
| 157 |
-
|
| 158 |
-
# ========= 予兆解析 =========
|
| 159 |
-
def forecast_process_with_lag(csv_file, excel_file, lag_excel_file, process_name, datetime_str, forecast_minutes):
|
| 160 |
-
try:
|
| 161 |
-
df=pd.read_csv(csv_file.name, header=[0,1,2])
|
| 162 |
-
ts=df.iloc[:,0]; df=df.drop(df.columns[0],axis=1); df.insert(0,"timestamp",pd.to_datetime(ts,errors="coerce"))
|
| 163 |
-
df.columns=["timestamp" if (isinstance(c,str) and c=="timestamp") else "_".join([str(x) for x in c if x]) for c in df.columns]
|
| 164 |
-
|
| 165 |
-
thresholds_df=pd.read_excel(excel_file.name)
|
| 166 |
-
thresholds_df["Important"]=thresholds_df["Important"].astype(str).str.upper().map({"TRUE":True,"FALSE":False})
|
| 167 |
-
for col in ["LL","L","H","HH"]:
|
| 168 |
-
if col in thresholds_df.columns: thresholds_df[col]=pd.to_numeric(thresholds_df[col], errors="coerce")
|
| 169 |
-
|
| 170 |
-
lag_matrix=pd.read_excel(lag_excel_file.name, index_col=0)
|
| 171 |
-
except Exception as e:
|
| 172 |
-
return None,f"❌ 入力エラー: {e}",None
|
| 173 |
-
|
| 174 |
-
target_time=pd.to_datetime(datetime_str)
|
| 175 |
-
forecast_time=target_time+pd.Timedelta(minutes=forecast_minutes)
|
| 176 |
-
|
| 177 |
-
proc_thresholds=thresholds_df[(thresholds_df["ProcessNo_ProcessName"]==process_name)&(thresholds_df["Important"]==True)]
|
| 178 |
-
if proc_thresholds.empty: return None,f"⚠ {process_name} に重要項目なし",None
|
| 179 |
-
if process_name not in lag_matrix.index: return None,f"⚠ ラグ表に {process_name} の行なし",None
|
| 180 |
|
| 181 |
-
|
| 182 |
-
if lag_row.empty: return None,f"⚠ 正のラグなし",None
|
| 183 |
|
| 184 |
-
|
| 185 |
-
|
| 186 |
-
|
| 187 |
-
|
| 188 |
-
|
| 189 |
-
|
| 190 |
-
|
| 191 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 192 |
|
| 193 |
-
|
| 194 |
-
|
|
|
|
|
|
|
| 195 |
|
| 196 |
-
|
| 197 |
-
|
| 198 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 199 |
for x_col in up_cols:
|
| 200 |
-
|
| 201 |
-
|
| 202 |
-
|
| 203 |
-
|
| 204 |
-
merged_df
|
| 205 |
-
|
| 206 |
-
|
| 207 |
-
|
| 208 |
-
|
| 209 |
-
|
| 210 |
-
|
| 211 |
-
|
| 212 |
-
|
| 213 |
-
|
| 214 |
-
|
|
|
|
|
|
|
|
|
|
| 215 |
for x_col in up_cols:
|
| 216 |
-
|
| 217 |
-
|
| 218 |
-
|
| 219 |
-
|
| 220 |
-
|
| 221 |
-
|
| 222 |
-
|
| 223 |
-
|
| 224 |
-
|
| 225 |
-
|
| 226 |
-
|
| 227 |
-
elif pd.notna(
|
| 228 |
-
|
| 229 |
-
elif pd.notna(
|
| 230 |
-
|
| 231 |
-
|
| 232 |
-
|
| 233 |
-
|
| 234 |
-
|
| 235 |
-
|
| 236 |
-
|
| 237 |
-
|
| 238 |
-
|
| 239 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 240 |
|
| 241 |
with gr.Row():
|
| 242 |
-
csv_input=gr.File(label="CSVファイル", file_types=[".csv"], type="filepath")
|
| 243 |
-
excel_input=gr.File(label="Excel
|
| 244 |
-
|
|
|
|
|
|
|
| 245 |
|
| 246 |
with gr.Tabs():
|
| 247 |
with gr.Tab("閾値診断"):
|
| 248 |
-
|
| 249 |
-
|
| 250 |
-
|
| 251 |
-
|
| 252 |
-
|
| 253 |
-
|
| 254 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 255 |
|
| 256 |
with gr.Tab("傾向検出"):
|
| 257 |
-
|
| 258 |
-
|
| 259 |
-
|
| 260 |
-
|
| 261 |
-
|
| 262 |
-
|
| 263 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 264 |
|
| 265 |
with gr.Tab("予兆解析"):
|
| 266 |
-
|
| 267 |
-
|
| 268 |
-
|
| 269 |
-
|
| 270 |
-
|
| 271 |
-
|
| 272 |
-
|
| 273 |
-
|
| 274 |
-
|
| 275 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# 統合版 Gradio アプリ (閾値診断 + 傾向検出 + 予兆解析)
|
| 2 |
import gradio as gr
|
| 3 |
import pandas as pd
|
| 4 |
import numpy as np
|
|
|
|
| 5 |
import json
|
| 6 |
import os
|
| 7 |
+
from sklearn.linear_model import LinearRegression
|
| 8 |
+
|
| 9 |
+
# --- 共通ユーティリティ ---
|
| 10 |
+
def convert_value(v):
|
| 11 |
+
if hasattr(v, "item"):
|
| 12 |
+
return v.item()
|
| 13 |
+
if isinstance(v, (np.integer, int)):
|
| 14 |
+
return int(v)
|
| 15 |
+
if isinstance(v, (np.floating, float)):
|
| 16 |
+
return float(v)
|
| 17 |
+
return v
|
| 18 |
|
|
|
|
| 19 |
def normalize(s):
|
| 20 |
return str(s).replace("\u3000", " ").replace("\n", "").replace("\r", "").strip()
|
| 21 |
|
|
|
|
| 30 |
]
|
| 31 |
return candidates[0] if candidates else None
|
| 32 |
|
| 33 |
+
# --- グローバル変数(全タブで共有) ---
|
| 34 |
+
df = None
|
| 35 |
+
thresholds_df = None
|
| 36 |
+
lag_matrix = None
|
| 37 |
+
|
| 38 |
+
# --- ファイル読み込み ---
|
| 39 |
+
def load_files(csv_file, excel_file, lag_file):
|
| 40 |
+
global df, thresholds_df, lag_matrix
|
| 41 |
try:
|
| 42 |
+
df = pd.read_csv(csv_file.name, header=[0, 1, 2])
|
| 43 |
+
timestamp_col = pd.to_datetime(df.iloc[:, 0], errors="coerce")
|
| 44 |
df = df.drop(df.columns[0], axis=1)
|
| 45 |
+
df.insert(0, "timestamp", timestamp_col)
|
| 46 |
+
|
| 47 |
+
# MultiIndex → 文字列化
|
| 48 |
+
def col_to_str(col):
|
| 49 |
+
return "_".join([str(c) for c in col if c]) if isinstance(col, tuple) else str(col)
|
| 50 |
+
|
| 51 |
+
df.columns = [
|
| 52 |
+
"timestamp" if (isinstance(c, str) and c == "timestamp") else col_to_str(c)
|
| 53 |
+
for c in df.columns
|
| 54 |
+
]
|
| 55 |
|
| 56 |
thresholds_df = pd.read_excel(excel_file.name)
|
| 57 |
+
thresholds_df["Important"] = thresholds_df["Important"].astype(str).str.upper().map({"TRUE": True, "FALSE": False})
|
| 58 |
+
for col in ["LL", "L", "H", "HH"]:
|
| 59 |
if col in thresholds_df.columns:
|
| 60 |
thresholds_df[col] = pd.to_numeric(thresholds_df[col], errors="coerce")
|
| 61 |
+
|
| 62 |
+
lag_matrix = pd.read_excel(lag_file.name, index_col=0)
|
| 63 |
+
|
| 64 |
+
return "✅ ファイル読み込み成功"
|
| 65 |
except Exception as e:
|
| 66 |
+
return f"❌ ファイル読み込み失敗: {e}"
|
| 67 |
+
|
| 68 |
+
# --- Tab1: 閾値診断 ---
|
| 69 |
+
def judge_status(value, ll, l, h, hh):
|
| 70 |
+
if pd.notna(ll) and value < ll:
|
| 71 |
+
return "LOW-LOW"
|
| 72 |
+
elif pd.notna(l) and value < l:
|
| 73 |
+
return "LOW"
|
| 74 |
+
elif pd.notna(hh) and value > hh:
|
| 75 |
+
return "HIGH-HIGH"
|
| 76 |
+
elif pd.notna(h) and value > h:
|
| 77 |
+
return "HIGH"
|
| 78 |
+
else:
|
| 79 |
+
return "OK"
|
| 80 |
|
| 81 |
+
def diagnose_process_range(process_name, datetime_str, window_minutes):
|
| 82 |
+
global df, thresholds_df
|
| 83 |
+
if df is None or thresholds_df is None:
|
| 84 |
+
return None, None, None, "⚠ ファイル未読み込み", None
|
| 85 |
try:
|
| 86 |
target_time = pd.to_datetime(datetime_str)
|
| 87 |
+
except Exception:
|
| 88 |
+
return None, None, None, "⚠ 日時が不正です", None
|
| 89 |
|
| 90 |
start_time = target_time - pd.Timedelta(minutes=window_minutes)
|
| 91 |
+
df_window = df[(df["timestamp"] >= start_time) & (df["timestamp"] <= target_time)]
|
| 92 |
if df_window.empty:
|
| 93 |
+
return None, None, None, "⚠ 指定範囲にデータなし", None
|
| 94 |
|
| 95 |
+
proc_thresholds = thresholds_df[thresholds_df["ProcessNo_ProcessName"] == process_name]
|
| 96 |
if proc_thresholds.empty:
|
| 97 |
+
return None, None, None, f"⚠ {process_name} の閾値なし", None
|
| 98 |
+
|
| 99 |
+
all_results = []
|
| 100 |
+
for _, row in df_window.iterrows():
|
| 101 |
+
for _, thr in proc_thresholds.iterrows():
|
| 102 |
+
col_tuple = f"{thr['ColumnID']}_{thr['ItemName']}_{thr['ProcessNo_ProcessName']}"
|
| 103 |
+
if col_tuple not in df.columns:
|
| 104 |
+
continue
|
| 105 |
+
value = row[col_tuple]
|
| 106 |
+
status = judge_status(value, thr.get("LL"), thr.get("L"), thr.get("H"), thr.get("HH"))
|
|
|
|
|
|
|
|
|
|
| 107 |
all_results.append({
|
| 108 |
+
"ItemName": thr["ItemName"],
|
| 109 |
+
"判定": status,
|
| 110 |
+
"重要項目": bool(thr.get("Important", False)),
|
| 111 |
+
"時刻": str(row["timestamp"])
|
| 112 |
})
|
| 113 |
|
| 114 |
+
total = len(all_results)
|
| 115 |
+
status_counts = pd.Series([r["判定"] for r in all_results]).value_counts().reindex(
|
| 116 |
+
["LOW-LOW", "LOW", "OK", "HIGH", "HIGH-HIGH"], fill_value=0)
|
| 117 |
+
status_ratio = (status_counts / total * 100).round(1)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 118 |
|
| 119 |
+
result_df_all = pd.DataFrame({"状態": status_counts.index, "件数": status_counts.values, "割合(%)": status_ratio.values})
|
|
|
|
| 120 |
|
| 121 |
+
important_results = [r for r in all_results if r["重要項目"]]
|
| 122 |
+
if important_results:
|
| 123 |
+
total_imp = len(important_results)
|
| 124 |
+
status_counts_imp = pd.Series([r["判定"] for r in important_results]).value_counts().reindex(
|
| 125 |
+
["LOW-LOW", "LOW", "OK", "HIGH", "HIGH-HIGH"], fill_value=0)
|
| 126 |
+
status_ratio_imp = (status_counts_imp / total_imp * 100).round(1)
|
| 127 |
+
result_df_imp = pd.DataFrame({"状態": status_counts_imp.index, "件数": status_counts_imp.values, "割合(%)": status_ratio_imp.values})
|
| 128 |
+
else:
|
| 129 |
+
result_df_imp = pd.DataFrame(columns=["状態", "件数", "割合(%)"])
|
| 130 |
+
status_ratio_imp = pd.Series(dtype=float)
|
| 131 |
+
|
| 132 |
+
result_json = json.dumps({
|
| 133 |
+
"集計結果": {
|
| 134 |
+
"全項目割合": {k: convert_value(v) for k, v in status_ratio.to_dict().items()},
|
| 135 |
+
"重要項目全体割合": {k: convert_value(v) for k, v in status_ratio_imp.to_dict().items()} if not result_df_imp.empty else {}
|
| 136 |
+
}
|
| 137 |
+
}, ensure_ascii=False, indent=2)
|
| 138 |
+
|
| 139 |
+
summary = f"✅ {process_name} の診断完了({start_time} ~ {target_time})"
|
| 140 |
+
return result_df_all, result_df_imp, None, summary, result_json
|
| 141 |
+
|
| 142 |
+
# --- Tab2: 傾向検出 ---
|
| 143 |
+
def detect_trends_with_forecast(process_name, datetime_str, window_minutes, forecast_minutes):
|
| 144 |
+
global df, thresholds_df
|
| 145 |
+
if df is None or thresholds_df is None:
|
| 146 |
+
return None, "⚠ ファイル未読み込み", None
|
| 147 |
+
target_time = pd.to_datetime(datetime_str)
|
| 148 |
+
start_time = target_time - pd.Timedelta(minutes=window_minutes)
|
| 149 |
+
df_window = df[(df["timestamp"] >= start_time) & (df["timestamp"] <= target_time)]
|
| 150 |
+
if df_window.empty:
|
| 151 |
+
return None, "⚠ データなし", None
|
| 152 |
|
| 153 |
+
interval = df_window["timestamp"].diff().median()
|
| 154 |
+
if pd.isna(interval):
|
| 155 |
+
return None, "⚠ サンプリング間隔検出失敗", None
|
| 156 |
+
interval_minutes = interval.total_seconds() / 60
|
| 157 |
|
| 158 |
+
proc_thresholds = thresholds_df[(thresholds_df["ProcessNo_ProcessName"] == process_name) & (thresholds_df["Important"] == True)]
|
| 159 |
+
if proc_thresholds.empty:
|
| 160 |
+
return None, f"⚠ {process_name} の重要項目なし", None
|
| 161 |
+
|
| 162 |
+
results = []
|
| 163 |
+
for _, thr in proc_thresholds.iterrows():
|
| 164 |
+
col_tuple = f"{thr['ColumnID']}_{thr['ItemName']}_{thr['ProcessNo_ProcessName']}"
|
| 165 |
+
if col_tuple not in df.columns:
|
| 166 |
+
continue
|
| 167 |
+
series = df_window[col_tuple].dropna()
|
| 168 |
+
if len(series) < 3:
|
| 169 |
+
continue
|
| 170 |
+
x = np.arange(len(series)).reshape(-1, 1)
|
| 171 |
+
y = series.values.reshape(-1, 1)
|
| 172 |
+
model = LinearRegression().fit(x, y)
|
| 173 |
+
slope = model.coef_[0][0]
|
| 174 |
+
last_val = series.iloc[-1]
|
| 175 |
+
|
| 176 |
+
forecast_steps = int(forecast_minutes / interval_minutes)
|
| 177 |
+
forecast_val = model.predict([[len(series) + forecast_steps]])[0][0]
|
| 178 |
+
forecast_time = target_time + pd.Timedelta(minutes=forecast_minutes)
|
| 179 |
+
|
| 180 |
+
risk = "安定"
|
| 181 |
+
if pd.notna(thr.get("LL")) and forecast_val <= thr["LL"]:
|
| 182 |
+
risk = "LL逸脱予測"
|
| 183 |
+
elif pd.notna(thr.get("HH")) and forecast_val >= thr["HH"]:
|
| 184 |
+
risk = "HH逸脱予測"
|
| 185 |
+
|
| 186 |
+
results.append({
|
| 187 |
+
"ItemName": thr["ItemName"],
|
| 188 |
+
"傾き": round(float(slope), 4),
|
| 189 |
+
"最終値": round(float(last_val), 3),
|
| 190 |
+
"予測値": round(float(forecast_val), 3),
|
| 191 |
+
"予測時刻": str(forecast_time),
|
| 192 |
+
"予測リスク": risk
|
| 193 |
+
})
|
| 194 |
+
|
| 195 |
+
result_df = pd.DataFrame(results)
|
| 196 |
+
result_json = json.dumps(results, ensure_ascii=False, indent=2)
|
| 197 |
+
return result_df, "✅ 傾向検出完了", result_json
|
| 198 |
+
|
| 199 |
+
# --- Tab3: 予兆解析 ---
|
| 200 |
+
def forecast_process_with_lag(process_name, datetime_str, forecast_minutes):
|
| 201 |
+
global df, thresholds_df, lag_matrix
|
| 202 |
+
if df is None or thresholds_df is None or lag_matrix is None:
|
| 203 |
+
return None, "⚠ ファイル未読み込み", None
|
| 204 |
+
target_time = pd.to_datetime(datetime_str)
|
| 205 |
+
forecast_time = target_time + pd.Timedelta(minutes=forecast_minutes)
|
| 206 |
+
|
| 207 |
+
proc_thresholds = thresholds_df[(thresholds_df["ProcessNo_ProcessName"] == process_name) & (thresholds_df["Important"] == True)]
|
| 208 |
+
if proc_thresholds.empty:
|
| 209 |
+
return None, f"⚠ {process_name} の重要項目なし", None
|
| 210 |
+
|
| 211 |
+
if process_name not in lag_matrix.index:
|
| 212 |
+
return None, f"⚠ {process_name} のラグ行なし", None
|
| 213 |
+
|
| 214 |
+
lag_row = lag_matrix.loc[process_name].dropna()
|
| 215 |
+
lag_row = lag_row[lag_row > 0]
|
| 216 |
+
if lag_row.empty:
|
| 217 |
+
return None, f"⚠ {process_name} に正のラグなし", None
|
| 218 |
+
|
| 219 |
+
results = []
|
| 220 |
+
for _, thr in proc_thresholds.iterrows():
|
| 221 |
+
y_col = find_matching_column(df, thr["ColumnID"], thr["ItemName"], thr["ProcessNo_ProcessName"])
|
| 222 |
+
if y_col is None:
|
| 223 |
+
continue
|
| 224 |
+
df_window = df[df["timestamp"] <= target_time].copy()
|
| 225 |
+
df_window = df_window[df_window["timestamp"] >= target_time - pd.Timedelta(hours=24)]
|
| 226 |
+
if df_window.empty:
|
| 227 |
+
continue
|
| 228 |
+
base_df = df_window[["timestamp", y_col]].rename(columns={y_col: "y"})
|
| 229 |
+
merged_df = base_df.copy()
|
| 230 |
+
for up_proc, lag_min in lag_row.items():
|
| 231 |
+
up_cols = [c for c in df.columns if isinstance(c, str) and up_proc in c]
|
| 232 |
for x_col in up_cols:
|
| 233 |
+
shifted = df_window.loc[:, ["timestamp", x_col]].copy()
|
| 234 |
+
shifted["timestamp"] = shifted["timestamp"] + pd.Timedelta(minutes=lag_min)
|
| 235 |
+
shifted = shifted.rename(columns={x_col: f"{x_col}_lag{lag_min}"})
|
| 236 |
+
merged_df = pd.merge_asof(
|
| 237 |
+
merged_df.sort_values("timestamp"),
|
| 238 |
+
shifted.sort_values("timestamp"),
|
| 239 |
+
on="timestamp",
|
| 240 |
+
direction="nearest"
|
| 241 |
+
)
|
| 242 |
+
X_all = merged_df.drop(columns=["timestamp", "y"], errors="ignore").values
|
| 243 |
+
Y_all = merged_df["y"].values
|
| 244 |
+
if X_all.shape[1] == 0 or len(Y_all) < 5:
|
| 245 |
+
continue
|
| 246 |
+
model = LinearRegression().fit(X_all, Y_all)
|
| 247 |
+
|
| 248 |
+
X_pred = []
|
| 249 |
+
for up_proc, lag_min in lag_row.items():
|
| 250 |
+
up_cols = [c for c in df.columns if isinstance(c, str) and up_proc in c]
|
| 251 |
for x_col in up_cols:
|
| 252 |
+
ref_time = forecast_time - pd.Timedelta(minutes=lag_min)
|
| 253 |
+
idx = (df["timestamp"] - ref_time).abs().idxmin()
|
| 254 |
+
X_pred.append(df.loc[idx, x_col])
|
| 255 |
+
if not X_pred:
|
| 256 |
+
continue
|
| 257 |
+
|
| 258 |
+
pred_val = model.predict([X_pred])[0]
|
| 259 |
+
|
| 260 |
+
risk = "OK"
|
| 261 |
+
if pd.notna(thr.get("LL")) and pred_val <= thr["LL"]:
|
| 262 |
+
risk = "LOW-LOW"
|
| 263 |
+
elif pd.notna(thr.get("L")) and pred_val <= thr["L"]:
|
| 264 |
+
risk = "LOW"
|
| 265 |
+
elif pd.notna(thr.get("HH")) and pred_val >= thr["HH"]:
|
| 266 |
+
risk = "HIGH-HIGH"
|
| 267 |
+
elif pd.notna(thr.get("H")) and pred_val >= thr["H"]:
|
| 268 |
+
risk = "HIGH"
|
| 269 |
+
|
| 270 |
+
results.append({
|
| 271 |
+
"ItemName": thr["ItemName"],
|
| 272 |
+
"予測値": round(float(pred_val), 3),
|
| 273 |
+
"予測時刻": str(forecast_time),
|
| 274 |
+
"予測リスク": risk
|
| 275 |
+
})
|
| 276 |
+
|
| 277 |
+
result_df = pd.DataFrame(results)
|
| 278 |
+
result_json = json.dumps(results, ensure_ascii=False, indent=2)
|
| 279 |
+
return result_df, f"✅ {process_name} の予兆解析完了", result_json
|
| 280 |
+
|
| 281 |
+
# --- Gradio UI ---
|
| 282 |
+
with gr.Blocks(css=".gradio-container {overflow: auto !important;}") as demo:
|
| 283 |
+
gr.Markdown("## 統合トレンド解析アプリ (MCP対応)")
|
| 284 |
|
| 285 |
with gr.Row():
|
| 286 |
+
csv_input = gr.File(label="CSVファイル", file_types=[".csv"], type="filepath")
|
| 287 |
+
excel_input = gr.File(label="Excel閾値ファイル", file_types=[".xlsx"], type="filepath")
|
| 288 |
+
lag_input = gr.File(label="ラグファイル", file_types=[".xlsx"], type="filepath")
|
| 289 |
+
load_btn = gr.Button("ファイル読み込み")
|
| 290 |
+
load_status = gr.Textbox(label="読み込み結果")
|
| 291 |
|
| 292 |
with gr.Tabs():
|
| 293 |
with gr.Tab("閾値診断"):
|
| 294 |
+
process_name1 = gr.Textbox(label="プロセス名")
|
| 295 |
+
datetime_str1 = gr.Textbox(label="診断基準日時")
|
| 296 |
+
window_minutes1 = gr.Number(label="さかのぼる時間幅(分)", value=60)
|
| 297 |
+
run_btn1 = gr.Button("診断実行")
|
| 298 |
+
result_df_all = gr.Dataframe(label="全項目の状態集計")
|
| 299 |
+
result_df_imp = gr.Dataframe(label="重要項目全体の状態集計")
|
| 300 |
+
summary_output = gr.Textbox(label="サマリー")
|
| 301 |
+
json_output = gr.Json(label="JSON集計結果")
|
| 302 |
+
run_btn1.click(
|
| 303 |
+
diagnose_process_range,
|
| 304 |
+
inputs=[process_name1, datetime_str1, window_minutes1],
|
| 305 |
+
outputs=[result_df_all, result_df_imp, None, summary_output, json_output]
|
| 306 |
+
)
|
| 307 |
|
| 308 |
with gr.Tab("傾向検出"):
|
| 309 |
+
process_name2 = gr.Textbox(label="プロセス名")
|
| 310 |
+
datetime_str2 = gr.Textbox(label="基準日時")
|
| 311 |
+
window_minutes2 = gr.Number(label="過去の時間幅(分)", value=60)
|
| 312 |
+
forecast_minutes2 = gr.Number(label="未来予測時間幅(分)", value=60)
|
| 313 |
+
run_btn2 = gr.Button("傾向検出実行")
|
| 314 |
+
result_df2 = gr.Dataframe(label="傾向+予測結果")
|
| 315 |
+
summary_output2 = gr.Textbox(label="サマリー")
|
| 316 |
+
json_output2 = gr.Json(label="JSON結果")
|
| 317 |
+
run_btn2.click(
|
| 318 |
+
detect_trends_with_forecast,
|
| 319 |
+
inputs=[process_name2, datetime_str2, window_minutes2, forecast_minutes2],
|
| 320 |
+
outputs=[result_df2, summary_output2, json_output2]
|
| 321 |
+
)
|
| 322 |
|
| 323 |
with gr.Tab("予兆解析"):
|
| 324 |
+
process_name3 = gr.Textbox(label="プロセス名")
|
| 325 |
+
datetime_str3 = gr.Textbox(label="基準日時")
|
| 326 |
+
forecast_minutes3 = gr.Number(label="未来予測時間幅(分)", value=60)
|
| 327 |
+
run_btn3 = gr.Button("予兆解析実行")
|
| 328 |
+
result_df3 = gr.Dataframe(label="予兆解析結果")
|
| 329 |
+
summary_output3 = gr.Textbox(label="サマリー")
|
| 330 |
+
json_output3 = gr.Json(label="JSON結果")
|
| 331 |
+
run_btn3.click(
|
| 332 |
+
forecast_process_with_lag,
|
| 333 |
+
inputs=[process_name3, datetime_str3, forecast_minutes3],
|
| 334 |
+
outputs=[result_df3, summary_output3, json_output3]
|
| 335 |
+
)
|
| 336 |
+
|
| 337 |
+
load_btn.click(load_files, inputs=[csv_input, excel_input, lag_input], outputs=[load_status])
|
| 338 |
+
|
| 339 |
+
if __name__ == "__main__":
|
| 340 |
+
use_mcp = os.getenv("USE_MCP", "0") == "1"
|
| 341 |
+
if use_mcp:
|
| 342 |
+
demo.launch(mcp_server=True)
|
| 343 |
+
else:
|
| 344 |
+
demo.launch(server_name="0.0.0.0", share=False)
|