Refactor the diagnose_process_range function to remove its file-input parameters and use global variables for the dataframes. Improve the column-access logic and streamline data aggregation for important items. Enhance the JSON output formatting for clarity.
Browse files
app.py
CHANGED
|
@@ -78,21 +78,10 @@ def judge_status(value, ll, l, h, hh):
|
|
| 78 |
else:
|
| 79 |
return "OK"
|
| 80 |
|
| 81 |
-
def diagnose_process_range(
|
| 82 |
-
|
| 83 |
-
|
| 84 |
-
|
| 85 |
-
df = df.drop(df.columns[0], axis=1)
|
| 86 |
-
df.insert(0, "timestamp", timestamp_col)
|
| 87 |
-
df["timestamp"] = pd.to_datetime(df["timestamp"], errors="coerce")
|
| 88 |
-
|
| 89 |
-
thresholds_df = pd.read_excel(excel_file.name)
|
| 90 |
-
thresholds_df["Important"] = thresholds_df["Important"].astype(str).str.upper().map({"TRUE": True, "FALSE": False})
|
| 91 |
-
for col in ["LL", "L", "H", "HH"]:
|
| 92 |
-
if col in thresholds_df.columns:
|
| 93 |
-
thresholds_df[col] = pd.to_numeric(thresholds_df[col], errors="coerce")
|
| 94 |
-
except Exception as e:
|
| 95 |
-
return None, None, None, f"❌ 入力ファイルの読み込みに失敗しました: {e}", None
|
| 96 |
|
| 97 |
try:
|
| 98 |
target_time = pd.to_datetime(datetime_str)
|
|
@@ -112,10 +101,10 @@ def diagnose_process_range(csv_file, excel_file, process_name, datetime_str, win
|
|
| 112 |
all_results = []
|
| 113 |
for _, row in df_window.iterrows():
|
| 114 |
for _, thr in proc_thresholds.iterrows():
|
| 115 |
-
|
| 116 |
-
if
|
| 117 |
continue
|
| 118 |
-
value = row[
|
| 119 |
status = judge_status(value, thr.get("LL"), thr.get("L"), thr.get("H"), thr.get("HH"))
|
| 120 |
all_results.append({
|
| 121 |
"ColumnID": thr["ColumnID"],
|
|
@@ -131,13 +120,9 @@ def diagnose_process_range(csv_file, excel_file, process_name, datetime_str, win
|
|
| 131 |
["LOW-LOW", "LOW", "OK", "HIGH", "HIGH-HIGH"], fill_value=0
|
| 132 |
)
|
| 133 |
status_ratio = (status_counts / total * 100).round(1)
|
| 134 |
-
result_df_all = pd.DataFrame({
|
| 135 |
-
"状態": status_counts.index,
|
| 136 |
-
"件数": status_counts.values,
|
| 137 |
-
"割合(%)": status_ratio.values
|
| 138 |
-
})
|
| 139 |
|
| 140 |
-
# --- 重要項目全体
|
| 141 |
important_results = [r for r in all_results if r["重要項目"]]
|
| 142 |
if important_results:
|
| 143 |
total_imp = len(important_results)
|
|
@@ -145,43 +130,29 @@ def diagnose_process_range(csv_file, excel_file, process_name, datetime_str, win
|
|
| 145 |
["LOW-LOW", "LOW", "OK", "HIGH", "HIGH-HIGH"], fill_value=0
|
| 146 |
)
|
| 147 |
status_ratio_imp = (status_counts_imp / total_imp * 100).round(1)
|
| 148 |
-
result_df_imp = pd.DataFrame({
|
| 149 |
-
"状態": status_counts_imp.index,
|
| 150 |
-
"件数": status_counts_imp.values,
|
| 151 |
-
"割合(%)": status_ratio_imp.values
|
| 152 |
-
})
|
| 153 |
else:
|
| 154 |
result_df_imp = pd.DataFrame(columns=["状態", "件数", "割合(%)"])
|
| 155 |
status_ratio_imp = pd.Series(dtype=float)
|
| 156 |
|
| 157 |
-
# --- 重要項目ごと
|
| 158 |
result_per_item = []
|
| 159 |
-
for item in set([r["ItemName"] for r in important_results]):
|
| 160 |
item_results = [r for r in important_results if r["ItemName"] == item]
|
| 161 |
-
if not item_results:
|
| 162 |
-
continue
|
| 163 |
total_item = len(item_results)
|
| 164 |
status_counts_item = pd.Series([r["判定"] for r in item_results]).value_counts().reindex(
|
| 165 |
["LOW-LOW", "LOW", "OK", "HIGH", "HIGH-HIGH"], fill_value=0
|
| 166 |
)
|
| 167 |
status_ratio_item = (status_counts_item / total_item * 100).round(1)
|
| 168 |
for s, c, r in zip(status_counts_item.index, status_counts_item.values, status_ratio_item.values):
|
| 169 |
-
result_per_item.append({
|
| 170 |
-
"ItemName": item,
|
| 171 |
-
"状態": s,
|
| 172 |
-
"件数": int(c),
|
| 173 |
-
"割合(%)": float(r)
|
| 174 |
-
})
|
| 175 |
result_df_imp_items = pd.DataFrame(result_per_item)
|
| 176 |
|
| 177 |
# --- サマリー ---
|
| 178 |
summary = (
|
| 179 |
f"✅ {process_name} の診断完了({start_time} ~ {end_time})\n"
|
| 180 |
+ "[全項目] " + " / ".join([f"{s}:{r:.1f}%" for s, r in status_ratio.items()]) + "\n"
|
| 181 |
-
+ "[重要項目全体] " + (
|
| 182 |
-
" / ".join([f"{s}:{r:.1f}%" for s, r in status_ratio_imp.items()])
|
| 183 |
-
if not result_df_imp.empty else "対象データなし"
|
| 184 |
-
)
|
| 185 |
)
|
| 186 |
|
| 187 |
# --- JSON ---
|
|
@@ -189,9 +160,7 @@ def diagnose_process_range(csv_file, excel_file, process_name, datetime_str, win
|
|
| 189 |
"集計結果": {
|
| 190 |
"全項目割合": {k: float(v) for k, v in status_ratio.to_dict().items()},
|
| 191 |
"重要項目全体割合": {k: float(v) for k, v in status_ratio_imp.to_dict().items()} if not result_df_imp.empty else {},
|
| 192 |
-
"重要項目ごと割合": [
|
| 193 |
-
{k: v for k, v in row.items()} for _, row in result_df_imp_items.iterrows()
|
| 194 |
-
]
|
| 195 |
}
|
| 196 |
}
|
| 197 |
result_json = json.dumps(json_data, ensure_ascii=False, indent=2)
|
|
|
|
| 78 |
else:
|
| 79 |
return "OK"
|
| 80 |
|
| 81 |
+
def diagnose_process_range(process_name, datetime_str, window_minutes):
|
| 82 |
+
global df, thresholds_df
|
| 83 |
+
if df is None or thresholds_df is None:
|
| 84 |
+
return None, None, None, "⚠ ファイル未読み込み", None
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 85 |
|
| 86 |
try:
|
| 87 |
target_time = pd.to_datetime(datetime_str)
|
|
|
|
| 101 |
all_results = []
|
| 102 |
for _, row in df_window.iterrows():
|
| 103 |
for _, thr in proc_thresholds.iterrows():
|
| 104 |
+
col_name = f"{thr['ColumnID']}_{thr['ItemName']}_{thr['ProcessNo_ProcessName']}"
|
| 105 |
+
if col_name not in df.columns:
|
| 106 |
continue
|
| 107 |
+
value = row[col_name]
|
| 108 |
status = judge_status(value, thr.get("LL"), thr.get("L"), thr.get("H"), thr.get("HH"))
|
| 109 |
all_results.append({
|
| 110 |
"ColumnID": thr["ColumnID"],
|
|
|
|
| 120 |
["LOW-LOW", "LOW", "OK", "HIGH", "HIGH-HIGH"], fill_value=0
|
| 121 |
)
|
| 122 |
status_ratio = (status_counts / total * 100).round(1)
|
| 123 |
+
result_df_all = pd.DataFrame({"状態": status_counts.index, "件数": status_counts.values, "割合(%)": status_ratio.values})
|
|
|
|
|
|
|
|
|
|
|
|
|
| 124 |
|
| 125 |
+
# --- 重要項目全体 ---
|
| 126 |
important_results = [r for r in all_results if r["重要項目"]]
|
| 127 |
if important_results:
|
| 128 |
total_imp = len(important_results)
|
|
|
|
| 130 |
["LOW-LOW", "LOW", "OK", "HIGH", "HIGH-HIGH"], fill_value=0
|
| 131 |
)
|
| 132 |
status_ratio_imp = (status_counts_imp / total_imp * 100).round(1)
|
| 133 |
+
result_df_imp = pd.DataFrame({"状態": status_counts_imp.index, "件数": status_counts_imp.values, "割合(%)": status_ratio_imp.values})
|
|
|
|
|
|
|
|
|
|
|
|
|
| 134 |
else:
|
| 135 |
result_df_imp = pd.DataFrame(columns=["状態", "件数", "割合(%)"])
|
| 136 |
status_ratio_imp = pd.Series(dtype=float)
|
| 137 |
|
| 138 |
+
# --- 重要項目ごと ---
|
| 139 |
result_per_item = []
|
| 140 |
+
for item in set([r["ItemName"] for r in important_results]):
|
| 141 |
item_results = [r for r in important_results if r["ItemName"] == item]
|
|
|
|
|
|
|
| 142 |
total_item = len(item_results)
|
| 143 |
status_counts_item = pd.Series([r["判定"] for r in item_results]).value_counts().reindex(
|
| 144 |
["LOW-LOW", "LOW", "OK", "HIGH", "HIGH-HIGH"], fill_value=0
|
| 145 |
)
|
| 146 |
status_ratio_item = (status_counts_item / total_item * 100).round(1)
|
| 147 |
for s, c, r in zip(status_counts_item.index, status_counts_item.values, status_ratio_item.values):
|
| 148 |
+
result_per_item.append({"ItemName": item, "状態": s, "件数": int(c), "割合(%)": float(r)})
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 149 |
result_df_imp_items = pd.DataFrame(result_per_item)
|
| 150 |
|
| 151 |
# --- サマリー ---
|
| 152 |
summary = (
|
| 153 |
f"✅ {process_name} の診断完了({start_time} ~ {end_time})\n"
|
| 154 |
+ "[全項目] " + " / ".join([f"{s}:{r:.1f}%" for s, r in status_ratio.items()]) + "\n"
|
| 155 |
+
+ "[重要項目全体] " + (" / ".join([f"{s}:{r:.1f}%" for s, r in status_ratio_imp.items()]) if not result_df_imp.empty else "対象データなし")
|
|
|
|
|
|
|
|
|
|
| 156 |
)
|
| 157 |
|
| 158 |
# --- JSON ---
|
|
|
|
| 160 |
"集計結果": {
|
| 161 |
"全項目割合": {k: float(v) for k, v in status_ratio.to_dict().items()},
|
| 162 |
"重要項目全体割合": {k: float(v) for k, v in status_ratio_imp.to_dict().items()} if not result_df_imp.empty else {},
|
| 163 |
+
"重要項目ごと割合": [dict(row) for _, row in result_df_imp_items.iterrows()]
|
|
|
|
|
|
|
| 164 |
}
|
| 165 |
}
|
| 166 |
result_json = json.dumps(json_data, ensure_ascii=False, indent=2)
|