Ken-INOUE committed on
Commit
466d122
·
1 Parent(s): d098a1b

Refactor diagnosis process to improve file handling and error reporting. Enhanced data aggregation for important items and added detailed JSON output for item-wise analysis. Updated function parameters for better clarity.

Browse files
Files changed (1) hide show
  1. app.py +79 -20
app.py CHANGED
@@ -78,66 +78,125 @@ def judge_status(value, ll, l, h, hh):
78
  else:
79
  return "OK"
80
 
81
def diagnose_process_range(process_name, datetime_str, window_minutes):
    """Diagnose one process over a time window ending at *datetime_str*.

    Relies on the module-level globals ``df`` (measurement data with a
    ``timestamp`` column) and ``thresholds_df`` (threshold table) having
    been loaded beforehand.

    Returns a 5-tuple:
    ``(result_df_all, result_df_imp, None, summary, result_json)`` on
    success, or ``(None, None, None, message, None)`` on any failure.
    """
    global df, thresholds_df

    status_order = ["LOW-LOW", "LOW", "OK", "HIGH", "HIGH-HIGH"]

    def tally(records):
        # Count each status label and express it as a percentage of records.
        counts = pd.Series([rec["判定"] for rec in records]).value_counts().reindex(
            status_order, fill_value=0)
        ratios = (counts / len(records) * 100).round(1)
        return counts, ratios

    # Guard clauses: bail out early with a user-facing warning message.
    if df is None or thresholds_df is None:
        return None, None, None, "⚠ ファイル未読み込み", None

    try:
        target_time = pd.to_datetime(datetime_str)
    except Exception:
        return None, None, None, "⚠ 日時が不正です", None

    start_time = target_time - pd.Timedelta(minutes=window_minutes)
    in_window = (df["timestamp"] >= start_time) & (df["timestamp"] <= target_time)
    df_window = df[in_window]
    if df_window.empty:
        return None, None, None, "⚠ 指定範囲にデータなし", None

    proc_thresholds = thresholds_df[thresholds_df["ProcessNo_ProcessName"] == process_name]
    if proc_thresholds.empty:
        return None, None, None, f"⚠ {process_name} の閾値なし", None

    # Judge every (data row, threshold row) pair whose composite column exists.
    all_results = []
    for _, data_row in df_window.iterrows():
        for _, thr in proc_thresholds.iterrows():
            col_tuple = f"{thr['ColumnID']}_{thr['ItemName']}_{thr['ProcessNo_ProcessName']}"
            if col_tuple not in df.columns:
                continue
            value = data_row[col_tuple]
            status = judge_status(value, thr.get("LL"), thr.get("L"), thr.get("H"), thr.get("HH"))
            all_results.append({
                "ItemName": thr["ItemName"],
                "判定": status,
                "重要項目": bool(thr.get("Important", False)),
                "時刻": str(data_row["timestamp"])
            })

    # Aggregate over all items.
    status_counts, status_ratio = tally(all_results)
    result_df_all = pd.DataFrame({
        "状態": status_counts.index,
        "件数": status_counts.values,
        "割合(%)": status_ratio.values,
    })

    # Aggregate over the subset flagged as important.
    important = [rec for rec in all_results if rec["重要項目"]]
    if important:
        imp_counts, status_ratio_imp = tally(important)
        result_df_imp = pd.DataFrame({
            "状態": imp_counts.index,
            "件数": imp_counts.values,
            "割合(%)": status_ratio_imp.values,
        })
    else:
        result_df_imp = pd.DataFrame(columns=["状態", "件数", "割合(%)"])
        status_ratio_imp = pd.Series(dtype=float)

    ratio_all_json = {k: convert_value(v) for k, v in status_ratio.to_dict().items()}
    if result_df_imp.empty:
        ratio_imp_json = {}
    else:
        ratio_imp_json = {k: convert_value(v) for k, v in status_ratio_imp.to_dict().items()}
    result_json = json.dumps(
        {
            "集計結果": {
                "全項目割合": ratio_all_json,
                "重要項目全体割合": ratio_imp_json
            }
        },
        ensure_ascii=False, indent=2)

    summary = f"✅ {process_name} の診断完了({start_time} ~ {target_time})"
    return result_df_all, result_df_imp, None, summary, result_json
141
 
142
  # --- Tab2: 傾向検出 ---
143
  def detect_trends_with_forecast(process_name, datetime_str, window_minutes, forecast_minutes):
 
78
  else:
79
  return "OK"
80
 
81
def diagnose_process_range(csv_file, excel_file, process_name, datetime_str, window_minutes):
    """Run a threshold diagnosis for one process over a time window.

    Parameters
    ----------
    csv_file : uploaded file object exposing a ``.name`` path; measurement
        CSV read with a 3-row header (MultiIndex columns), first column is
        the timestamp. (Assumes a gradio-style file wrapper — TODO confirm.)
    excel_file : uploaded file object with the threshold table (columns
        ColumnID, ItemName, ProcessNo_ProcessName, LL/L/H/HH, Important).
    process_name : value matched against ``ProcessNo_ProcessName``.
    datetime_str : end of the analysis window (parsed by pandas).
    window_minutes : window length in minutes, ending at *datetime_str*.

    Returns
    -------
    ``(result_df_all, result_df_imp, result_df_imp_items, summary, result_json)``
    on success, or ``(None, None, None, message, None)`` on any failure.
    """
    # --- Load inputs; any failure is reported to the user, not raised. ---
    try:
        df = pd.read_csv(csv_file.name, header=[0, 1, 2])
        timestamp_col = df.iloc[:, 0]
        df = df.drop(df.columns[0], axis=1)
        df.insert(0, "timestamp", timestamp_col)
        df["timestamp"] = pd.to_datetime(df["timestamp"], errors="coerce")

        thresholds_df = pd.read_excel(excel_file.name)
        # Normalize Important to a real bool. Fix: map() leaves NaN for any
        # value other than TRUE/FALSE, and bool(NaN) is True, so blanks and
        # typos were silently treated as important. fillna(False) closes that.
        thresholds_df["Important"] = (
            thresholds_df["Important"].astype(str).str.upper()
            .map({"TRUE": True, "FALSE": False})
            .fillna(False)
            .astype(bool)
        )
        for col in ["LL", "L", "H", "HH"]:
            if col in thresholds_df.columns:
                thresholds_df[col] = pd.to_numeric(thresholds_df[col], errors="coerce")
    except Exception as e:
        return None, None, None, f"❌ 入力ファイルの読み込みに失敗しました: {e}", None

    try:
        target_time = pd.to_datetime(datetime_str)
    except Exception:
        return None, None, None, f"⚠ 入力した日時 {datetime_str} が無効です。", None

    start_time = target_time - pd.Timedelta(minutes=window_minutes)
    end_time = target_time
    df_window = df[(df["timestamp"] >= start_time) & (df["timestamp"] <= end_time)]
    if df_window.empty:
        return None, None, None, "⚠ 指定した時間幅にデータが見つかりません。", None

    proc_thresholds = thresholds_df[thresholds_df["ProcessNo_ProcessName"] == process_name]
    if proc_thresholds.empty:
        return None, None, None, f"⚠ プロセス {process_name} の閾値が設定されていません。", None

    all_results = []
    for _, row in df_window.iterrows():
        for _, thr in proc_thresholds.iterrows():
            # CSV columns form a 3-level MultiIndex:
            # (ColumnID, ItemName, ProcessNo_ProcessName).
            col_tuple = (thr["ColumnID"], thr["ItemName"], thr["ProcessNo_ProcessName"])
            if col_tuple not in df.columns:
                continue
            value = row[col_tuple]
            status = judge_status(value, thr.get("LL"), thr.get("L"), thr.get("H"), thr.get("HH"))
            all_results.append({
                "ColumnID": thr["ColumnID"],
                "ItemName": thr["ItemName"],
                "判定": status,
                "重要項目": bool(thr.get("Important", False)),
                "時刻": str(row["timestamp"])
            })

    # Fix: with no matching columns, total == 0 below produced NaN ratios and
    # json.dumps would emit non-standard NaN tokens. Report it instead.
    if not all_results:
        return None, None, None, f"⚠ プロセス {process_name} に対応する列がデータに見つかりません。", None

    # --- 全項目集計 (all items) ---
    total = len(all_results)
    status_counts = pd.Series([r["判定"] for r in all_results]).value_counts().reindex(
        ["LOW-LOW", "LOW", "OK", "HIGH", "HIGH-HIGH"], fill_value=0
    )
    status_ratio = (status_counts / total * 100).round(1)
    result_df_all = pd.DataFrame({
        "状態": status_counts.index,
        "件数": status_counts.values,
        "割合(%)": status_ratio.values
    })

    # --- 重要項目全体集計 (important items, aggregated) ---
    important_results = [r for r in all_results if r["重要項目"]]
    if important_results:
        total_imp = len(important_results)
        status_counts_imp = pd.Series([r["判定"] for r in important_results]).value_counts().reindex(
            ["LOW-LOW", "LOW", "OK", "HIGH", "HIGH-HIGH"], fill_value=0
        )
        status_ratio_imp = (status_counts_imp / total_imp * 100).round(1)
        result_df_imp = pd.DataFrame({
            "状態": status_counts_imp.index,
            "件数": status_counts_imp.values,
            "割合(%)": status_ratio_imp.values
        })
    else:
        result_df_imp = pd.DataFrame(columns=["状態", "件数", "割合(%)"])
        status_ratio_imp = pd.Series(dtype=float)

    # --- 重要項目ごと集計 (important items, per item) ---
    result_per_item = []
    # sorted() makes the per-item output order deterministic (set order isn't);
    # key=str tolerates non-string / NaN item names.
    for item in sorted({r["ItemName"] for r in important_results}, key=str):
        item_results = [r for r in important_results if r["ItemName"] == item]
        if not item_results:
            # NaN item names never compare equal to themselves; skip rather
            # than divide by zero below.
            continue
        total_item = len(item_results)
        status_counts_item = pd.Series([r["判定"] for r in item_results]).value_counts().reindex(
            ["LOW-LOW", "LOW", "OK", "HIGH", "HIGH-HIGH"], fill_value=0
        )
        status_ratio_item = (status_counts_item / total_item * 100).round(1)
        for s, c, r in zip(status_counts_item.index, status_counts_item.values, status_ratio_item.values):
            result_per_item.append({
                "ItemName": item,
                "状態": s,
                "件数": int(c),       # plain int/float so json.dumps accepts them
                "割合(%)": float(r)
            })
    result_df_imp_items = pd.DataFrame(result_per_item)

    # --- サマリー ---
    summary = (
        f"✅ {process_name} の診断完了({start_time} ~ {end_time})\n"
        + "[全項目] " + " / ".join([f"{s}:{r:.1f}%" for s, r in status_ratio.items()]) + "\n"
        + "[重要項目全体] " + (
            " / ".join([f"{s}:{r:.1f}%" for s, r in status_ratio_imp.items()])
            if not result_df_imp.empty else "対象データなし"
        )
    )

    # --- JSON ---
    json_data = {
        "集計結果": {
            "全項目割合": {k: float(v) for k, v in status_ratio.to_dict().items()},
            "重要項目全体割合": {k: float(v) for k, v in status_ratio_imp.to_dict().items()} if not result_df_imp.empty else {},
            "重要項目ごと割合": [row.to_dict() for _, row in result_df_imp_items.iterrows()]
        }
    }
    result_json = json.dumps(json_data, ensure_ascii=False, indent=2)

    return result_df_all, result_df_imp, result_df_imp_items, summary, result_json
 
200
 
201
  # --- Tab2: 傾向検出 ---
202
  def detect_trends_with_forecast(process_name, datetime_str, window_minutes, forecast_minutes):