Enhance trend figure generation by introducing grouping options for display (all, category, item) and implementing pagination for better data visualization. Refactor related functions to support new grouping logic and update UI components accordingly.
Browse files
app.py
CHANGED
|
@@ -3,7 +3,7 @@ import pandas as pd
|
|
| 3 |
import numpy as np
|
| 4 |
import os
|
| 5 |
import re
|
| 6 |
-
from typing import Dict, Tuple, List, Optional
|
| 7 |
import plotly.graph_objects as go
|
| 8 |
from plotly.subplots import make_subplots
|
| 9 |
import plotly.io as pio
|
|
@@ -41,8 +41,6 @@ def try_read_csv_3header(path_or_file) -> pd.DataFrame:
|
|
| 41 |
df = df.drop(df.columns[0], axis=1)
|
| 42 |
df.insert(0, "timestamp", ts)
|
| 43 |
|
| 44 |
-
# 列名はタプルのまま保持(timestampは str)
|
| 45 |
-
# ただし内部処理用に文字列連結も作成できるように関数を用意
|
| 46 |
return df
|
| 47 |
|
| 48 |
def col_tuple_to_str(col) -> str:
|
|
@@ -62,12 +60,10 @@ def build_index_maps(df: pd.DataFrame):
|
|
| 62 |
if isinstance(col, tuple) and len(col) >= 3:
|
| 63 |
col_id, item_name, process_name = str(col[0]), str(col[1]), str(col[2])
|
| 64 |
else:
|
| 65 |
-
# 非タプル(安全策)
|
| 66 |
parts = str(col).split("_")
|
| 67 |
if len(parts) >= 3:
|
| 68 |
col_id, item_name, process_name = parts[0], "_".join(parts[1:-1]), parts[-1]
|
| 69 |
else:
|
| 70 |
-
# プロセスが分からない列はスキップ
|
| 71 |
continue
|
| 72 |
rec = {
|
| 73 |
"col_tuple": col,
|
|
@@ -77,30 +73,34 @@ def build_index_maps(df: pd.DataFrame):
|
|
| 77 |
"col_str": col_tuple_to_str(col),
|
| 78 |
}
|
| 79 |
process_map.setdefault(process_name, []).append(rec)
|
| 80 |
-
# プロセス候補・アイテム候補を返すために使う
|
| 81 |
processes = sorted(list(process_map.keys()), key=lambda x: normalize(x))
|
| 82 |
return process_map, processes
|
| 83 |
|
| 84 |
def extract_measure_tag(item_name: str) -> str:
|
| 85 |
"""
|
| 86 |
-
項目名末尾の計測項目タグを抽出。
|
| 87 |
-
例:
|
| 88 |
-
"処理水 有機物 分析値 [mg/L]" → "mg/L"
|
| 89 |
-
"原水 TOC" → "TOC"
|
| 90 |
-
"導電率(電気伝導度) [mS/cm]" → "mS/cm"
|
| 91 |
-
優先順:
|
| 92 |
-
1) [...] の中身
|
| 93 |
-
2) 全角/半角スペース区切りの末尾語(英字混在や記号含む)
|
| 94 |
"""
|
| 95 |
s = normalize(item_name)
|
| 96 |
m = re.search(r"\[([^\[\]]+)\]\s*$", s)
|
| 97 |
if m:
|
| 98 |
return m.group(1).strip()
|
| 99 |
-
# 角括弧がなければ末尾語
|
| 100 |
tokens = re.split(r"\s+", s)
|
| 101 |
-
if tokens
|
| 102 |
-
|
| 103 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 104 |
|
| 105 |
# ======================================
|
| 106 |
# しきい値ハンドリング
|
|
@@ -114,10 +114,8 @@ def try_read_thresholds_excel(file) -> Optional[pd.DataFrame]:
|
|
| 114 |
return None
|
| 115 |
df = pd.read_excel(file)
|
| 116 |
df.columns = [normalize(c) for c in df.columns]
|
| 117 |
-
# 必須カラム確認(最低限)
|
| 118 |
needed = {"ColumnID", "ItemName", "ProcessNo_ProcessName"}
|
| 119 |
if not needed.issubset(set(df.columns)):
|
| 120 |
-
# 列名が違う場合の簡易吸収
|
| 121 |
rename_map = {}
|
| 122 |
for k in list(df.columns):
|
| 123 |
nk = normalize(str(k))
|
|
@@ -129,7 +127,6 @@ def try_read_thresholds_excel(file) -> Optional[pd.DataFrame]:
|
|
| 129 |
rename_map[k] = "ProcessNo_ProcessName"
|
| 130 |
if rename_map:
|
| 131 |
df = df.rename(columns=rename_map)
|
| 132 |
-
# 数値化
|
| 133 |
for c in ["LL", "L", "H", "HH"]:
|
| 134 |
if c in df.columns:
|
| 135 |
df[c] = pd.to_numeric(df[c], errors="coerce")
|
|
@@ -159,8 +156,7 @@ def build_threshold_lookup(thr_df: Optional[pd.DataFrame]) -> Dict[Tuple[str, st
|
|
| 159 |
|
| 160 |
def auto_threshold(series: pd.Series) -> Tuple[float, float, float, float]:
|
| 161 |
"""
|
| 162 |
-
自動しきい値: mean ± std(LL/L/H/HH
|
| 163 |
-
例: L=mean-std, LL=mean-2std, H=mean+std, HH=mean+2std
|
| 164 |
"""
|
| 165 |
s = series.dropna()
|
| 166 |
if len(s) < 5:
|
|
@@ -180,166 +176,47 @@ def judge_status(value, LL, L, H, HH) -> str:
|
|
| 180 |
return "H"
|
| 181 |
return "OK"
|
| 182 |
|
| 183 |
-
#
|
| 184 |
STATUS_COLOR = {
|
| 185 |
-
"LL": "#2b6cb0",
|
| 186 |
-
"L": "#63b3ed",
|
| 187 |
-
"OK": "#a0aec0",
|
| 188 |
-
"H": "#f6ad55",
|
| 189 |
-
"HH": "#e53e3e",
|
| 190 |
}
|
| 191 |
-
|
| 192 |
-
# 線色(系列ライン):列ごとに安定色
|
| 193 |
-
LINE_COLOR = "#4a5568" # 濃いグレー
|
| 194 |
|
| 195 |
# ======================================
|
| 196 |
-
#
|
|
|
|
| 197 |
# ======================================
|
| 198 |
-
def
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 199 |
df: pd.DataFrame,
|
| 200 |
process_map: Dict[str, List[dict]],
|
| 201 |
process_name: str,
|
| 202 |
selected_items: List[str],
|
| 203 |
thr_df: Optional[pd.DataFrame],
|
| 204 |
-
thr_mode: str,
|
| 205 |
-
date_min: Optional[str]
|
| 206 |
-
date_max: Optional[str]
|
| 207 |
-
|
| 208 |
-
|
| 209 |
-
計測項目タグごと(extract_measure_tag)に図を分けて生成。
|
| 210 |
-
selected_items は「2行目(ItemName)」の値。
|
| 211 |
-
"""
|
| 212 |
-
if df is None or process_name is None or process_name == "":
|
| 213 |
-
return []
|
| 214 |
-
|
| 215 |
-
# 対象プロセスの列レコード
|
| 216 |
-
recs = process_map.get(process_name, [])
|
| 217 |
-
if not recs:
|
| 218 |
-
return []
|
| 219 |
-
|
| 220 |
-
# 2行目(ItemName)で絞り込み
|
| 221 |
-
selected_items_set = set([normalize(x) for x in (selected_items or [])])
|
| 222 |
-
recs = [r for r in recs if normalize(r["item"]) in selected_items_set]
|
| 223 |
-
if not recs:
|
| 224 |
-
return []
|
| 225 |
-
|
| 226 |
-
# 日付範囲フィルタ
|
| 227 |
-
dfw = df.copy()
|
| 228 |
-
if date_min:
|
| 229 |
-
dfw = dfw[dfw["timestamp"] >= pd.to_datetime(date_min)]
|
| 230 |
-
if date_max:
|
| 231 |
-
dfw = dfw[dfw["timestamp"] <= pd.to_datetime(date_max)]
|
| 232 |
-
if dfw.empty:
|
| 233 |
-
return []
|
| 234 |
-
|
| 235 |
-
# しきい値参照
|
| 236 |
-
thr_lookup = build_threshold_lookup(thr_df) if thr_mode == "excel" else {}
|
| 237 |
-
|
| 238 |
-
# 測定項目タグごとにグループ化
|
| 239 |
-
groups: Dict[str, List[dict]] = {}
|
| 240 |
-
for r in recs:
|
| 241 |
-
tag = extract_measure_tag(r["item"])
|
| 242 |
-
groups.setdefault(tag, []).append(r)
|
| 243 |
-
|
| 244 |
-
figs = []
|
| 245 |
-
for tag, cols in groups.items():
|
| 246 |
-
fig = go.Figure()
|
| 247 |
-
# 各列を描画
|
| 248 |
-
for r in cols:
|
| 249 |
-
col = r["col_tuple"]
|
| 250 |
-
col_str = r["col_str"]
|
| 251 |
-
if col not in dfw.columns:
|
| 252 |
-
# まれにヘッダー崩れなど
|
| 253 |
-
if col_str in dfw.columns:
|
| 254 |
-
series = dfw[col_str]
|
| 255 |
-
else:
|
| 256 |
-
continue
|
| 257 |
-
else:
|
| 258 |
-
series = dfw[col]
|
| 259 |
-
|
| 260 |
-
# 値
|
| 261 |
-
x = dfw["timestamp"]
|
| 262 |
-
y = pd.to_numeric(series, errors="coerce")
|
| 263 |
-
|
| 264 |
-
# しきい値決定
|
| 265 |
-
if thr_mode == "excel":
|
| 266 |
-
key = (normalize(r["id"]), normalize(r["item"]), normalize(r["process"]))
|
| 267 |
-
LL, L, H, HH = thr_lookup.get(key, (np.nan, np.nan, np.nan, np.nan))
|
| 268 |
-
# Excelに見つからない場合は自動にフォールバック
|
| 269 |
-
if all(pd.isna(v) for v in [LL, L, H, HH]):
|
| 270 |
-
LL, L, H, HH = auto_threshold(y)
|
| 271 |
-
else:
|
| 272 |
-
LL, L, H, HH = auto_threshold(y)
|
| 273 |
-
|
| 274 |
-
# 状態ごとに点色を決める
|
| 275 |
-
colors = []
|
| 276 |
-
for v in y:
|
| 277 |
-
if pd.isna(v):
|
| 278 |
-
colors.append("rgba(0,0,0,0)")
|
| 279 |
-
else:
|
| 280 |
-
st = judge_status(v, LL, L, H, HH)
|
| 281 |
-
colors.append(STATUS_COLOR.get(st, STATUS_COLOR["OK"]))
|
| 282 |
-
|
| 283 |
-
# 下地のライン(視認性のため薄色)
|
| 284 |
-
fig.add_trace(go.Scatter(
|
| 285 |
-
x=x, y=y, mode="lines",
|
| 286 |
-
name=f"{r['item']} ({r['id']})",
|
| 287 |
-
line=dict(color=LINE_COLOR, width=1.5),
|
| 288 |
-
hovertemplate="%{x}<br>%{y}<extra>"+f"{r['item']} ({r['id']})"+"</extra>"
|
| 289 |
-
))
|
| 290 |
-
# 色付きマーカーで逸脱強調
|
| 291 |
-
fig.add_trace(go.Scatter(
|
| 292 |
-
x=x, y=y, mode="markers",
|
| 293 |
-
name=f"{r['item']} markers",
|
| 294 |
-
marker=dict(size=6, color=colors),
|
| 295 |
-
showlegend=False,
|
| 296 |
-
hovertemplate="%{x}<br>%{y}<extra></extra>"
|
| 297 |
-
))
|
| 298 |
-
|
| 299 |
-
# しきい値ガイド(あれば)
|
| 300 |
-
def add_hline(val, label):
|
| 301 |
-
if pd.notna(val):
|
| 302 |
-
fig.add_hline(y=float(val), line=dict(width=1, dash="dot"),
|
| 303 |
-
annotation_text=label, annotation_position="top left")
|
| 304 |
-
|
| 305 |
-
add_hline(LL, "LL")
|
| 306 |
-
add_hline(L, "L")
|
| 307 |
-
add_hline(H, "H")
|
| 308 |
-
add_hline(HH, "HH")
|
| 309 |
-
|
| 310 |
-
fig.update_layout(
|
| 311 |
-
title=f"{process_name} | 計測項目: {tag}",
|
| 312 |
-
xaxis_title="timestamp",
|
| 313 |
-
yaxis_title=tag,
|
| 314 |
-
legend_title="系列",
|
| 315 |
-
margin=dict(l=10, r=10, t=40, b=10),
|
| 316 |
-
hovermode="x unified",
|
| 317 |
-
)
|
| 318 |
-
figs.append(fig)
|
| 319 |
-
|
| 320 |
-
return figs
|
| 321 |
-
|
| 322 |
-
# ======================================
|
| 323 |
-
# 新規:サブプロット1枚でまとめる図
|
| 324 |
-
# ======================================
|
| 325 |
-
def make_trend_figure(
|
| 326 |
-
df: pd.DataFrame,
|
| 327 |
-
process_map: Dict[str, List[dict]],
|
| 328 |
-
process_name: str,
|
| 329 |
-
selected_items: List[str],
|
| 330 |
-
thr_df: Optional[pd.DataFrame],
|
| 331 |
-
thr_mode: str, # "excel" or "auto"
|
| 332 |
-
date_min: Optional[str] = None,
|
| 333 |
-
date_max: Optional[str] = None,
|
| 334 |
-
_force_tags: Optional[List[str]] = None, # ← 追加:ページ分割用に表示するタグを指定
|
| 335 |
) -> Optional[go.Figure]:
|
| 336 |
if df is None or not process_name:
|
| 337 |
return None
|
| 338 |
recs = process_map.get(process_name, [])
|
| 339 |
if not recs:
|
| 340 |
return None
|
| 341 |
-
|
| 342 |
-
recs = [r for r in recs if normalize(r["item"]) in
|
| 343 |
if not recs:
|
| 344 |
return None
|
| 345 |
|
|
@@ -352,33 +229,42 @@ def make_trend_figure(
|
|
| 352 |
return None
|
| 353 |
|
| 354 |
thr_lookup = build_threshold_lookup(thr_df) if thr_mode == "excel" else {}
|
|
|
|
| 355 |
|
| 356 |
-
#
|
| 357 |
groups: Dict[str, List[dict]] = {}
|
| 358 |
for r in recs:
|
| 359 |
-
|
| 360 |
-
|
| 361 |
-
|
| 362 |
-
if not
|
| 363 |
return None
|
| 364 |
|
| 365 |
-
rows = len(
|
| 366 |
-
# rows が多いときは、Plotly の制約: vertical_spacing <= 1/(rows-1)
|
| 367 |
if rows <= 1:
|
| 368 |
vspace = 0.03
|
| 369 |
else:
|
| 370 |
-
max_vs = (1.0 / (rows - 1)) - 1e-4
|
| 371 |
vspace = max(0.0, min(0.03, max_vs))
|
| 372 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 373 |
fig = make_subplots(
|
| 374 |
rows=rows, cols=1, shared_xaxes=True,
|
| 375 |
vertical_spacing=vspace,
|
| 376 |
-
subplot_titles=
|
| 377 |
)
|
| 378 |
|
|
|
|
| 379 |
row_idx = 1
|
| 380 |
-
for
|
| 381 |
-
cols = groups[
|
| 382 |
for r in cols:
|
| 383 |
col = r["col_tuple"]
|
| 384 |
col_str = r["col_str"]
|
|
@@ -428,28 +314,26 @@ def make_trend_figure(
|
|
| 428 |
),
|
| 429 |
row=row_idx, col=1
|
| 430 |
)
|
| 431 |
-
|
| 432 |
-
for val, label in [(LL, "LL"), (L, "L"), (H, "H"), (HH, "HH")]:
|
| 433 |
-
if pd.notna(val):
|
| 434 |
-
fig.add_hline(
|
| 435 |
-
y=float(val), line=dict(width=1, dash="dot"),
|
| 436 |
-
annotation_text=label, annotation_position="top left",
|
| 437 |
-
row=row_idx, col=1
|
| 438 |
-
)
|
| 439 |
row_idx += 1
|
| 440 |
|
| 441 |
fig.update_layout(
|
| 442 |
-
title=
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 443 |
xaxis_title="timestamp",
|
| 444 |
showlegend=True,
|
| 445 |
margin=dict(l=10, r=10, t=40, b=10),
|
| 446 |
hovermode="x unified",
|
| 447 |
-
height=max(
|
| 448 |
)
|
| 449 |
return fig
|
| 450 |
|
| 451 |
-
#
|
| 452 |
-
def
|
| 453 |
df: pd.DataFrame,
|
| 454 |
process_map: Dict[str, List[dict]],
|
| 455 |
process_name: str,
|
|
@@ -459,145 +343,33 @@ def make_trend_figure_paged(
|
|
| 459 |
date_min: Optional[str],
|
| 460 |
date_max: Optional[str],
|
| 461 |
page: int,
|
| 462 |
-
|
|
|
|
| 463 |
) -> Tuple[Optional[go.Figure], int, List[str]]:
|
| 464 |
-
# 対象タグの全一覧を作る
|
| 465 |
recs = process_map.get(process_name, [])
|
| 466 |
if not recs:
|
| 467 |
return None, 0, []
|
| 468 |
-
|
| 469 |
-
recs = [r for r in recs if normalize(r["item"]) in
|
| 470 |
if not recs:
|
| 471 |
return None, 0, []
|
| 472 |
-
groups: Dict[str, List[dict]] = {}
|
| 473 |
-
for r in recs:
|
| 474 |
-
groups.setdefault(extract_measure_tag(r["item"]), []).append(r)
|
| 475 |
-
all_tags = list(groups.keys())
|
| 476 |
-
total_pages = max(1, int(np.ceil(len(all_tags) / max(1, tags_per_page))))
|
| 477 |
-
page = int(max(1, min(page, total_pages)))
|
| 478 |
-
start = (page - 1) * tags_per_page
|
| 479 |
-
end = start + tags_per_page
|
| 480 |
-
tags_slice = all_tags[start:end]
|
| 481 |
-
fig = make_trend_figure(
|
| 482 |
-
df, process_map, process_name, selected_items, thr_df, thr_mode, date_min, date_max, _force_tags=tags_slice
|
| 483 |
-
)
|
| 484 |
-
return fig, total_pages, all_tags
|
| 485 |
-
|
| 486 |
-
# ======================================
|
| 487 |
-
# 新規:計測項目タグごとに個別Figure
|
| 488 |
-
# ======================================
|
| 489 |
-
def make_trend_figs_by_tag(
|
| 490 |
-
df: pd.DataFrame,
|
| 491 |
-
process_map: Dict[str, List[dict]],
|
| 492 |
-
process_name: str,
|
| 493 |
-
selected_items: List[str],
|
| 494 |
-
thr_df: Optional[pd.DataFrame],
|
| 495 |
-
thr_mode: str,
|
| 496 |
-
date_min: Optional[str] = None,
|
| 497 |
-
date_max: Optional[str] = None,
|
| 498 |
-
) -> Dict[str, go.Figure]:
|
| 499 |
-
if df is None or not process_name:
|
| 500 |
-
return {}
|
| 501 |
-
recs = process_map.get(process_name, [])
|
| 502 |
-
if not recs:
|
| 503 |
-
return {}
|
| 504 |
-
selected_items_set = set([normalize(x) for x in (selected_items or [])])
|
| 505 |
-
recs = [r for r in recs if normalize(r["item"]) in selected_items_set]
|
| 506 |
-
if not recs:
|
| 507 |
-
return {}
|
| 508 |
-
|
| 509 |
-
dfw = df.copy()
|
| 510 |
-
if date_min:
|
| 511 |
-
dfw = dfw[dfw["timestamp"] >= pd.to_datetime(date_min)]
|
| 512 |
-
if date_max:
|
| 513 |
-
dfw = dfw[dfw["timestamp"] <= pd.to_datetime(date_max)]
|
| 514 |
-
if dfw.empty:
|
| 515 |
-
return {}
|
| 516 |
-
|
| 517 |
-
thr_lookup = build_threshold_lookup(thr_df) if thr_mode == "excel" else {}
|
| 518 |
|
|
|
|
| 519 |
groups: Dict[str, List[dict]] = {}
|
| 520 |
for r in recs:
|
| 521 |
-
|
| 522 |
-
|
| 523 |
-
|
| 524 |
-
|
| 525 |
-
|
| 526 |
-
|
| 527 |
-
|
| 528 |
-
col = r["col_tuple"]
|
| 529 |
-
col_str = r["col_str"]
|
| 530 |
-
if col in dfw.columns:
|
| 531 |
-
series = dfw[col]
|
| 532 |
-
elif col_str in dfw.columns:
|
| 533 |
-
series = dfw[col_str]
|
| 534 |
-
else:
|
| 535 |
-
continue
|
| 536 |
-
|
| 537 |
-
x = dfw["timestamp"]
|
| 538 |
-
y = pd.to_numeric(series, errors="coerce")
|
| 539 |
-
|
| 540 |
-
if thr_mode == "excel":
|
| 541 |
-
key = (normalize(r["id"]), normalize(r["item"]), normalize(r["process"]))
|
| 542 |
-
LL, L, H, HH = thr_lookup.get(key, (np.nan, np.nan, np.nan, np.nan))
|
| 543 |
-
if all(pd.isna(v) for v in [LL, L, H, HH]):
|
| 544 |
-
LL, L, H, HH = auto_threshold(y)
|
| 545 |
-
else:
|
| 546 |
-
LL, L, H, HH = auto_threshold(y)
|
| 547 |
-
|
| 548 |
-
fig.add_trace(go.Scatter(
|
| 549 |
-
x=x, y=y, mode="lines",
|
| 550 |
-
name=f"{r['item']} ({r['id']})",
|
| 551 |
-
line=dict(color=LINE_COLOR, width=1.5),
|
| 552 |
-
hovertemplate="%{x}<br>%{y}<extra>"+f"{r['item']} ({r['id']})"+"</extra>"
|
| 553 |
-
))
|
| 554 |
-
|
| 555 |
-
colors = []
|
| 556 |
-
for v in y:
|
| 557 |
-
if pd.isna(v):
|
| 558 |
-
colors.append("rgba(0,0,0,0)")
|
| 559 |
-
else:
|
| 560 |
-
st = judge_status(v, LL, L, H, HH)
|
| 561 |
-
colors.append(STATUS_COLOR.get(st, STATUS_COLOR["OK"]))
|
| 562 |
|
| 563 |
-
|
| 564 |
-
|
| 565 |
-
|
| 566 |
-
|
| 567 |
-
|
| 568 |
-
hovertemplate="%{x}<br>%{y}<extra></extra>"
|
| 569 |
-
))
|
| 570 |
-
|
| 571 |
-
for val, label in [(LL, "LL"), (L, "L"), (H, "H"), (HH, "HH")]:
|
| 572 |
-
if pd.notna(val):
|
| 573 |
-
fig.add_hline(y=float(val), line=dict(width=1, dash="dot"),
|
| 574 |
-
annotation_text=label, annotation_position="top left")
|
| 575 |
-
|
| 576 |
-
fig.update_layout(
|
| 577 |
-
title=f"{process_name} | 計測項目: {tag}",
|
| 578 |
-
xaxis_title="timestamp",
|
| 579 |
-
yaxis_title=tag,
|
| 580 |
-
legend_title="系列",
|
| 581 |
-
margin=dict(l=10, r=10, t=40, b=10),
|
| 582 |
-
hovermode="x unified",
|
| 583 |
-
)
|
| 584 |
-
out[tag] = fig
|
| 585 |
-
return out
|
| 586 |
-
|
| 587 |
-
def figures_to_html(figs_by_tag: Dict[str, go.Figure]) -> str:
|
| 588 |
-
"""PlotlyJS埋め込み方式(ブラウザ側で描画)"""
|
| 589 |
-
parts = []
|
| 590 |
-
first = True
|
| 591 |
-
for tag, fig in figs_by_tag.items():
|
| 592 |
-
html = pio.to_html(fig, include_plotlyjs='cdn' if first else False, full_html=False)
|
| 593 |
-
parts.append(
|
| 594 |
-
f'<div style="margin:16px 0;border:1px solid #e5e7eb;border-radius:8px;padding:8px">'
|
| 595 |
-
f'<div style="font-weight:600;margin:4px 0 8px 0;">{tag}</div>'
|
| 596 |
-
f'{html}'
|
| 597 |
-
f'</div>'
|
| 598 |
-
)
|
| 599 |
-
first = False
|
| 600 |
-
return "\n".join(parts)
|
| 601 |
|
| 602 |
# ======================================
|
| 603 |
# グローバル状態(UI間共有)
|
|
@@ -620,7 +392,11 @@ def initialize_default_csv():
|
|
| 620 |
df = try_read_csv_3header(DEFAULT_CSV_PATH)
|
| 621 |
G_DF = df
|
| 622 |
G_PROCESS_MAP, G_PROCESSES = build_index_maps(df)
|
| 623 |
-
return
|
|
|
|
|
|
|
|
|
|
|
|
|
| 624 |
except Exception as e:
|
| 625 |
return f"⚠ 既定CSV読み込み失敗: {e}", gr.update(), []
|
| 626 |
return "ℹ CSVをアップロードしてください。", gr.update(), []
|
|
@@ -636,7 +412,11 @@ def on_csv_upload(file):
|
|
| 636 |
df = try_read_csv_3header(file.name if hasattr(file, "name") else file)
|
| 637 |
G_DF = df
|
| 638 |
G_PROCESS_MAP, G_PROCESSES = build_index_maps(df)
|
| 639 |
-
return
|
|
|
|
|
|
|
|
|
|
|
|
|
| 640 |
except Exception as e:
|
| 641 |
return f"❌ 読み込みエラー: {e}", gr.update(choices=[]), []
|
| 642 |
|
|
@@ -666,27 +446,14 @@ def update_items(process_name: str):
|
|
| 666 |
# デフォルトは全選択
|
| 667 |
return gr.update(choices=items, value=items)
|
| 668 |
|
| 669 |
-
def
|
|
|
|
| 670 |
"""
|
| 671 |
-
|
|
|
|
|
|
|
|
|
|
| 672 |
"""
|
| 673 |
-
if G_DF is None:
|
| 674 |
-
return "⚠ データ未読み込み", []
|
| 675 |
-
if not process_name:
|
| 676 |
-
return "⚠ プロセスを選択してください", []
|
| 677 |
-
if not items:
|
| 678 |
-
return "⚠ 項目を選択してください", []
|
| 679 |
-
|
| 680 |
-
figs = make_trend_figs(
|
| 681 |
-
G_DF, G_PROCESS_MAP, process_name, items, G_THRESHOLDS_DF, thr_mode, date_min, date_max
|
| 682 |
-
)
|
| 683 |
-
if not figs:
|
| 684 |
-
return "⚠ 図を生成できませんでした(データ無し or 条件不一致)", []
|
| 685 |
-
return f"✅ {process_name}: {len(figs)}枚のトレンド図を生成しました(計測項目タグごと)", figs
|
| 686 |
-
|
| 687 |
-
def render_any(process_name: str, items: List[str], display_mode: str, thr_mode_label: str,
|
| 688 |
-
date_min, date_max, page: int, tpp: int):
|
| 689 |
-
"""表示形式に応じて Plot を返す(個別はページ分割)。"""
|
| 690 |
if G_DF is None:
|
| 691 |
return "⚠ データ未読み込み", gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)
|
| 692 |
if not process_name:
|
|
@@ -696,20 +463,35 @@ def render_any(process_name: str, items: List[str], display_mode: str, thr_mode_
|
|
| 696 |
|
| 697 |
mode = "excel" if str(thr_mode_label).startswith("excel") else "auto"
|
| 698 |
|
| 699 |
-
|
| 700 |
-
|
|
|
|
|
|
|
|
|
|
| 701 |
if fig is None:
|
| 702 |
return "⚠ 図を生成できませんでした(データ無し or 条件不一致)", gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)
|
| 703 |
-
return "✅
|
| 704 |
-
|
| 705 |
-
|
| 706 |
-
|
| 707 |
-
|
|
|
|
|
|
|
| 708 |
)
|
| 709 |
if fig is None:
|
| 710 |
return "⚠ 図を生成できませんでした(データ無し or 条件不一致)", gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)
|
| 711 |
-
info = f"
|
| 712 |
-
return "✅
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 713 |
|
| 714 |
# ======================================
|
| 715 |
# UI
|
|
@@ -721,7 +503,7 @@ init_choices = init_proc_update.get("choices") if isinstance(init_proc_update, d
|
|
| 721 |
with gr.Blocks(css="""
|
| 722 |
.gradio-container {overflow: auto !important;}
|
| 723 |
""") as demo:
|
| 724 |
-
gr.Markdown("## トレンドグラフ専用アプリ(3
|
| 725 |
|
| 726 |
with gr.Row():
|
| 727 |
csv_uploader = gr.File(label="① 時系列CSV(3行ヘッダー)", file_count="single", file_types=[".csv"])
|
|
@@ -738,8 +520,8 @@ with gr.Blocks(css="""
|
|
| 738 |
|
| 739 |
# 表示形式の切り替え
|
| 740 |
display_mode = gr.Radio(
|
| 741 |
-
["
|
| 742 |
-
value="
|
| 743 |
label="表示形式"
|
| 744 |
)
|
| 745 |
|
|
@@ -754,17 +536,14 @@ with gr.Blocks(css="""
|
|
| 754 |
btn_render = gr.Button("トレンド図を生成", variant="primary")
|
| 755 |
|
| 756 |
msg = gr.Markdown()
|
| 757 |
-
|
| 758 |
-
|
| 759 |
-
#
|
| 760 |
with gr.Row():
|
| 761 |
-
|
| 762 |
page_no = gr.Number(value=1, label="ページ(1〜)", precision=0, visible=False)
|
| 763 |
page_info = gr.Markdown(visible=False)
|
| 764 |
|
| 765 |
-
# コールバック接続
|
| 766 |
-
# 既定CSVの手動代入は不要(生成時に付与済み)
|
| 767 |
-
|
| 768 |
# 2) CSVアップロードで更新
|
| 769 |
csv_uploader.change(
|
| 770 |
on_csv_upload,
|
|
@@ -788,22 +567,22 @@ with gr.Blocks(css="""
|
|
| 788 |
|
| 789 |
# 5) 図生成
|
| 790 |
btn_render.click(
|
| 791 |
-
fn=lambda proc, items, disp_mode, mode, dmin, dmax, p,
|
| 792 |
-
|
|
|
|
| 793 |
outputs=[msg, plot, page_info, page_no],
|
| 794 |
)
|
| 795 |
|
| 796 |
-
# 6)
|
| 797 |
def _toggle_page_controls(mode):
|
| 798 |
-
show = str(mode).startswith("
|
| 799 |
return gr.update(visible=show), gr.update(visible=show), gr.update(visible=show)
|
| 800 |
display_mode.change(
|
| 801 |
_toggle_page_controls,
|
| 802 |
inputs=[display_mode],
|
| 803 |
-
outputs=[
|
| 804 |
)
|
| 805 |
|
| 806 |
-
|
| 807 |
if __name__ == "__main__":
|
| 808 |
-
#
|
| 809 |
demo.launch(ssr_mode=False)
|
|
|
|
| 3 |
import numpy as np
|
| 4 |
import os
|
| 5 |
import re
|
| 6 |
+
from typing import Dict, Tuple, List, Optional, Callable
|
| 7 |
import plotly.graph_objects as go
|
| 8 |
from plotly.subplots import make_subplots
|
| 9 |
import plotly.io as pio
|
|
|
|
| 41 |
df = df.drop(df.columns[0], axis=1)
|
| 42 |
df.insert(0, "timestamp", ts)
|
| 43 |
|
|
|
|
|
|
|
| 44 |
return df
|
| 45 |
|
| 46 |
def col_tuple_to_str(col) -> str:
|
|
|
|
| 60 |
if isinstance(col, tuple) and len(col) >= 3:
|
| 61 |
col_id, item_name, process_name = str(col[0]), str(col[1]), str(col[2])
|
| 62 |
else:
|
|
|
|
| 63 |
parts = str(col).split("_")
|
| 64 |
if len(parts) >= 3:
|
| 65 |
col_id, item_name, process_name = parts[0], "_".join(parts[1:-1]), parts[-1]
|
| 66 |
else:
|
|
|
|
| 67 |
continue
|
| 68 |
rec = {
|
| 69 |
"col_tuple": col,
|
|
|
|
| 73 |
"col_str": col_tuple_to_str(col),
|
| 74 |
}
|
| 75 |
process_map.setdefault(process_name, []).append(rec)
|
|
|
|
| 76 |
processes = sorted(list(process_map.keys()), key=lambda x: normalize(x))
|
| 77 |
return process_map, processes
|
| 78 |
|
| 79 |
def extract_measure_tag(item_name: str) -> str:
|
| 80 |
"""
|
| 81 |
+
項目名末尾の計測項目タグを抽出。([...]優先→末尾語)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 82 |
"""
|
| 83 |
s = normalize(item_name)
|
| 84 |
m = re.search(r"\[([^\[\]]+)\]\s*$", s)
|
| 85 |
if m:
|
| 86 |
return m.group(1).strip()
|
|
|
|
| 87 |
tokens = re.split(r"\s+", s)
|
| 88 |
+
return tokens[-1] if tokens else s
|
| 89 |
+
|
| 90 |
+
def extract_category(item_name: str) -> str:
|
| 91 |
+
"""
|
| 92 |
+
項目名の「最後の '_' 以降」をカテゴリ名として返す。
|
| 93 |
+
例: '除害RO_A処理水_導電率' → '導電率' / '..._圧力' → '圧力'
|
| 94 |
+
'_' が無い場合は「処理水…」の後ろや末尾語を推定。
|
| 95 |
+
"""
|
| 96 |
+
s = normalize(item_name)
|
| 97 |
+
if "_" in s:
|
| 98 |
+
return s.split("_")[-1].strip()
|
| 99 |
+
m = re.search(r"処理水[_\s]*(.+)$", s)
|
| 100 |
+
if m:
|
| 101 |
+
return m.group(1).strip()
|
| 102 |
+
toks = re.split(r"\s+", s)
|
| 103 |
+
return toks[-1] if toks else s
|
| 104 |
|
| 105 |
# ======================================
|
| 106 |
# しきい値ハンドリング
|
|
|
|
| 114 |
return None
|
| 115 |
df = pd.read_excel(file)
|
| 116 |
df.columns = [normalize(c) for c in df.columns]
|
|
|
|
| 117 |
needed = {"ColumnID", "ItemName", "ProcessNo_ProcessName"}
|
| 118 |
if not needed.issubset(set(df.columns)):
|
|
|
|
| 119 |
rename_map = {}
|
| 120 |
for k in list(df.columns):
|
| 121 |
nk = normalize(str(k))
|
|
|
|
| 127 |
rename_map[k] = "ProcessNo_ProcessName"
|
| 128 |
if rename_map:
|
| 129 |
df = df.rename(columns=rename_map)
|
|
|
|
| 130 |
for c in ["LL", "L", "H", "HH"]:
|
| 131 |
if c in df.columns:
|
| 132 |
df[c] = pd.to_numeric(df[c], errors="coerce")
|
|
|
|
| 156 |
|
| 157 |
def auto_threshold(series: pd.Series) -> Tuple[float, float, float, float]:
|
| 158 |
"""
|
| 159 |
+
自動しきい値: mean ± std(LL/L/H/HH を mean±2sd / ±1sd とする)
|
|
|
|
| 160 |
"""
|
| 161 |
s = series.dropna()
|
| 162 |
if len(s) < 5:
|
|
|
|
| 176 |
return "H"
|
| 177 |
return "OK"
|
| 178 |
|
| 179 |
+
# カラー設定
|
| 180 |
STATUS_COLOR = {
|
| 181 |
+
"LL": "#2b6cb0",
|
| 182 |
+
"L": "#63b3ed",
|
| 183 |
+
"OK": "#a0aec0",
|
| 184 |
+
"H": "#f6ad55",
|
| 185 |
+
"HH": "#e53e3e",
|
| 186 |
}
|
| 187 |
+
LINE_COLOR = "#4a5568"
|
|
|
|
|
|
|
| 188 |
|
| 189 |
# ======================================
|
| 190 |
+
# 汎用:グループキーに応じて図を作る(サブプロ��ト)
|
| 191 |
+
# group_by: "all" / "category" / "item"
|
| 192 |
# ======================================
|
| 193 |
+
def _group_key_func(group_by: str) -> Callable[[dict], str]:
|
| 194 |
+
if group_by == "item":
|
| 195 |
+
return lambda rr: normalize(rr["item"])
|
| 196 |
+
if group_by == "category":
|
| 197 |
+
return lambda rr: extract_category(rr["item"])
|
| 198 |
+
# "all"
|
| 199 |
+
return lambda rr: "ALL"
|
| 200 |
+
|
| 201 |
+
def make_grouped_figure(
|
| 202 |
df: pd.DataFrame,
|
| 203 |
process_map: Dict[str, List[dict]],
|
| 204 |
process_name: str,
|
| 205 |
selected_items: List[str],
|
| 206 |
thr_df: Optional[pd.DataFrame],
|
| 207 |
+
thr_mode: str,
|
| 208 |
+
date_min: Optional[str],
|
| 209 |
+
date_max: Optional[str],
|
| 210 |
+
group_by: str, # "all" / "category" / "item"
|
| 211 |
+
_force_groups: Optional[List[str]] = None, # ページ分割用
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 212 |
) -> Optional[go.Figure]:
|
| 213 |
if df is None or not process_name:
|
| 214 |
return None
|
| 215 |
recs = process_map.get(process_name, [])
|
| 216 |
if not recs:
|
| 217 |
return None
|
| 218 |
+
selected = set([normalize(x) for x in (selected_items or [])])
|
| 219 |
+
recs = [r for r in recs if normalize(r["item"]) in selected]
|
| 220 |
if not recs:
|
| 221 |
return None
|
| 222 |
|
|
|
|
| 229 |
return None
|
| 230 |
|
| 231 |
thr_lookup = build_threshold_lookup(thr_df) if thr_mode == "excel" else {}
|
| 232 |
+
keyfunc = _group_key_func(group_by)
|
| 233 |
|
| 234 |
+
# グループ化(カテゴリ / 項目 / 一括ALL)
|
| 235 |
groups: Dict[str, List[dict]] = {}
|
| 236 |
for r in recs:
|
| 237 |
+
groups.setdefault(keyfunc(r), []).append(r)
|
| 238 |
+
|
| 239 |
+
group_names = list(groups.keys()) if _force_groups is None else _force_groups
|
| 240 |
+
if not group_names:
|
| 241 |
return None
|
| 242 |
|
| 243 |
+
rows = len(group_names)
|
|
|
|
| 244 |
if rows <= 1:
|
| 245 |
vspace = 0.03
|
| 246 |
else:
|
| 247 |
+
max_vs = (1.0 / (rows - 1)) - 1e-4
|
| 248 |
vspace = max(0.0, min(0.03, max_vs))
|
| 249 |
|
| 250 |
+
# サブタイトル
|
| 251 |
+
if group_by == "all":
|
| 252 |
+
subtitles = [f"{process_name} | すべての項目"] # 1行
|
| 253 |
+
elif group_by == "category":
|
| 254 |
+
subtitles = [f"{process_name} | 分類: {g}" for g in group_names]
|
| 255 |
+
else: # item
|
| 256 |
+
subtitles = [f"{process_name} | 項目: {g}" for g in group_names]
|
| 257 |
+
|
| 258 |
fig = make_subplots(
|
| 259 |
rows=rows, cols=1, shared_xaxes=True,
|
| 260 |
vertical_spacing=vspace,
|
| 261 |
+
subplot_titles=subtitles
|
| 262 |
)
|
| 263 |
|
| 264 |
+
# 各グループを1行にまとめて複数系列として描画
|
| 265 |
row_idx = 1
|
| 266 |
+
for gname in group_names:
|
| 267 |
+
cols = groups.get(gname, [])
|
| 268 |
for r in cols:
|
| 269 |
col = r["col_tuple"]
|
| 270 |
col_str = r["col_str"]
|
|
|
|
| 314 |
),
|
| 315 |
row=row_idx, col=1
|
| 316 |
)
|
| 317 |
+
# しきい値ガイドはグループ行に対して一律ではなく、系列ごとに別値になるので省略
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 318 |
row_idx += 1
|
| 319 |
|
| 320 |
fig.update_layout(
|
| 321 |
+
title=(
|
| 322 |
+
f"{process_name} | "
|
| 323 |
+
+ ("一括表示" if group_by == "all"
|
| 324 |
+
else "分類別表示(カテゴリ)" if group_by == "category"
|
| 325 |
+
else "個別表示(項目)")
|
| 326 |
+
),
|
| 327 |
xaxis_title="timestamp",
|
| 328 |
showlegend=True,
|
| 329 |
margin=dict(l=10, r=10, t=40, b=10),
|
| 330 |
hovermode="x unified",
|
| 331 |
+
height=max(420, 260 * rows),
|
| 332 |
)
|
| 333 |
return fig
|
| 334 |
|
| 335 |
+
# ページ分割(group_byごと)
|
| 336 |
+
def make_grouped_figure_paged(
|
| 337 |
df: pd.DataFrame,
|
| 338 |
process_map: Dict[str, List[dict]],
|
| 339 |
process_name: str,
|
|
|
|
| 343 |
date_min: Optional[str],
|
| 344 |
date_max: Optional[str],
|
| 345 |
page: int,
|
| 346 |
+
per_page: int,
|
| 347 |
+
group_by: str, # "category" or "item"
|
| 348 |
) -> Tuple[Optional[go.Figure], int, List[str]]:
|
|
|
|
| 349 |
recs = process_map.get(process_name, [])
|
| 350 |
if not recs:
|
| 351 |
return None, 0, []
|
| 352 |
+
selected = set([normalize(x) for x in (selected_items or [])])
|
| 353 |
+
recs = [r for r in recs if normalize(r["item"]) in selected]
|
| 354 |
if not recs:
|
| 355 |
return None, 0, []
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 356 |
|
| 357 |
+
keyfunc = _group_key_func(group_by)
|
| 358 |
groups: Dict[str, List[dict]] = {}
|
| 359 |
for r in recs:
|
| 360 |
+
groups.setdefault(keyfunc(r), []).append(r)
|
| 361 |
+
all_names = list(groups.keys())
|
| 362 |
+
total_pages = max(1, int(np.ceil(len(all_names) / max(1, per_page))))
|
| 363 |
+
page = int(max(1, min(page, total_pages)))
|
| 364 |
+
start = (page - 1) * per_page
|
| 365 |
+
end = start + per_page
|
| 366 |
+
names_slice = all_names[start:end]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 367 |
|
| 368 |
+
fig = make_grouped_figure(
|
| 369 |
+
df, process_map, process_name, selected_items, thr_df, thr_mode,
|
| 370 |
+
date_min, date_max, group_by=group_by, _force_groups=names_slice
|
| 371 |
+
)
|
| 372 |
+
return fig, total_pages, all_names
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 373 |
|
| 374 |
# ======================================
|
| 375 |
# グローバル状態(UI間共有)
|
|
|
|
| 392 |
df = try_read_csv_3header(DEFAULT_CSV_PATH)
|
| 393 |
G_DF = df
|
| 394 |
G_PROCESS_MAP, G_PROCESSES = build_index_maps(df)
|
| 395 |
+
return (
|
| 396 |
+
f"✅ 既定CSVを読み込みました: {DEFAULT_CSV_PATH}",
|
| 397 |
+
gr.update(choices=G_PROCESSES, value=(G_PROCESSES[0] if G_PROCESSES else None)),
|
| 398 |
+
G_PROCESSES
|
| 399 |
+
)
|
| 400 |
except Exception as e:
|
| 401 |
return f"⚠ 既定CSV読み込み失敗: {e}", gr.update(), []
|
| 402 |
return "ℹ CSVをアップロードしてください。", gr.update(), []
|
|
|
|
| 412 |
df = try_read_csv_3header(file.name if hasattr(file, "name") else file)
|
| 413 |
G_DF = df
|
| 414 |
G_PROCESS_MAP, G_PROCESSES = build_index_maps(df)
|
| 415 |
+
return (
|
| 416 |
+
f"✅ CSV読み込み: {df.shape[0]}行 × {df.shape[1]}列",
|
| 417 |
+
gr.update(choices=G_PROCESSES, value=(G_PROCESSES[0] if G_PROCESSES else None)),
|
| 418 |
+
G_PROCESSES
|
| 419 |
+
)
|
| 420 |
except Exception as e:
|
| 421 |
return f"❌ 読み込みエラー: {e}", gr.update(choices=[]), []
|
| 422 |
|
|
|
|
| 446 |
# デフォルトは全選択
|
| 447 |
return gr.update(choices=items, value=items)
|
| 448 |
|
| 449 |
+
def render_any(process_name: str, items: List[str], display_mode: str, thr_mode_label: str,
|
| 450 |
+
date_min, date_max, page: int, per_page: int):
|
| 451 |
"""
|
| 452 |
+
表示モードに応じて Plot を返す。
|
| 453 |
+
- 一括表示: 全選択項目を1枚の行(ALL)にまとめる
|
| 454 |
+
- 分類別表示: 末尾カテゴリごとにサブプロット。多い場合はページ分割
|
| 455 |
+
- 個別表示: 項目ごとにサブプロット。多い場合はページ分割
|
| 456 |
"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 457 |
if G_DF is None:
|
| 458 |
return "⚠ データ未読み込み", gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)
|
| 459 |
if not process_name:
|
|
|
|
| 463 |
|
| 464 |
mode = "excel" if str(thr_mode_label).startswith("excel") else "auto"
|
| 465 |
|
| 466 |
+
# 一括表示
|
| 467 |
+
if str(display_mode).startswith("一括"):
|
| 468 |
+
fig = make_grouped_figure(
|
| 469 |
+
G_DF, G_PROCESS_MAP, process_name, items, G_THRESHOLDS_DF, mode, date_min, date_max, group_by="all"
|
| 470 |
+
)
|
| 471 |
if fig is None:
|
| 472 |
return "⚠ 図を生成できませんでした(データ無し or 条件不一致)", gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)
|
| 473 |
+
return "✅ 一括表示を描画しました", gr.update(value=fig, visible=True), gr.update(visible=False), gr.update(visible=False)
|
| 474 |
+
|
| 475 |
+
# 分類別表示(カテゴリ)
|
| 476 |
+
if str(display_mode).startswith("分類"):
|
| 477 |
+
fig, total_pages, all_names = make_grouped_figure_paged(
|
| 478 |
+
G_DF, G_PROCESS_MAP, process_name, items, G_THRESHOLDS_DF, mode,
|
| 479 |
+
date_min, date_max, page=int(page), per_page=int(per_page), group_by="category"
|
| 480 |
)
|
| 481 |
if fig is None:
|
| 482 |
return "⚠ 図を生成できませんでした(データ無し or 条件不一致)", gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)
|
| 483 |
+
info = f"分類(カテゴリ)数: {len(all_names)} | ページ {int(max(1,min(page, total_pages)))} / {total_pages} | 件/ページ={int(per_page)}"
|
| 484 |
+
return "✅ 分類別表示(末尾語カテゴリ)を描画しました", gr.update(value=fig, visible=True), gr.update(value=info, visible=True), gr.update(visible=True)
|
| 485 |
+
|
| 486 |
+
# 個別表示(項目)
|
| 487 |
+
fig, total_pages, all_names = make_grouped_figure_paged(
|
| 488 |
+
G_DF, G_PROCESS_MAP, process_name, items, G_THRESHOLDS_DF, mode,
|
| 489 |
+
date_min, date_max, page=int(page), per_page=int(per_page), group_by="item"
|
| 490 |
+
)
|
| 491 |
+
if fig is None:
|
| 492 |
+
return "⚠ 図を生成できませんでした(データ無し or 条件不一致)", gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)
|
| 493 |
+
info = f"項目数: {len(all_names)} | ページ {int(max(1,min(page, total_pages)))} / {total_pages} | 件/ページ={int(per_page)}"
|
| 494 |
+
return "✅ 個別表示(項目)を描画しました", gr.update(value=fig, visible=True), gr.update(value=info, visible=True), gr.update(visible=True)
|
| 495 |
|
| 496 |
# ======================================
|
| 497 |
# UI
|
|
|
|
| 503 |
with gr.Blocks(css="""
|
| 504 |
.gradio-container {overflow: auto !important;}
|
| 505 |
""") as demo:
|
| 506 |
+
gr.Markdown("## トレンドグラフ専用アプリ(3行ヘッダー対応・プロセス別・分類/個別・閾値色分け)")
|
| 507 |
|
| 508 |
with gr.Row():
|
| 509 |
csv_uploader = gr.File(label="① 時系列CSV(3行ヘッダー)", file_count="single", file_types=[".csv"])
|
|
|
|
| 520 |
|
| 521 |
    # Radio toggle selecting how trends are rendered:
    # combined (all in one), grouped by trailing-word category, or one subplot per item.
    display_mode = gr.Radio(
        ["一括表示", "分類別表示(カテゴリ)", "個別表示(項目)"],
        value="一括表示",
        label="表示形式"
    )
|
| 527 |
|
|
|
|
| 536 |
btn_render = gr.Button("トレンド図を生成", variant="primary")
|
| 537 |
|
| 538 |
    msg = gr.Markdown()
    plot = gr.Plot(label="トレンド図", visible=True)

    # Pagination controls (shown only for the category / per-item display modes;
    # they start hidden because the default mode is the combined "一括" view).
    with gr.Row():
        per_page = gr.Slider(1, 12, value=8, step=1, label="件/ページ(分類別・個別)", visible=False)
        page_no = gr.Number(value=1, label="ページ(1〜)", precision=0, visible=False)
        page_info = gr.Markdown(visible=False)
|
| 546 |
|
|
|
|
|
|
|
|
|
|
| 547 |
# 2) CSVアップロードで更新
|
| 548 |
csv_uploader.change(
|
| 549 |
on_csv_upload,
|
|
|
|
| 567 |
|
| 568 |
# 5) 図生成
|
| 569 |
btn_render.click(
|
| 570 |
+
fn=lambda proc, items, disp_mode, mode, dmin, dmax, p, pp:
|
| 571 |
+
render_any(proc, items, disp_mode, mode, dmin, dmax, p, pp),
|
| 572 |
+
inputs=[process_dd, items_cb, display_mode, thr_mode, date_min, date_max, page_no, per_page],
|
| 573 |
outputs=[msg, plot, page_info, page_no],
|
| 574 |
)
|
| 575 |
|
| 576 |
+
# 6) 表示形式に応じたコントロール表示切替
|
| 577 |
def _toggle_page_controls(mode):
|
| 578 |
+
show = not str(mode).startswith("一括")
|
| 579 |
return gr.update(visible=show), gr.update(visible=show), gr.update(visible=show)
|
| 580 |
display_mode.change(
|
| 581 |
_toggle_page_controls,
|
| 582 |
inputs=[display_mode],
|
| 583 |
+
outputs=[per_page, page_no, page_info],
|
| 584 |
)
|
| 585 |
|
|
|
|
| 586 |
if __name__ == "__main__":
    # SSR off (stabilizes Plotly embedding and re-rendering in Gradio).
    demo.launch(ssr_mode=False)
|