# Source: Hugging Face Space upload "app.py" by Renecto (commit 6e1c017, verified).
"""
Level Bridge Chat -- Main app
Gradio Chat UI + FastAPI Bridge API endpoint, colocated.
Embedding (iframe):
<iframe src="https://your-space.hf.space?campaign_name=X&industry=EC&cvr=2.1" ...></iframe>
"""
from __future__ import annotations
import os
import base64
from pathlib import Path
import gradio as gr
from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse
from bridge_models import BridgeRequest, DashboardContext, Metrics
from bridge_service import process_request
from session_store import store
# ---------------------------------------------------------------------------
# FastAPI app
# ---------------------------------------------------------------------------
# Bare FastAPI app holding the Bridge API routes; the Gradio UI is mounted
# onto it at module bottom (gr.mount_gradio_app) so API and UI share one port.
fastapi_app = FastAPI(title="Level Bridge Chat API")
@fastapi_app.post("/api/chat/bridge")
async def bridge_endpoint(request: Request):
    """Bridge API endpoint: validate the JSON body into a BridgeRequest and
    delegate to the service layer.

    Returns a 422 with a structured error payload when the body is not valid
    JSON or fails BridgeRequest validation; otherwise the serialized
    response from process_request.
    """
    try:
        # request.json() raises on malformed JSON — keep it inside the try so
        # a bad body yields a clean 422 instead of an unhandled 500.
        body = await request.json()
        req = BridgeRequest(**body)
    except Exception as e:  # JSON decode errors, pydantic ValidationError, etc.
        return JSONResponse(
            status_code=422,
            content={"ok": False, "error_code": "VALIDATION_ERROR", "message": str(e)},
        )
    result = process_request(req)
    return JSONResponse(content=result.model_dump())
@fastapi_app.get("/healthz")
async def healthz():
    """Liveness probe: reports OK plus the current session count."""
    session_count = store.count()
    return {"ok": True, "sessions": session_count}
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def _parse_float(value: str | None) -> float | None:
if value is None:
return None
try:
return float(str(value).replace("%", "").replace("円", "").strip())
except ValueError:
return None
def _image_to_base64(image_path: str | None) -> str | None:
if not image_path:
return None
try:
with open(image_path, "rb") as f:
data = f.read()
ext = Path(image_path).suffix.lower().lstrip(".")
mime = {"jpg": "jpeg", "jpeg": "jpeg", "png": "png", "webp": "webp", "gif": "gif"}.get(ext, "png")
return f"data:image/{mime};base64," + base64.b64encode(data).decode("utf-8")
except Exception:
return None
def _format_response(result) -> str:
"""Format BridgeResponse or BridgeErrorResponse as markdown chat message."""
if not result.ok:
msg = f"**エラー**: {result.message}"
if hasattr(result, "fallback") and result.fallback and result.fallback.get("next_level_preview"):
nlp = result.fallback["next_level_preview"]
if nlp.get("needed_info"):
items = ", ".join(i["label"] for i in nlp["needed_info"])
msg += f"\n\n次に **{items}** を追加すると提案が可能になります。"
return msg
b = result.best_now
n = result.next_level_preview
level_label = {"level1": "Lv.1(基本情報)", "level2": "Lv.2(数値あり)", "level3": "Lv.3(画像あり)"}.get(
result.inferred_level, result.inferred_level
)
confidence_label = {"low": "低", "mid": "中", "high": "高"}.get(b.confidence, b.confidence)
lines = [
f"### 現在の提案 [{level_label} / 確信度: {confidence_label}]",
"",
b.summary,
"",
"**推奨アクション**",
]
for i, action in enumerate(b.actions, 1):
lines.append(f"{i}. {action}")
if n.next_level:
lines += [
"",
"---",
f"### 次レベル({n.next_level})で可能になること",
"",
]
if n.needed_info:
items = "、".join(f"`{i.label}`(例: {i.example})" for i in n.needed_info)
lines.append(f"**必要な情報**: {items}")
lines.append("")
for item in n.what_will_be_possible:
lines.append(f"- {item}")
if n.expected_impact:
lines += ["", f"*{n.expected_impact}*"]
else:
lines += ["", "---", "*全情報が揃っています。現在の提案は最高精度です。*"]
if result.follow_up_question:
lines += ["", f"**{result.follow_up_question}**"]
return "\n".join(lines)
# ---------------------------------------------------------------------------
# Gradio UI
# ---------------------------------------------------------------------------
def build_gradio_ui() -> gr.Blocks:
    """Build the Gradio chat UI.

    State handling: `session_id_state` holds the bridge session id returned
    by the service; `context_state` holds dashboard values seeded from the
    embedding iframe's query params (campaign_name, industry, cvr, ctr, cpa).
    The context rides along with the next request only — the service persists
    it in the session store, so the local copy is cleared after each send.
    """
    # Level-indicator wording (intentionally more verbose than the labels
    # used inside chat messages by _format_response).
    level_labels = {
        "level1": "Lv.1(基本情報のみ)",
        "level2": "Lv.2(定量データあり)",
        "level3": "Lv.3(画像あり・最高精度)",
    }

    def _level_markdown(level: str) -> str:
        # Unknown level values fall back to the raw string.
        return f"**情報レベル**: {level_labels.get(level, level)}"

    def _build_dashboard_context(ctx: dict, image_b64: str | None = None):
        """Convert the context dict (+ optional image) to a DashboardContext.

        Uses `is not None` rather than truthiness so a legitimate zero metric
        (e.g. cvr=0.0) is not silently dropped.
        """
        metrics_obj = None
        if any(ctx.get(k) is not None for k in ("cvr", "ctr", "cpa")):
            metrics_obj = Metrics(cvr=ctx.get("cvr"), ctr=ctx.get("ctr"), cpa=ctx.get("cpa"))
        return DashboardContext(
            campaign_name=ctx.get("campaign_name"),
            industry=ctx.get("industry"),
            metrics=metrics_obj,
            image_base64=image_b64,
        )

    with gr.Blocks(title="Level Bridge Chat") as demo:
        gr.Markdown("## Level Bridge Chat")
        gr.Markdown(
            "ダッシュボードの情報をもとに、広告改善提案を行います。"
            " 情報を追加するたびに提案精度が向上します。"
        )
        # State
        session_id_state = gr.State(None)
        context_state = gr.State({})
        # Level indicator
        level_display = gr.Markdown("**情報レベル**: 初期化中...")
        chatbot = gr.Chatbot(
            label="提案チャット",
            height=480,
            # History entries are {"role": ..., "content": ...} dicts, which
            # requires the openai-style messages format.
            # NOTE(review): assumes Gradio >= 4.x where `type="messages"`
            # exists — confirm against the pinned gradio version.
            type="messages",
        )
        with gr.Row():
            msg_input = gr.Textbox(
                placeholder="メッセージを入力(例: CVR 2.1%、CTR 0.8% です)",
                label="メッセージ",
                scale=4,
                show_label=False,
            )
            send_btn = gr.Button("送信", variant="primary", scale=1)
        with gr.Row():
            image_upload = gr.Image(
                label="クリエイティブ画像(任意)",
                type="filepath",
                height=160,
            )

        # --- Core chat function ---
        def send_message(
            message: str,
            history: list,
            session_id: str | None,
            image_path: str | None,
            ctx: dict,
        ):
            """Send one user turn to the bridge service and append the reply.

            Returns updated values for (chatbot, session_id_state, msg_input,
            image_upload, context_state, level_display).
            """
            if not message.strip() and not image_path:
                # Nothing to send: keep all state as-is.
                return history, session_id, "", None, ctx, gr.update()
            image_b64 = _image_to_base64(image_path)
            # Attach a dashboard_context only when there are new values.
            dc = None
            if ctx or image_b64:
                dc = _build_dashboard_context(ctx, image_b64)
            req = BridgeRequest(
                session_id=session_id,
                message=message,
                dashboard_context=dc,
            )
            result = process_request(req)
            response_text = _format_response(result)
            new_session_id = getattr(result, "session_id", session_id)
            new_level = getattr(result, "inferred_level", "level1")
            history = history + [
                {"role": "user", "content": message or "(画像を送信)"},
                {"role": "assistant", "content": response_text},
            ]
            # Context was delivered with this turn and is now stored in the
            # session, so clear the local copy.
            return (
                history,
                new_session_id,
                "",    # clear message input
                None,  # clear image
                {},    # clear context
                gr.update(value=_level_markdown(new_level)),
            )

        # --- Auto-initialize from URL query params ---
        def on_load(request: gr.Request):
            """Seed context from the iframe query params and auto-run turn 1."""
            params = dict(request.query_params)
            ctx = {}
            for key in ("campaign_name", "industry"):
                if params.get(key):
                    ctx[key] = params[key]
            for key in ("cvr", "ctr", "cpa"):
                if params.get(key):
                    # _parse_float may return None for unparseable values;
                    # _build_dashboard_context filters those out.
                    ctx[key] = _parse_float(params[key])
            if not ctx:
                return [], None, ctx, gr.update(value="**情報レベル**: 未初期化(URLパラメータなし)")
            # Auto-send Turn 1 with the dashboard context.
            req = BridgeRequest(
                session_id=None,
                message="",
                dashboard_context=_build_dashboard_context(ctx),
            )
            result = process_request(req)
            response_text = _format_response(result)
            new_session_id = getattr(result, "session_id", None)
            new_level = getattr(result, "inferred_level", "level1")
            initial_history = [{"role": "assistant", "content": response_text}]
            return (
                initial_history,
                new_session_id,
                {},  # context is now stored server-side
                gr.update(value=_level_markdown(new_level)),
            )

        # Wire events
        demo.load(
            on_load,
            inputs=None,
            outputs=[chatbot, session_id_state, context_state, level_display],
        )
        send_btn.click(
            send_message,
            inputs=[msg_input, chatbot, session_id_state, image_upload, context_state],
            outputs=[chatbot, session_id_state, msg_input, image_upload, context_state, level_display],
        )
        msg_input.submit(
            send_message,
            inputs=[msg_input, chatbot, session_id_state, image_upload, context_state],
            outputs=[chatbot, session_id_state, msg_input, image_upload, context_state, level_display],
        )
    return demo
# ---------------------------------------------------------------------------
# Mount and launch
# ---------------------------------------------------------------------------
gradio_ui = build_gradio_ui()
# Mount the Gradio UI at the FastAPI root so the API routes (/api/chat/bridge,
# /healthz) and the chat UI are served from one process/port.
app = gr.mount_gradio_app(fastapi_app, gradio_ui, path="/")
if __name__ == "__main__":
    # Local / Hugging Face Spaces entrypoint; PORT defaults to 7860
    # (the Spaces convention).
    import uvicorn
    port = int(os.environ.get("PORT", "7860"))
    uvicorn.run(app, host="0.0.0.0", port=port, log_level="info")