from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware

from .schemas import GenerateRequest, EmailResponse, ProposalResponse, LintRequest, LintResponse
from .prompt_templates import EMAIL_SYSTEM, EMAIL_USER, PROPOSAL_SYSTEM, PROPOSAL_USER
from .openai_utils import chat_complete, split_subject_body
from .hf_utils import toxicity_score, sentiment_polarity
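
# For orientation only: a minimal sketch of how the models imported from
# .schemas are assumed to be shaped. Field names are inferred from how the
# handlers below construct and read them; GenerateRequest's fields are not
# visible in this module, so only the response/lint models are sketched.
#
#   class EmailResponse(BaseModel):
#       subject: str
#       body: str
#       quality: dict[str, float]
#       warnings: list[str]
#
#   class ProposalResponse(BaseModel):
#       outline: list[str]
#       executive_summary: str
#
#   class LintRequest(BaseModel):
#       text: str
#
#   class LintResponse(BaseModel):
#       issues: list[str]
#       toxicity: float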

app = FastAPI(title="Sales Writer API", version="0.1.0")

# Shared threshold: both the email quality check and the lint endpoint flag
# text whose toxicity score exceeds this value.
TOXICITY_THRESHOLD = 0.2

# Allow the local Vite dev server (default port 5173) to call the API.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["http://localhost:5173"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

@app.get("/health")
def health():
    return {"status": "ok"}

| @app.post("/generate/email", response_model=EmailResponse) |
| async def generate_email(req: GenerateRequest): |
| user_prompt = EMAIL_USER.format(**req.model_dump()) |
| raw = chat_complete(EMAIL_SYSTEM, user_prompt) |
| subject, body = split_subject_body(raw) |
|
|
| tox = await toxicity_score(body) |
| sent = await sentiment_polarity(body) |
| warnings = [] |
| if tox > 0.2: |
| warnings.append("トーンが攻撃的/不適切の可能性があります。表現を柔らかくしてください。") |
|
|
| quality = {"toxicity": tox, "sentiment": sent} |
| return EmailResponse(subject=subject, body=body, quality=quality, warnings=warnings) |
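
# Example call (sketch): the exact GenerateRequest fields live in .schemas and
# are not visible from this module, so the payload keys are left as placeholders.
#
#   curl -X POST http://localhost:8000/generate/email \
#     -H "Content-Type: application/json" \
#     -d '{"<field>": "<value>"}'
#
# The response carries the parsed subject/body plus the quality scores, e.g.
# {"subject": "...", "body": "...", "quality": {"toxicity": 0.01, "sentiment": 0.8}, "warnings": []}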

@app.post("/generate/proposal", response_model=ProposalResponse)
async def generate_proposal(req: GenerateRequest):
    user_prompt = PROPOSAL_USER.format(**req.model_dump())
    text = chat_complete(PROPOSAL_SYSTEM, user_prompt)

    # Normalize the model output: drop blank lines and leading bullet markers.
    lines = [ln.strip("-• ").strip() for ln in text.splitlines() if ln.strip()]

    # Split at the "エグゼクティブサマリー" ("executive summary") heading:
    # lines before it form the outline, lines after it form the summary, and
    # the heading itself is dropped from both parts.
    marker = next((i for i, ln in enumerate(lines) if "エグゼクティブサマリー" in ln), None)
    if marker is None:
        outline, summary = lines, ""
    else:
        outline, summary = lines[:marker], "\n".join(lines[marker + 1:])
    # Fallback message: "(failed to extract the summary)".
    return ProposalResponse(outline=outline[:8], executive_summary=summary or "(サマリー抽出に失敗しました)")
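
# Worked example of the parsing above, assuming the model returns bullet
# lines followed by the summary heading and its body:
#
#   "- Problem overview"       -> outline[0] == "Problem overview"
#   "- Proposed solution"      -> outline[1] == "Proposed solution"
#   "エグゼクティブサマリー"     -> marker line, dropped from both parts
#   "This proposal ..."        -> joined into executive_summary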

@app.post("/lint", response_model=LintResponse)
async def lint_text(req: LintRequest):
    tox = await toxicity_score(req.text)
    issues = []
    if tox > TOXICITY_THRESHOLD:
        # "May contain inappropriate or aggressive wording."
        issues.append("不適切・攻撃的な表現を含む可能性")

    # Phrases that tend to read as overhyped advertising claims, roughly:
    # "absolutely", "guaranteed profit", "limited-time only", "everything free".
    bad_patterns = ["絶対に", "必ず儲かる", "今だけ", "無料で全部"]
    if any(p in req.text for p in bad_patterns):
        # "Wording that could be taken as exaggerated advertising."
        issues.append("誇大広告と見なされる恐れのある表現")
    return LintResponse(issues=issues, toxicity=tox)
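
# Example call: /lint accepts a bare text payload (LintRequest exposes a single
# "text" field, as used above) and returns the flagged issues plus the score.
# The sample text ("a special plan guaranteed to make money, available only
# now!") trips two of the bad_patterns entries:
#
#   curl -X POST http://localhost:8000/lint \
#     -H "Content-Type: application/json" \
#     -d '{"text": "今だけ必ず儲かる特別プラン！"}'
#
#   -> {"issues": ["誇大広告と見なされる恐れのある表現"], "toxicity": ...}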
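
# Local run (sketch): assuming this module lives at app/main.py and uvicorn is
# installed, the usual invocation is:
#
#   uvicorn app.main:app --reload --port 8000
#
# The CORS origin above then matches a Vite frontend on http://localhost:5173.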