"""
CCPA Compliance Analyzer - FastAPI server
Uses Ollama (llama3.2:3b or similar) for LLM inference with CCPA RAG context.
Falls back to rule-based analysis if LLM is unavailable.
"""
import os
import json
import re
import logging
from contextlib import asynccontextmanager
from typing import Optional
import httpx
from fastapi import FastAPI
from fastapi.responses import JSONResponse
from pydantic import BaseModel
from ccpa_knowledge import CCPA_SECTIONS
# Module-wide logging at INFO so LLM round-trips and verdicts show in server logs.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# ── Config ──────────────────────────────────────────────────────────────────
# Base URL of the Ollama server and chat model name; both overridable via
# environment variables so deployments can point at a different host/model.
OLLAMA_HOST = os.getenv("OLLAMA_HOST", "http://localhost:11434")
MODEL_NAME = os.getenv("MODEL_NAME", "llama3.2:3b")
# Full CCPA context for the LLM
# Renders every statute section from ccpa_knowledge as "**<name>**:\n<text>",
# joined by blank lines — this is the RAG context embedded in the system prompt.
CCPA_CONTEXT = "\n\n".join([
f"**{section}**:\n{text}"
for section, text in CCPA_SECTIONS.items()
])
# System prompt pinning the model to strict, JSON-only verdicts grounded in the
# statute text above. The doubled braces ({{ }}) render as literal { } because
# this is an f-string.
SYSTEM_PROMPT = f"""You are a strict CCPA (California Consumer Privacy Act) compliance analyst.
Your job is to analyze business practice descriptions and determine if they violate CCPA law.
Here is the relevant CCPA statute text:
{CCPA_CONTEXT}
Rules:
1. Analyze only against CCPA violations listed above.
2. If the practice clearly violates one or more sections, output harmful=true and list ALL violated sections.
3. If the practice is compliant or unrelated to CCPA, output harmful=false and empty articles list.
4. Be strict: if there is a clear violation, flag it. Do not give benefit of the doubt for clear violations.
5. You MUST respond with ONLY a valid JSON object. No explanation, no markdown, no extra text.
Response format (ONLY THIS, nothing else):
{{"harmful": true, "articles": ["Section 1798.XXX", "Section 1798.YYY"]}}
or
{{"harmful": false, "articles": []}}"""
# ── Lifespan ─────────────────────────────────────────────────────────────────
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Startup/shutdown hook: probe Ollama once at boot and log the outcome.

    A failed probe is non-fatal — /analyze degrades to the rule-based
    fallback when the model is unreachable.
    """
    logger.info("Starting CCPA Compliance Analyzer...")
    # Warm up Ollama connection
    try:
        async with httpx.AsyncClient(timeout=30) as probe:
            tags = await probe.get(f"{OLLAMA_HOST}/api/tags")
            logger.info(f"Ollama available: {tags.status_code == 200}")
    except Exception as e:
        logger.warning(f"Ollama not available at startup: {e}")
    yield
    logger.info("Shutting down...")


app = FastAPI(title="CCPA Compliance Analyzer", lifespan=lifespan)
# ── Models ───────────────────────────────────────────────────────────────────
class AnalyzeRequest(BaseModel):
    """Request body for POST /analyze."""
    prompt: str  # free-text description of the business practice to review
class AnalyzeResponse(BaseModel):
    """Verdict shape produced by /analyze.

    NOTE(review): declared but not referenced by the endpoint, which returns a
    plain JSONResponse of the same shape — consider wiring it as response_model.
    """
    harmful: bool  # True iff at least one CCPA section appears violated
    articles: list[str]  # violated sections, e.g. "Section 1798.105"
# ── Rule-based fallback ───────────────────────────────────────────────────────
def rule_based_analyze(prompt: str) -> dict:
    """Deterministic keyword-based CCPA violation detector (LLM fallback).

    Matches lowercase substrings of *prompt* against per-section keyword
    patterns and accumulates every CCPA section that appears violated.

    Args:
        prompt: Free-text description of a business practice.

    Returns:
        ``{"harmful": bool, "articles": [...]}`` where ``articles`` is the
        sorted list of matched "Section 1798.XXX" strings and ``harmful`` is
        True iff at least one section matched.
    """
    p = prompt.lower()
    found_sections: set[str] = set()

    # Section 1798.100 - collection without notice/disclosure.
    if "privacy policy" in p and any(
        k in p for k in ("doesn't mention", "does not mention", "without mentioning", "not mention")
    ):
        found_sections.add("Section 1798.100")
    if ("without informing" in p or "without notice" in p) and "collect" in p:
        found_sections.add("Section 1798.100")

    # Section 1798.105 - ignoring or refusing deletion requests.
    if any(k in p for k in ("ignoring", "ignore", "refusing", "keeping all", "not comply")):
        if any(k in p for k in ("deletion", "delete", "removal", "request")):
            found_sections.add("Section 1798.105")

    # Section 1798.120 - selling/sharing without opt-out, or involving minors.
    if any(k in p for k in ("selling", "sell", "sharing")):
        if "without" in p and any(k in p for k in ("opt-out", "opt out", "informing", "notice", "consent")):
            found_sections.add("Section 1798.120")
        # An undisclosed sale also implies undisclosed collection/use.
        if "without informing" in p or "without notice" in p:
            found_sections.add("Section 1798.100")
    if any(k in p for k in ("14-year", "13-year", "minor", "child", "underage", "under 16", "under 13")):
        found_sections.add("Section 1798.120")

    # Section 1798.125 - discriminating (e.g. on price) against opted-out users.
    if any(k in p for k in ("higher price", "charge more", "discriminat")):
        found_sections.add("Section 1798.125")
    if "opted out" in p and any(k in p for k in ("price", "pricing", "charge")):
        found_sections.add("Section 1798.125")

    # Section 1798.121 - sensitive personal information used without consent.
    if any(k in p for k in ("sensitive personal information", "biometric data", "precise geolocation")):
        if any(k in p for k in ("without consent", "without notice", "without informing", "without authorization")):
            found_sections.add("Section 1798.121")

    # sorted(set) yields a deterministic list; non-empty set means harmful.
    return {"harmful": bool(found_sections), "articles": sorted(found_sections)}
# ── LLM analysis ──────────────────────────────────────────────────────────────
def _parse_verdict(content: str) -> Optional[dict]:
    """Extract a {"harmful": ..., "articles": ...} object from raw LLM text.

    Tries a direct parse first, then a regex search for the first flat
    brace-delimited JSON object embedded in surrounding prose/markdown.
    Returns None when no well-formed verdict object can be recovered.
    """
    # Direct parse: the model followed the JSON-only instruction exactly.
    try:
        result = json.loads(content.strip())
    except json.JSONDecodeError:
        result = None
    # isinstance guard: the model may emit a bare list/number, which would
    # otherwise make the membership test raise TypeError.
    if isinstance(result, dict) and "harmful" in result and "articles" in result:
        return result
    # Fallback: pull the first flat {...} object out of any extra prose.
    match = re.search(r'\{[^{}]+\}', content, re.DOTALL)
    if match:
        try:
            result = json.loads(match.group())
        except json.JSONDecodeError:
            return None
        if isinstance(result, dict) and "harmful" in result and "articles" in result:
            return result
    return None


async def llm_analyze(prompt: str) -> Optional[dict]:
    """Call the Ollama chat endpoint for a CCPA verdict on *prompt*.

    Returns:
        The parsed {"harmful": ..., "articles": ...} dict, or None when the
        HTTP call fails or the response cannot be parsed — the caller then
        falls back to rule_based_analyze.
    """
    payload = {
        "model": MODEL_NAME,
        "messages": [
            {"role": "system", "content": SYSTEM_PROMPT},
            {"role": "user", "content": f"Analyze this business practice for CCPA violations:\n\n{prompt}"}
        ],
        "stream": False,  # one JSON body instead of a token stream
        "options": {
            "temperature": 0.0,   # deterministic decoding
            "num_predict": 200,   # verdict JSON is short; cap generation
        }
    }
    try:
        async with httpx.AsyncClient(timeout=90) as client:
            resp = await client.post(f"{OLLAMA_HOST}/api/chat", json=payload)
            resp.raise_for_status()
            data = resp.json()
            content = data.get("message", {}).get("content", "")
            logger.info(f"LLM raw response: {content[:200]}")
            result = _parse_verdict(content)
            if result is None:
                logger.warning("Could not parse LLM response as JSON")
            return result
    except Exception as e:
        # Network/HTTP failures are expected when Ollama is down; the caller
        # has a deterministic fallback, so log and degrade gracefully.
        logger.warning(f"LLM call failed: {e}")
        return None
# ── Endpoints ─────────────────────────────────────────────────────────────────
@app.get("/health")
async def health():
    """Liveness probe; always reports the service as up."""
    return dict(status="ok")
@app.post("/analyze")
async def analyze(request: AnalyzeRequest):
    """Classify a business-practice description against the CCPA.

    Tries the LLM first and falls back to the deterministic rule engine when
    the model is unreachable or returns unparseable output. Always responds
    with {"harmful": bool, "articles": [...]}.
    """
    logger.info(f"Analyzing: {request.prompt[:100]}")
    verdict = await llm_analyze(request.prompt)
    if verdict is None:
        logger.info("Falling back to rule-based analysis")
        verdict = rule_based_analyze(request.prompt)
    # Coerce to the documented response types regardless of verdict source.
    is_harmful = bool(verdict.get("harmful", False))
    cited = list(verdict.get("articles", []))
    if not is_harmful:
        # A compliant practice never cites sections.
        cited = []
    elif not cited:
        # The LLM flagged a violation but named no sections: recover them from
        # the rule engine, defaulting to the most common violation.
        cited = rule_based_analyze(request.prompt)["articles"] or ["Section 1798.100"]
    payload = {"harmful": is_harmful, "articles": cited}
    logger.info(f"Result: {payload}")
    return JSONResponse(content=payload)