# app.py
import os
import datetime
import tempfile
import traceback
import gradio as gr
import pandas as pd
from fpdf import FPDF
import openai
# Load the OpenAI API key from the environment at import time.
# Falls back to "" (no key) so the app still starts; llm_remediation
# then hits its exception path and uses the static fallback text.
openai.api_key = os.getenv("OPENAI_API_KEY", "")
# The 15 governance assessment statements, each rated 1-5 by a slider.
# Order matters: BUCKETS (below) maps fixed index triples into this list,
# three consecutive questions per bucket.
QUESTIONS = [
    "Governance framework is documented and communicated across the organisation.",
    "Roles & responsibilities for AI oversight are clearly assigned.",
    "KPIs link AI outcomes to business & societal value.",
    "Data lineage is captured and auditable for all production models.",
    "Privacy impact assessments are performed before every new AI use-case.",
    "Bias / fairness metrics are monitored post-deployment.",
    "Incident response playbooks cover AI system failures & ethics breaches.",
    "Third-party models and datasets are licensed and risk-assessed.",
    "Governance performance is reviewed by senior leadership at least quarterly.",
    "Security controls protect model artefacts and inference endpoints.",
    "Continuous training keeps staff aware of AI policy updates.",
    "Explainability techniques are applied commensurate with model impact.",
    "Human-in-the-loop overrides exist for high-risk decisions.",
    "End-of-life or rollback criteria are defined for all models.",
    "Model cards or equivalent documentation exist for each deployed model.",
]
# Maps each reporting bucket to the indices of its three QUESTIONS entries.
BUCKETS = {
    "Governance & Strategy": [0, 1, 2],
    "Data & Privacy": [3, 4, 5],
    "Risk & Compliance": [6, 7, 8],
    "Security & Infrastructure": [9, 10, 11],
    "Lifecycle & Oversight": [12, 13, 14],
}
# Inclusive (lo, hi) score range per maturity tier, checked by score_to_tier.
# NOTE(review): the ranges leave small gaps, e.g. (2.00, 2.01) and
# (2.50, 2.51); a score landing in a gap is reported as "Unclassified".
# With integer 1-5 slider inputs averaged over 3 or 15 questions no score
# can land in a gap, but verify if inputs ever become non-integer.
TIERS = {
    "Initial": (1.00, 2.00),
    "Repeatable": (2.01, 2.50),
    "Defined": (2.51, 3.50),
    "Managed": (3.51, 4.50),
    "Optimized": (4.51, 5.00),
}
def score_to_tier(x: float) -> str:
    """Return the name of the maturity tier whose inclusive (lo, hi) range
    contains *x*, or "Unclassified" when no TIERS range covers it."""
    matches = (name for name, (lo, hi) in TIERS.items() if lo <= x <= hi)
    return next(matches, "Unclassified")
# Replacements for common non-Latin-1 punctuation so it degrades to a
# sensible ASCII equivalent instead of "?" after the re-encode below.
_LATIN1_MAP = str.maketrans({
    "\u2013": "-",    # en dash
    "\u2014": "-",    # em dash
    "\u2022": "-",    # bullet
    "\u2018": "'",    # left single quote
    "\u2019": "'",    # right single quote (apostrophe)
    "\u201c": '"',    # left double quote
    "\u201d": '"',    # right double quote
    "\u2026": "...",  # horizontal ellipsis
})

def latin1(t: str) -> str:
    """Best-effort conversion of *t* to text representable in Latin-1.

    FPDF's built-in core fonts only support Latin-1, so known "smart"
    punctuation is mapped to ASCII first and any remaining unmappable
    character is replaced with "?".
    """
    return t.translate(_LATIN1_MAP).encode("latin-1", "replace").decode("latin-1")
def llm_remediation(product: str, b_avgs: dict, overall_tier: str) -> str:
    """Produce a remediation summary for *product*.

    Tries gpt-4o-mini first; if the API call fails (or returns fewer than
    ~20 characters) falls back to a static rule-based summary derived from
    each bucket's tier.
    """
    score_block = "\n".join(f"{name}: {score:.2f}" for name, score in b_avgs.items())
    user_prompt = (
        f"Summarise the governance maturity for the product '{product}' at tier '{overall_tier}' "
        f"based on these bucket scores:\n{score_block}\n\n"
        "First, write a one-sentence overall assessment. Then, provide 3-5 markdown bullets "
        "suggesting next actions for improvement, mentioning bucket names."
    )
    try:
        reply = openai.ChatCompletion.create(
            model="gpt-4o-mini",
            messages=[
                {"role": "system", "content": "You are an expert in AI governance maturity."},
                {"role": "user", "content": user_prompt},
            ],
            max_tokens=300,
            temperature=0.7,
        )
        text = (reply["choices"][0]["message"]["content"] or "").strip()
        # Only trust non-trivial completions; otherwise use the fallback.
        if len(text) > 20:
            return text
    except Exception as exc:
        print("[OpenAI] ERROR:", exc)
        traceback.print_exc()
    # Rule-based fallback: one canned recommendation per bucket tier.
    lines = []
    for bucket, score in b_avgs.items():
        bucket_tier = score_to_tier(score)
        if bucket_tier == "Managed":
            hint = "Benchmark against peers and automate monitoring."
        elif bucket_tier in ("Initial", "Repeatable", "Defined"):
            hint = "Formalize policies, clarify owners, and address compliance gaps."
        else:
            hint = "Continue to optimize and share best practices."
        lines.append(f"- {bucket}: {hint}")
    header = f"{product} is at '{overall_tier}' maturity. Focus on the following improvements:\n"
    return header + "\n".join(lines)
def build_pdf(product, df, avg, tier, path, summary):
    """Write the maturity report PDF to *path*.

    Layout: centered title + date, overall score line, the LLM/fallback
    summary paragraph, then a Bucket/Avg/Tier table with an "Overall" row.
    df is expected to carry Bucket, Avg and Tier columns (built by
    generate_report). All text goes through latin1() because FPDF's core
    fonts only handle Latin-1.
    """
    pdf = FPDF()
    pdf.set_auto_page_break(auto=True, margin=15)
    pdf.add_page()
    # Title + date header, centered
    pdf.set_font("Helvetica", "B", 16)
    pdf.cell(0, 10, latin1(f"AI Governance Maturity Report - {product}"), ln=1, align="C")
    pdf.set_font("Helvetica", "", 12)
    pdf.cell(0, 8, datetime.date.today().isoformat(), ln=1, align="C")
    pdf.ln(4)
    pdf.set_font("Helvetica", "B", 12)
    pdf.cell(0, 8, latin1(f"Overall Score: {avg:.2f} | Tier: {tier}"), ln=1)
    # Summary paragraph (multi_cell wraps long text across lines)
    pdf.set_font("Helvetica", "", 11)
    pdf.multi_cell(0, 6, latin1(summary))
    pdf.ln(4)
    # Table header row (fixed column widths: 80 / 35 / 35 mm)
    pdf.set_font("Helvetica", "B", 11)
    pdf.cell(80, 8, "Bucket", 1); pdf.cell(35, 8, "Avg", 1); pdf.cell(35, 8, "Tier", 1, ln=1)
    pdf.set_font("Helvetica", "", 10)
    # One row per bucket; bucket name truncated to 40 chars to fit its cell
    for _, r in df.iterrows():
        pdf.cell(80, 8, latin1(str(r.Bucket)[:40]), 1)
        pdf.cell(35, 8, f"{float(r.Avg):.2f}", 1)
        pdf.cell(35, 8, str(r.Tier), 1, ln=1)
    # Closing row with the overall score/tier
    pdf.cell(80, 8, "Overall", 1); pdf.cell(35, 8, f"{avg:.2f}", 1); pdf.cell(35, 8, tier, 1, ln=1)
    pdf.output(path)
def generate_report(name, *scores):
    """Full evaluation pipeline: scoring -> tiering -> LLM summary -> PDF.

    Parameters
    ----------
    name : str | None
        Product name from the textbox; blank falls back to "your product".
    *scores : float
        One 1-5 rating per QUESTIONS entry, in order (15 values from the UI).

    Returns
    -------
    tuple[str, str]
        (markdown summary, filesystem path of the generated PDF).
    """
    product = (name or "").strip() or "your product"
    scores = list(scores)
    # Mean of the three question scores mapped to each bucket
    b_avgs = {b: sum(scores[i] for i in idx) / len(idx) for b, idx in BUCKETS.items()}
    avg = sum(scores) / len(scores)
    tier = score_to_tier(avg)
    df = pd.DataFrame({
        "Bucket": list(b_avgs.keys()),
        "Avg": list(b_avgs.values()),
        "Tier": [score_to_tier(v) for v in b_avgs.values()],
    })
    summary = llm_remediation(product, b_avgs, tier)
    # Use mkstemp and close the fd immediately: fpdf.output() re-opens the
    # path itself, so holding a NamedTemporaryFile handle open would leak
    # the descriptor and fail outright on Windows (file locked while open).
    fd, pdf_path = tempfile.mkstemp(suffix=".pdf")
    os.close(fd)
    build_pdf(product, df, avg, tier, pdf_path, summary)
    return summary, pdf_path
# --- Gradio UI: one slider per question, wired into generate_report ---
with gr.Blocks(title="Governance-GPT Quiz") as demo:
    gr.Markdown("# Governance-GPT Quiz")
    # Free-text product name; generate_report substitutes "your product" when blank
    pname = gr.Textbox(label="Product / System Name", placeholder="e.g. AcmeAI Recommender")
    # One slider per question; positional args look like min=1, max=5,
    # default=3, step=1 — TODO confirm against the pinned gradio version's
    # Slider signature.
    sliders = [gr.Slider(1, 5, 3, 1, label=q) for q in QUESTIONS]
    btn = gr.Button("Generate PDF Report")
    md_out = gr.Markdown()  # rendered remediation summary
    file_out = gr.File(label="⬇️ Download PDF")
    # Inputs: product name followed by all slider values, in QUESTIONS order;
    # outputs map to (summary markdown, PDF path) from generate_report.
    btn.click(generate_report, [pname] + sliders, [md_out, file_out])
# Start the app only when run as a script (not on import)
if __name__ == "__main__":
    demo.launch()