rishabh5752 committed on
Commit
38b9492
·
verified ·
1 Parent(s): e3caa1c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +44 -52
app.py CHANGED
@@ -1,21 +1,17 @@
1
- """
2
- Governance‑GPT Quiz · Instruction‑tuned FLAN‑T5 summary with full logging
3
- """
4
-
5
  import datetime, tempfile, re, traceback
6
  import gradio as gr
7
  import pandas as pd
8
  from fpdf import FPDF
9
  from transformers import pipeline
10
 
11
- print("[INIT] loading FLANT5base … (≈20‑30 s on first start)")
12
  summariser = pipeline(
13
  "text2text-generation",
14
  model="google/flan-t5-base",
15
  tokenizer="google/flan-t5-base",
16
  max_new_tokens=180,
17
  )
18
- print("[INIT] FLAN ready")
19
 
20
  QUESTIONS = [
21
  "Governance framework is documented and communicated across the organisation.",
@@ -51,50 +47,46 @@ TIERS = {
51
  "Optimized": (4.51,5.00),
52
  }
53
 
54
- def score_to_tier(x): # helper
55
  for t,(lo,hi) in TIERS.items():
56
- if lo<=x<=hi:
57
- return t
58
  return "Unclassified"
59
 
60
- def latin1(t): # PDF font safety
61
  return (t.replace("–","-").replace("—","-").replace("•","-")
62
  .encode("latin-1","replace").decode("latin-1"))
63
 
64
- def llm_remediation(product,b_avgs,overall_tier):
65
- bucket_txt="\n".join(f"- {b}: {v:.2f}" for b,v in b_avgs.items())
66
  prompt = (
67
- "You are an AI governance consultant.\n"
68
- f"Product: {product}\n"
69
- f"Overall tier: {overall_tier}\n"
70
- "Bucket scores:\n"
71
- f"{bucket_txt}\n\n"
72
- "Provide:\n"
73
- "1) One-sentence overall assessment.\n"
74
- "2) 3‑5 bullet remediation actions referencing bucket names.\n"
75
- "### Response:\n"
76
  )
77
  try:
78
  print("[LLM] prompt >>>\n", prompt)
79
- raw = summariser(prompt, temperature=0)[0]["generated_text"]
80
  print("[LLM] raw >>>\n", raw)
81
  except Exception as e:
82
  print("[LLM] ERROR", e)
83
  traceback.print_exc()
84
  return "LLM summary unavailable."
85
 
86
- # keep only text after the marker
87
- cleaned = raw.split("### Response:")[-1].strip().replace("•","- ")
88
- # very short => fallback
89
- if len(cleaned) < 20:
90
- cleaned = (f"{product} is at a '{overall_tier}' maturity level. "
91
- "Focus on formalising governance charters, embedding privacy impact assessments, "
92
- "and creating incident playbooks across all buckets.")
93
- print("[LLM] cleaned >>>\n", cleaned)
94
- return cleaned
 
 
 
95
 
96
  def build_pdf(product,df,avg,tier,path,summary):
97
- print("[PDF] building ")
98
  pdf=FPDF(); pdf.set_auto_page_break(auto=True,margin=15); pdf.add_page()
99
  pdf.set_font("Helvetica","B",16)
100
  pdf.cell(0,10,latin1(f"AI Governance Maturity Report - {product}"),ln=1,align="C")
@@ -104,7 +96,6 @@ def build_pdf(product,df,avg,tier,path,summary):
104
  pdf.cell(0,8,latin1(f"Overall Score: {avg:.2f} | Tier: {tier}"),ln=1)
105
  pdf.set_font("Helvetica","",11)
106
  pdf.multi_cell(0,6,latin1(summary)); pdf.ln(4)
107
-
108
  pdf.set_font("Helvetica","B",11)
109
  pdf.cell(80,8,"Bucket",1); pdf.cell(35,8,"Avg",1); pdf.cell(35,8,"Tier",1,ln=1)
110
  pdf.set_font("Helvetica","",10)
@@ -116,25 +107,26 @@ def build_pdf(product,df,avg,tier,path,summary):
116
  pdf.output(path)
117
  print("[PDF] saved at", path)
118
 
119
- def generate_report(name,*scores):
120
- product=name.strip() or "your product"
121
- scores=list(scores)
122
- b_avgs={b:sum(scores[i] for i in idx)/len(idx) for b,idx in BUCKETS.items()}
123
- avg=sum(scores)/len(scores); tier=score_to_tier(avg)
124
- df=pd.DataFrame({"Bucket":b_avgs.keys(),
125
- "Avg":b_avgs.values(),
126
- "Tier":[score_to_tier(v) for v in b_avgs.values()]})
127
- summary=llm_remediation(product,b_avgs,tier)
128
- tmp=tempfile.NamedTemporaryFile(delete=False,suffix=".pdf")
129
- build_pdf(product,df,avg,tier,tmp.name,summary)
130
- return summary,tmp.name
 
131
 
132
- with gr.Blocks(title="GovernanceGPT Quiz") as demo:
133
- gr.Markdown("# GovernanceGPT Quiz")
134
- pname=gr.Textbox(label="Product / System Name",placeholder="e.g. AcmeAI Recommender")
135
- sliders=[gr.Slider(1,5,3,1,label=q) for q in QUESTIONS]
136
- btn=gr.Button("Generate PDF Report")
137
- md_out=gr.Markdown(); file_out=gr.File(label="⬇️ Download PDF")
138
  btn.click(generate_report,[pname]+sliders,[md_out,file_out])
139
 
140
- demo.launch(share=True) # share=True gives you a public link
 
 
 
 
 
1
  import datetime, tempfile, re, traceback
2
  import gradio as gr
3
  import pandas as pd
4
  from fpdf import FPDF
5
  from transformers import pipeline
6
 
7
# Load the FLAN-T5 text2text pipeline once at import time; this is the
# slow step on first start (model download + weight load).
print("[INIT] loading FLAN-T5-base ...")
summariser = pipeline(
    "text2text-generation",
    model="google/flan-t5-base",
    tokenizer="google/flan-t5-base",
    max_new_tokens=180,  # cap on generated summary length
)
print("[INIT] FLAN ready.")
15
 
16
  QUESTIONS = [
17
  "Governance framework is documented and communicated across the organisation.",
 
47
  "Optimized": (4.51,5.00),
48
  }
49
 
50
def score_to_tier(x):
    """Return the name of the first TIERS range containing *x*, else 'Unclassified'."""
    matched = next(
        (name for name, (low, high) in TIERS.items() if low <= x <= high),
        None,
    )
    return "Unclassified" if matched is None else matched
54
 
55
def latin1(t):
    """Return *t* coerced to Latin-1-safe text for FPDF output.

    En/em dashes and bullet characters become ASCII hyphens; any remaining
    character outside Latin-1 is substituted ('replace' -> '?').
    """
    dash_safe = t.translate(str.maketrans({"–": "-", "—": "-", "•": "-"}))
    return dash_safe.encode("latin-1", "replace").decode("latin-1")
58
 
59
def llm_remediation(product, b_avgs, overall_tier):
    """Generate a short remediation summary for *product* with the FLAN-T5 pipeline.

    Parameters
    ----------
    product : str
        Product / system name shown in the report.
    b_avgs : dict[str, float]
        Bucket name -> average score.
    overall_tier : str
        Tier label produced by score_to_tier().

    Returns a markdown string (one-line assessment plus bullet actions), or a
    static fallback when the model call fails or yields too little text.
    """
    bucket_lines = "\n".join(f"{b}: {v:.2f}" for b, v in b_avgs.items())
    prompt = (
        f"Summarise the governance maturity for the product '{product}' at tier '{overall_tier}' based on these bucket scores:\n"
        f"{bucket_lines}\n\n"
        "First, write a one-sentence overall assessment. Then, provide 3-5 markdown bullets suggesting next actions for improvement, mentioning bucket names."
    )
    try:
        print("[LLM] prompt >>>\n", prompt)
        # do_sample=False already selects greedy (deterministic) decoding;
        # passing temperature alongside it is ignored and makes newer
        # transformers versions emit a warning, so it is omitted here.
        raw = summariser(prompt, do_sample=False)[0]["generated_text"]
        print("[LLM] raw >>>\n", raw)
    except Exception as e:  # model failure must not crash the app — degrade gracefully
        print("[LLM] ERROR", e)
        traceback.print_exc()
        return "LLM summary unavailable."

    # Split a leading assessment from a trailing "- " bullet list when the
    # model produced one; otherwise keep the whole generation as-is.
    match = re.search(r"(?s)(.+?)(?:\n|$)(- .+)", raw)
    if match:
        summary = match.group(1).strip() + "\n" + match.group(2).strip()
    else:
        summary = raw.strip()
    # Guard against degenerate / near-empty generations.
    if len(summary) < 20:
        summary = (f"{product} is at '{overall_tier}' level. "
                   "Consider improving governance, privacy, risk and security processes in relevant buckets.")
    print("[LLM] cleaned >>>\n", summary)
    return summary
87
 
88
  def build_pdf(product,df,avg,tier,path,summary):
89
+ print("[PDF] building ...")
90
  pdf=FPDF(); pdf.set_auto_page_break(auto=True,margin=15); pdf.add_page()
91
  pdf.set_font("Helvetica","B",16)
92
  pdf.cell(0,10,latin1(f"AI Governance Maturity Report - {product}"),ln=1,align="C")
 
96
  pdf.cell(0,8,latin1(f"Overall Score: {avg:.2f} | Tier: {tier}"),ln=1)
97
  pdf.set_font("Helvetica","",11)
98
  pdf.multi_cell(0,6,latin1(summary)); pdf.ln(4)
 
99
  pdf.set_font("Helvetica","B",11)
100
  pdf.cell(80,8,"Bucket",1); pdf.cell(35,8,"Avg",1); pdf.cell(35,8,"Tier",1,ln=1)
101
  pdf.set_font("Helvetica","",10)
 
107
  pdf.output(path)
108
  print("[PDF] saved at", path)
109
 
110
def generate_report(name, *scores):
    """Score the quiz, build the PDF report, and return (summary_md, pdf_path).

    name    -- product name from the textbox (falls back to 'your product')
    *scores -- one 1-5 slider value per question, in QUESTIONS order
    """
    product = name.strip() or "your product"
    scores = list(scores)
    # Per-bucket mean over the question indices each bucket owns.
    b_avgs = {b: sum(scores[i] for i in idx) / len(idx) for b, idx in BUCKETS.items()}
    avg = sum(scores) / len(scores)
    tier = score_to_tier(avg)
    df = pd.DataFrame({"Bucket": list(b_avgs.keys()),
                       "Avg": list(b_avgs.values()),
                       "Tier": [score_to_tier(v) for v in b_avgs.values()]})
    summary = llm_remediation(product, b_avgs, tier)
    tmp = tempfile.NamedTemporaryFile(delete=False, suffix=".pdf")
    # Close our handle before FPDF writes to the path: keeping it open leaks
    # the file descriptor and prevents re-opening the file on Windows.
    tmp.close()
    build_pdf(product, df, avg, tier, tmp.name, summary)
    return summary, tmp.name
123
 
124
# --- Gradio UI: one slider per question; the button produces the markdown
# summary and a downloadable PDF via generate_report(). ---
with gr.Blocks(title="Governance-GPT Quiz") as demo:
    gr.Markdown("# Governance-GPT Quiz")
    pname = gr.Textbox(label="Product / System Name",placeholder="e.g. AcmeAI Recommender")
    # Slider args: min=1, max=5, default=3, step=1
    sliders = [gr.Slider(1,5,3,1,label=q) for q in QUESTIONS]
    btn = gr.Button("Generate PDF Report")
    md_out = gr.Markdown(); file_out = gr.File(label="⬇️ Download PDF")
    # Inputs: product name followed by every slider, matching generate_report(name, *scores).
    btn.click(generate_report,[pname]+sliders,[md_out,file_out])

demo.launch(share=True)  # share=True exposes a temporary public gradio.live link