rishabh5752 committed on
Commit
54aa9cf
·
verified ·
1 Parent(s): 38b9492

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +60 -43
app.py CHANGED
@@ -1,17 +1,8 @@
1
- import datetime, tempfile, re, traceback
2
  import gradio as gr
3
  import pandas as pd
4
  from fpdf import FPDF
5
- from transformers import pipeline
6
-
7
- print("[INIT] loading FLAN-T5-base ...")
8
- summariser = pipeline(
9
- "text2text-generation",
10
- model="google/flan-t5-base",
11
- tokenizer="google/flan-t5-base",
12
- max_new_tokens=180,
13
- )
14
- print("[INIT] FLAN ready.")
15
 
16
  QUESTIONS = [
17
  "Governance framework is documented and communicated across the organisation.",
@@ -49,7 +40,8 @@ TIERS = {
49
 
50
  def score_to_tier(x):
51
  for t,(lo,hi) in TIERS.items():
52
- if lo<=x<=hi: return t
 
53
  return "Unclassified"
54
 
55
  def latin1(t):
@@ -57,32 +49,55 @@ def latin1(t):
57
  .encode("latin-1","replace").decode("latin-1"))
58
 
59
  def llm_remediation(product, b_avgs, overall_tier):
60
- bucket_lines = "\n".join(f"{b}: {v:.2f}" for b,v in b_avgs.items())
 
61
  prompt = (
62
- f"Summarise the governance maturity for the product '{product}' at tier '{overall_tier}' based on these bucket scores:\n"
63
  f"{bucket_lines}\n\n"
64
  "First, write a one-sentence overall assessment. Then, provide 3-5 markdown bullets suggesting next actions for improvement, mentioning bucket names."
65
  )
66
- try:
67
- print("[LLM] prompt >>>\n", prompt)
68
- raw = summariser(prompt, temperature=0.0, do_sample=False)[0]["generated_text"]
69
- print("[LLM] raw >>>\n", raw)
70
- except Exception as e:
71
- print("[LLM] ERROR", e)
72
- traceback.print_exc()
73
- return "LLM summary unavailable."
74
 
75
- # extract bullets if present, otherwise take everything
76
- match = re.search(r"(?s)(.+?)(?:\n|$)(- .+)", raw)
77
- if match:
78
- summary = match.group(1).strip() + "\n" + match.group(2).strip()
79
- else:
80
- summary = raw.strip()
81
- # ensure not too short
82
- if len(summary) < 20:
83
- summary = (f"{product} is at '{overall_tier}' level. "
84
- "Consider improving governance, privacy, risk and security processes in relevant buckets.")
85
- print("[LLM] cleaned >>>\n", summary)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
86
  return summary
87
 
88
  def build_pdf(product,df,avg,tier,path,summary):
@@ -110,23 +125,25 @@ def build_pdf(product,df,avg,tier,path,summary):
110
  def generate_report(name, *scores):
111
  product = name.strip() or "your product"
112
  scores = list(scores)
113
- b_avgs = {b: sum(scores[i] for i in idx)/len(idx) for b,idx in BUCKETS.items()}
114
- avg = sum(scores)/len(scores)
115
  tier = score_to_tier(avg)
116
- df = pd.DataFrame({"Bucket":b_avgs.keys(),
117
- "Avg":b_avgs.values(),
118
- "Tier":[score_to_tier(v) for v in b_avgs.values()]})
 
 
119
  summary = llm_remediation(product, b_avgs, tier)
120
- tmp = tempfile.NamedTemporaryFile(delete=False,suffix=".pdf")
121
  build_pdf(product, df, avg, tier, tmp.name, summary)
122
  return summary, tmp.name
123
 
124
  with gr.Blocks(title="Governance-GPT Quiz") as demo:
125
  gr.Markdown("# Governance-GPT Quiz")
126
- pname = gr.Textbox(label="Product / System Name",placeholder="e.g. AcmeAI Recommender")
127
- sliders = [gr.Slider(1,5,3,1,label=q) for q in QUESTIONS]
128
  btn = gr.Button("Generate PDF Report")
129
  md_out = gr.Markdown(); file_out = gr.File(label="⬇️ Download PDF")
130
- btn.click(generate_report,[pname]+sliders,[md_out,file_out])
131
 
132
- demo.launch(share=True)
 
1
+ import datetime, tempfile, os, traceback
2
  import gradio as gr
3
  import pandas as pd
4
  from fpdf import FPDF
5
+ import requests
 
 
 
 
 
 
 
 
 
6
 
7
  QUESTIONS = [
8
  "Governance framework is documented and communicated across the organisation.",
 
40
 
41
def score_to_tier(x):
    """Return the name of the first tier in TIERS whose inclusive [lo, hi] range contains x."""
    matching = (name for name, (lo, hi) in TIERS.items() if lo <= x <= hi)
    return next(matching, "Unclassified")
46
 
47
  def latin1(t):
 
49
  .encode("latin-1","replace").decode("latin-1"))
50
 
51
def llm_remediation(product, b_avgs, overall_tier):
    """Produce a short governance-maturity summary with remediation bullets.

    Tries the OpenAI chat-completions API first; if no key is configured or
    the call fails (or returns a degenerate answer), falls back to a
    deterministic rule-based summary built from the bucket tiers.

    Args:
        product: Display name of the product being assessed.
        b_avgs: Mapping of bucket name -> average score (float).
        overall_tier: Tier label for the overall average score.

    Returns:
        Markdown string: a one-sentence assessment plus bullet suggestions.
    """
    # SECURITY: never hard-code API keys in source control — the previous
    # revision committed a live `sk-proj-...` key, which must be treated as
    # compromised and rotated. Read the key from the environment instead.
    api_key = os.environ.get("OPENAI_API_KEY", "")
    bucket_lines = "\n".join(f"{b}: {v:.2f}" for b, v in b_avgs.items())
    prompt = (
        f"Summarise the AI governance maturity for the product '{product}' at tier '{overall_tier}' based on these bucket scores:\n"
        f"{bucket_lines}\n\n"
        "First, write a one-sentence overall assessment. Then, provide 3-5 markdown bullets suggesting next actions for improvement, mentioning bucket names."
    )

    if api_key:
        try:
            print("[GPT] Sending request to OpenAI API ...")
            response = requests.post(
                "https://api.openai.com/v1/chat/completions",
                headers={
                    "Authorization": f"Bearer {api_key}",
                    "Content-Type": "application/json",
                },
                json={
                    "model": "gpt-4o",  # swap for gpt-5-nano when available
                    "messages": [
                        {"role": "system", "content": "You are an expert in AI governance maturity."},
                        {"role": "user", "content": prompt},
                    ],
                    "max_tokens": 200,
                    "temperature": 0.3,
                },
                timeout=30,  # without a timeout a stalled API call hangs the request worker forever
            )
            data = response.json()
            print("[GPT] raw response:", data)
            out = data["choices"][0]["message"]["content"].strip()
            if len(out) > 30:  # guard against empty/degenerate completions
                return out
        except Exception as e:
            # Any network / schema / HTTP failure degrades to the rule-based summary.
            print("[GPT] ERROR", e)
            traceback.print_exc()

    # Rule-based fallback: one actionable bullet per bucket, keyed on its tier.
    # (Locals renamed so they don't shadow the caller-visible `tier`/`avg` names.)
    bullets = []
    for bucket, bucket_avg in b_avgs.items():
        bucket_tier = score_to_tier(bucket_avg)
        if bucket_tier in ["Initial", "Repeatable", "Defined"]:
            bullets.append(f"- {bucket}: Formalize policies, clarify owners, and address compliance gaps.")
        elif bucket_tier == "Managed":
            bullets.append(f"- {bucket}: Benchmark against peers and automate monitoring.")
        else:
            bullets.append(f"- {bucket}: Continue to optimize and share best practices.")
    summary = (
        f"{product} is at '{overall_tier}' maturity. "
        "Focus on the following improvements:\n" + "\n".join(bullets)
    )
    return summary
102
 
103
  def build_pdf(product,df,avg,tier,path,summary):
 
125
def generate_report(name, *scores):
    """Score the quiz, build the PDF report, and return (summary_md, pdf_path).

    Args:
        name: Product name from the textbox; blank falls back to a placeholder.
        *scores: One 1-5 slider value per question, ordered like QUESTIONS.

    Returns:
        Tuple of (markdown summary for display, path of the generated PDF).
    """
    product = name.strip() or "your product"
    scores = list(scores)
    # Per-bucket averages over the question indices each bucket owns.
    b_avgs = {b: sum(scores[i] for i in idx) / len(idx) for b, idx in BUCKETS.items()}
    avg = sum(scores) / len(scores)
    tier = score_to_tier(avg)
    df = pd.DataFrame({
        "Bucket": b_avgs.keys(),
        "Avg": b_avgs.values(),
        "Tier": [score_to_tier(v) for v in b_avgs.values()],
    })
    summary = llm_remediation(product, b_avgs, tier)
    # We only need a unique path; close our handle immediately so each request
    # doesn't leak a file descriptor, then let build_pdf write to the path.
    tmp = tempfile.NamedTemporaryFile(delete=False, suffix=".pdf")
    tmp.close()
    build_pdf(product, df, avg, tier, tmp.name, summary)
    return summary, tmp.name
140
 
141
# ---- UI wiring: one slider per question, one button, markdown + PDF outputs ----
with gr.Blocks(title="Governance-GPT Quiz") as demo:
    gr.Markdown("# Governance-GPT Quiz")
    pname = gr.Textbox(
        label="Product / System Name",
        placeholder="e.g. AcmeAI Recommender",
    )
    sliders = [gr.Slider(1, 5, 3, 1, label=q) for q in QUESTIONS]
    btn = gr.Button("Generate PDF Report")
    md_out = gr.Markdown()
    file_out = gr.File(label="⬇️ Download PDF")
    btn.click(generate_report, [pname] + sliders, [md_out, file_out])

demo.launch()