rishabh5752 commited on
Commit
b9523e1
·
verified ·
1 Parent(s): 9c2958a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +40 -16
app.py CHANGED
@@ -1,19 +1,21 @@
1
  """
2
- Governance-GPT Quiz · ASCII-safe PDF with free HF summariser
3
  """
4
 
5
- import datetime, tempfile, re
6
  import gradio as gr
7
  import pandas as pd
8
  from fpdf import FPDF
9
  from transformers import pipeline
10
 
11
- # lightweight, free CPU summariser
 
12
  summariser = pipeline(
13
  "summarization",
14
- model="sshleifer/distilbart-cnn-12-6",
15
  tokenizer="sshleifer/distilbart-cnn-12-6",
16
  )
 
17
 
18
  QUESTIONS = [
19
  "Governance framework is documented and communicated across the organisation.",
@@ -51,25 +53,43 @@ TIERS = {
51
 
52
  def score_to_tier(x):
53
  for t,(lo,hi) in TIERS.items():
54
- if lo<=x<=hi: return t
 
55
  return "Unclassified"
56
 
57
  def latin1(t):
58
- return t.replace("–","-").replace("—","-").replace("•","-")\
59
- .encode("latin-1","replace").decode("latin-1")
60
 
 
61
  def llm_remediation(product,b_avgs,overall_tier):
62
- lines="\n".join(f"- {b}: {v:.2f}" for b,v in b_avgs.items())
63
  prompt=(f"Product: {product}\nOverall tier: {overall_tier}\n"
64
- f"Bucket scores:\n{lines}\n\n"
65
  "Write one assessment sentence and 3-5 bullet remediation actions "
66
  "referencing bucket names. Return only the summary.")
67
- out=summariser(prompt,max_length=120,min_length=40,do_sample=False)[0]["summary_text"]
68
- clean="\n".join(ln for ln in out.splitlines()
69
- if not re.search(r"assessment sentence|bullet remediation",ln,flags=re.I)).strip()
70
- return clean.replace("•","- ") or "LLM summary unavailable."
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
71
 
72
  def build_pdf(product,df,avg,tier,path,summary):
 
73
  pdf=FPDF(); pdf.set_auto_page_break(auto=True,margin=15); pdf.add_page()
74
  pdf.set_font("Helvetica","B",16)
75
  pdf.cell(0,10,latin1(f"AI Governance Maturity Report - {product}"),ln=1,align="C")
@@ -90,11 +110,14 @@ def build_pdf(product,df,avg,tier,path,summary):
90
  pdf.cell(35,8,f"{avg:.2f}",1)
91
  pdf.cell(35,8,tier,1,ln=1)
92
  pdf.output(path)
 
93
 
94
  def generate_report(name,*scores):
95
- product=name.strip() or "your product"; scores=list(scores)
 
96
  b_avgs={b:sum(scores[i] for i in idx)/len(idx) for b,idx in BUCKETS.items()}
97
- avg=sum(scores)/len(scores); tier=score_to_tier(avg)
 
98
  df=pd.DataFrame({"Bucket":b_avgs.keys(),
99
  "Avg":b_avgs.values(),
100
  "Tier":[score_to_tier(v) for v in b_avgs.values()]})
@@ -111,4 +134,5 @@ with gr.Blocks(title="Governance-GPT Quiz") as demo:
111
  out_md=gr.Markdown(); out_pdf=gr.File(label="⬇️ Download PDF")
112
  btn.click(generate_report,[name]+sliders,[out_md,out_pdf])
113
 
114
- demo.launch()
 
 
1
  """
2
+ Governance-GPT Quiz · ASCII-safe PDF with logged summariser
3
  """
4
 
5
+ import datetime, tempfile, re, traceback
6
  import gradio as gr
7
  import pandas as pd
8
  from fpdf import FPDF
9
  from transformers import pipeline
10
 
11
+ # ---------- model ---------- #
12
+ print("[INIT] loading summariser …")
13
  summariser = pipeline(
14
  "summarization",
15
+ model="sshleifer/distilbart-cnn-12-6", # same model most HF demo Spaces use
16
  tokenizer="sshleifer/distilbart-cnn-12-6",
17
  )
18
+ print("[INIT] summariser ready")
19
 
20
  QUESTIONS = [
21
  "Governance framework is documented and communicated across the organisation.",
 
53
 
54
def score_to_tier(x):
    """Return the name of the first TIERS entry whose [lo, hi] range
    contains *x*, or "Unclassified" when no range matches."""
    matched = next(
        (tier for tier, (low, high) in TIERS.items() if low <= x <= high),
        None,
    )
    return matched if matched is not None else "Unclassified"
59
 
60
# Typographic characters that FPDF's built-in latin-1 fonts cannot render,
# mapped to close ASCII equivalents. Anything else unmappable still becomes
# "?" via errors="replace" below (unchanged fallback behaviour).
_LATIN1_MAP = str.maketrans({
    "–": "-", "—": "-", "•": "-",          # original replacements, preserved
    "‘": "'", "’": "'", "“": '"', "”": '"',  # smart quotes — common in model text
    "…": "...",
})


def latin1(t):
    """Return *t* coerced to latin-1-safe text for FPDF output.

    Known typographic characters are mapped to ASCII look-alikes in one
    C-level pass; any remaining non-latin-1 character degrades to "?".
    """
    return t.translate(_LATIN1_MAP).encode("latin-1", "replace").decode("latin-1")
63
 
64
# ---------- LLM helper with logging ---------- #
def llm_remediation(product, b_avgs, overall_tier):
    """Ask the summariser for an assessment sentence plus remediation bullets.

    Logs the prompt, the raw model output and the cleaned text to stdout.
    Returns the cleaned summary, or "LLM summary unavailable." when the
    pipeline raises or the cleaned text is empty.
    """
    score_block = "\n".join(f"- {b}: {v:.2f}" for b, v in b_avgs.items())
    prompt = (
        f"Product: {product}\nOverall tier: {overall_tier}\n"
        f"Bucket scores:\n{score_block}\n\n"
        "Write one assessment sentence and 3-5 bullet remediation actions "
        "referencing bucket names. Return only the summary."
    )
    try:
        print("[LLM] prompt:\n", prompt)
        raw = summariser(
            prompt,
            max_length=80,   # shorter than default to avoid warnings
            min_length=20,
            do_sample=False,
        )[0]["summary_text"]
        print("[LLM] raw output:\n", raw)
    except Exception as err:
        print("[LLM] ERROR:", err)
        traceback.print_exc()
        return "LLM summary unavailable."

    kept = []
    for line in raw.splitlines():
        # drop lines that merely echo the prompt's instructions
        if re.search(r"assessment sentence|bullet remediation", line, re.I):
            continue
        kept.append(line)
    clean = "\n".join(kept).replace("•", "- ").strip()

    print("[LLM] cleaned summary:\n", clean)
    return clean or "LLM summary unavailable."
90
 
91
  def build_pdf(product,df,avg,tier,path,summary):
92
+ print("[PDF] building report …")
93
  pdf=FPDF(); pdf.set_auto_page_break(auto=True,margin=15); pdf.add_page()
94
  pdf.set_font("Helvetica","B",16)
95
  pdf.cell(0,10,latin1(f"AI Governance Maturity Report - {product}"),ln=1,align="C")
 
110
  pdf.cell(35,8,f"{avg:.2f}",1)
111
  pdf.cell(35,8,tier,1,ln=1)
112
  pdf.output(path)
113
+ print("[PDF] saved at", path)
114
 
115
  def generate_report(name,*scores):
116
+ product=name.strip() or "your product"
117
+ scores=list(scores)
118
  b_avgs={b:sum(scores[i] for i in idx)/len(idx) for b,idx in BUCKETS.items()}
119
+ avg=sum(scores)/len(scores)
120
+ tier=score_to_tier(avg)
121
  df=pd.DataFrame({"Bucket":b_avgs.keys(),
122
  "Avg":b_avgs.values(),
123
  "Tier":[score_to_tier(v) for v in b_avgs.values()]})
 
134
  out_md=gr.Markdown(); out_pdf=gr.File(label="⬇️ Download PDF")
135
  btn.click(generate_report,[name]+sliders,[out_md,out_pdf])
136
 
137
# set share=True for public URL
# NOTE(review): on Hugging Face Spaces the share flag is ignored with a
# warning and plain demo.launch() suffices — confirm the deployment target.
demo.launch(share=True)