rishabh5752 committed on
Commit
1649b2b
·
verified ·
1 Parent(s): 37c30bc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -14
app.py CHANGED
@@ -3,11 +3,10 @@ import os, datetime, tempfile, traceback
3
  import gradio as gr
4
  import pandas as pd
5
  from fpdf import FPDF
6
- from openai import OpenAI
7
 
8
- # Use environment variable for security:
9
- # export OPENAI_API_KEY="sk-..."
10
- client = OpenAI() # picks up OPENAI_API_KEY from env (recommended) # docs: platform.openai.com
11
 
12
  QUESTIONS = [
13
  "Governance framework is documented and communicated across the organisation.",
@@ -57,30 +56,29 @@ def latin1(t: str) -> str:
57
 
58
  def llm_remediation(product: str, b_avgs: dict, overall_tier: str) -> str:
59
  """
60
- Calls OpenAI Chat Completions with gpt-4o-mini to generate the summary.
61
- Uses correct param names for the current SDK: client.chat.completions.create(..., max_tokens=...).
62
  """
63
  bucket_lines = "\n".join(f"{b}: {v:.2f}" for b, v in b_avgs.items())
64
  prompt = (
65
  f"Summarise the governance maturity for the product '{product}' at tier '{overall_tier}' "
66
- f"based on these bucket scores:\n"
67
- f"{bucket_lines}\n\n"
68
  "First, write a one-sentence overall assessment. Then, provide 3-5 markdown bullets "
69
  "suggesting next actions for improvement, mentioning bucket names."
70
  )
71
 
72
  try:
73
  print("[OpenAI] Prompt:", prompt)
74
- completion = client.chat.completions.create(
75
- model="gpt-4o-mini", # OpenAI's cost-efficient multimodal/chat model
76
  messages=[
77
  {"role": "system", "content": "You are an expert in AI governance maturity."},
78
  {"role": "user", "content": prompt},
79
  ],
80
- max_tokens=300, # correct parameter name for chat completions
81
- temperature=0.7, # allowed for this model; adjust to taste
82
  )
83
- out = (completion.choices[0].message.content or "").strip()
84
  print("[OpenAI] Raw output:", out)
85
  if len(out) > 20:
86
  return out
@@ -154,5 +152,5 @@ with gr.Blocks(title="Governance-GPT Quiz") as demo:
154
  btn.click(generate_report, [pname] + sliders, [md_out, file_out])
155
 
156
  if __name__ == "__main__":
157
- # You can pass share=True to get a public link if you like.
158
  demo.launch()
 
3
  import gradio as gr
4
  import pandas as pd
5
  from fpdf import FPDF
6
+ import openai # legacy-compatible import
7
 
8
+ # Read API key from env (legacy SDK uses this attribute)
9
+ openai.api_key = os.getenv("OPENAI_API_KEY", "")
 
10
 
11
  QUESTIONS = [
12
  "Governance framework is documented and communicated across the organisation.",
 
56
 
57
  def llm_remediation(product: str, b_avgs: dict, overall_tier: str) -> str:
58
  """
59
+ Calls OpenAI Chat Completions with gpt-4o-mini (legacy-compatible).
60
+ Uses `max_tokens` (correct for Chat Completions).
61
  """
62
  bucket_lines = "\n".join(f"{b}: {v:.2f}" for b, v in b_avgs.items())
63
  prompt = (
64
  f"Summarise the governance maturity for the product '{product}' at tier '{overall_tier}' "
65
+ f"based on these bucket scores:\n{bucket_lines}\n\n"
 
66
  "First, write a one-sentence overall assessment. Then, provide 3-5 markdown bullets "
67
  "suggesting next actions for improvement, mentioning bucket names."
68
  )
69
 
70
  try:
71
  print("[OpenAI] Prompt:", prompt)
72
+ response = openai.ChatCompletion.create(
73
+ model="gpt-4o-mini",
74
  messages=[
75
  {"role": "system", "content": "You are an expert in AI governance maturity."},
76
  {"role": "user", "content": prompt},
77
  ],
78
+ max_tokens=300, # correct param name for this endpoint
79
+ temperature=0.7, # allowed on chat models
80
  )
81
+ out = (response["choices"][0]["message"]["content"] or "").strip()
82
  print("[OpenAI] Raw output:", out)
83
  if len(out) > 20:
84
  return out
 
152
  btn.click(generate_report, [pname] + sliders, [md_out, file_out])
153
 
154
  if __name__ == "__main__":
155
+ # Set share=True if you want a public link
156
  demo.launch()