ZENLLC committed on
Commit
e65b6d2
·
verified ·
1 Parent(s): b760080

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +278 -112
app.py CHANGED
@@ -8,8 +8,9 @@ import requests
8
  import matplotlib.pyplot as plt
9
  from matplotlib.figure import Figure
10
 
 
11
  # ============================================================
12
- # LLM CALLER — GPT-4.1 ONLY
13
  # ============================================================
14
 
15
  def call_chat_completion(
@@ -18,11 +19,22 @@ def call_chat_completion(
18
  system_prompt: str,
19
  user_prompt: str,
20
  model: str = "gpt-4.1",
21
- max_completion_tokens: int = 2000,
22
  ) -> str:
 
 
 
 
 
 
 
 
23
 
24
  if not api_key:
25
- raise ValueError("Missing API key.")
 
 
 
26
 
27
  url = base_url.rstrip("/") + "/v1/chat/completions"
28
 
@@ -31,6 +43,7 @@ def call_chat_completion(
31
  "Content-Type": "application/json",
32
  }
33
 
 
34
  payload = {
35
  "model": model,
36
  "messages": [
@@ -40,32 +53,39 @@ def call_chat_completion(
40
  "max_completion_tokens": max_completion_tokens,
41
  }
42
 
43
- resp = requests.post(url, headers=headers, json=payload, timeout=60)
 
44
 
45
- # Fallback for providers requiring max_tokens
46
- if resp.status_code == 400 and "max_completion_tokens" in resp.text:
47
- payload["max_tokens"] = max_completion_tokens
48
  payload.pop("max_completion_tokens", None)
49
- resp = requests.post(url, headers=headers, json=payload, timeout=60)
 
50
 
51
- if resp.status_code != 200:
52
  raise RuntimeError(
53
- f"LLM API Error {resp.status_code}:\n{resp.text[:400]}"
54
  )
55
 
56
- data = resp.json()
 
57
  try:
58
  return data["choices"][0]["message"]["content"]
59
- except:
60
- raise RuntimeError(f"Malformed response:\n\n{json.dumps(data, indent=2)}")
 
 
 
61
 
62
 
63
  # ============================================================
64
- # SOP PROMPT + JSON PARSER
65
  # ============================================================
66
 
67
  SOP_SYSTEM_PROMPT = """
68
- You are an expert process engineer. Produce SOPs as JSON using:
 
 
69
 
70
  {
71
  "title": "",
@@ -90,7 +110,7 @@ You are an expert process engineer. Produce SOPs as JSON using:
90
  "versioning": {"version": "1.0","owner": "","last_updated": ""}
91
  }
92
 
93
- Return ONLY JSON.
94
  """
95
 
96
  def build_user_prompt(title, desc, industry, tone, detail):
@@ -100,21 +120,35 @@ Context: {desc}
100
  Industry: {industry}
101
  Tone: {tone}
102
  Detail Level: {detail}
103
- Audience: mid-career professionals.
104
  """
105
 
106
-
107
  def parse_sop_json(raw: str) -> Dict[str, Any]:
 
 
 
 
108
  txt = raw.strip()
 
 
109
  if txt.startswith("```"):
110
- txt = txt.split("```")[1]
 
 
 
 
 
111
 
112
- first = txt.find("{")
113
- last = txt.rfind("}")
114
- return json.loads(txt[first:last+1])
 
115
 
116
 
117
  def sop_to_markdown(sop: Dict[str, Any]) -> str:
 
 
 
118
 
119
  def bullet(items):
120
  if not items:
@@ -122,34 +156,56 @@ def sop_to_markdown(sop: Dict[str, Any]) -> str:
122
  return "\n".join(f"- {i}" for i in items)
123
 
124
  md = []
125
- md.append(f"# {sop.get('title','Untitled SOP')}\n")
126
 
127
- md.append("## 1. Purpose\n" + sop.get("purpose","N/A"))
128
- md.append("## 2. Scope\n" + sop.get("scope","N/A"))
 
 
 
 
129
 
130
- md.append("## 3. Definitions\n" + bullet(sop.get("definitions", [])))
 
 
131
 
132
- md.append("## 4. Roles & Responsibilities")
 
 
 
 
 
133
  for r in sop.get("roles", []):
134
- md.append(f"### {r.get('name','Role')}")
135
  md.append(bullet(r.get("responsibilities", [])))
136
 
137
- md.append("## 5. Prerequisites\n" + bullet(sop.get("prerequisites", [])))
 
 
138
 
139
- md.append("## 6. Procedure")
 
140
  for st in sop.get("steps", []):
141
  md.append(f"### Step {st['step_number']}: {st['title']}")
142
  md.append(f"**Owner:** {st['owner_role']}")
143
  md.append(st["description"])
144
- md.append("**Inputs:**\n" + bullet(st["inputs"]))
145
  md.append("**Outputs:**\n" + bullet(st["outputs"]))
146
 
147
- md.append("## 7. Escalation\n" + bullet(sop.get("escalation", [])))
148
- md.append("## 8. Metrics\n" + bullet(sop.get("metrics")))
149
- md.append("## 9. Risks\n" + bullet(sop.get("risks")))
 
 
 
 
150
 
 
 
 
 
 
151
  v = sop.get("versioning", {})
152
- md.append("## 10. Version Control")
153
  md.append(f"- Version: {v.get('version','1.0')}")
154
  md.append(f"- Owner: {v.get('owner','N/A')}")
155
  md.append(f"- Last Updated: {v.get('last_updated','N/A')}")
@@ -158,94 +214,141 @@ def sop_to_markdown(sop: Dict[str, Any]) -> str:
158
 
159
 
160
  # ============================================================
161
- # 🆕 PERFECTED DIAGRAM — AUTO-SIZE CARDS
162
  # ============================================================
163
 
164
  def create_sop_steps_figure(sop: Dict[str, Any]) -> Figure:
 
 
 
 
 
 
 
 
165
  steps = sop.get("steps", [])
166
  if not steps:
167
- fig, ax = plt.subplots(figsize=(6,2))
168
- ax.text(0.5,0.5,"No steps to visualize.",ha="center",va="center")
 
 
 
 
169
  ax.axis("off")
170
  return fig
171
 
172
- # Dynamically compute figure height based on text amount
173
- total_height = 0
 
 
174
  block_heights = []
 
175
 
176
  for st in steps:
177
- desc_lines = textwrap.wrap(st["description"], width=65)
178
- num_lines = 2 + len(desc_lines) # title + owner + description lines
179
- block_h = 0.35 * num_lines
180
- block_heights.append(block_h)
181
- total_height += block_h + 0.3 # spacing
182
 
183
- fig_height = min(18, max(5, total_height))
184
- fig, ax = plt.subplots(figsize=(10, fig_height))
 
 
 
 
 
 
 
185
 
186
  y = total_height
187
 
 
 
 
188
  for idx, st in enumerate(steps):
 
 
189
  title = st["title"]
190
  owner = st["owner_role"]
191
- desc_lines = textwrap.wrap(st["description"], width=70)
192
- block_h = block_heights[idx]
193
 
194
- x0, x1 = 0.05, 0.95
 
 
195
 
 
196
  ax.add_patch(
197
  plt.Rectangle(
198
  (x0, y - block_h),
199
  x1 - x0,
200
  block_h,
201
  fill=False,
202
- linewidth=1.7
203
  )
204
  )
205
 
206
- # Number box
207
- nbw = 0.08
 
208
  ax.add_patch(
209
  plt.Rectangle(
210
  (x0, y - block_h),
211
- nbw,
212
  block_h,
213
  fill=False,
214
- linewidth=1.5
215
  )
216
  )
217
 
 
218
  ax.text(
219
- x0 + nbw/2,
220
  y - block_h/2,
221
  str(st["step_number"]),
222
- ha="center", va="center",
223
- fontsize=13, fontweight="bold"
 
 
224
  )
225
 
226
- text_x = x0 + nbw + 0.02
 
227
 
228
  # Title
229
- ax.text(text_x, y - 0.2,
230
- title,
231
- fontsize=12,
232
- fontweight="bold",
233
- ha="left", va="top")
 
 
 
 
234
 
235
  # Owner
236
- ax.text(text_x, y - 0.45,
237
- f"Owner: {owner}",
238
- fontsize=10,
239
- style="italic",
240
- ha="left", va="top")
 
 
 
 
241
 
242
- # Description (wrapped)
243
- text_y = y - 0.75
244
  for line in desc_lines:
245
- ax.text(text_x, text_y, line, fontsize=9, ha="left", va="top")
246
- text_y -= 0.28
 
 
 
 
 
 
 
247
 
248
- y -= (block_h + 0.3)
249
 
250
  ax.axis("off")
251
  fig.tight_layout()
@@ -253,41 +356,41 @@ def create_sop_steps_figure(sop: Dict[str, Any]) -> Figure:
253
 
254
 
255
  # ============================================================
256
- # SAMPLE SCENARIOS
257
  # ============================================================
258
 
259
- SAMPLES = {
260
  "Volunteer Onboarding": {
261
  "title": "Volunteer Onboarding",
262
- "description": "Create SOP for onboarding volunteers: background checks, orientation, training, placement.",
263
  "industry": "Nonprofit"
264
  },
265
  "Remote Employee Onboarding": {
266
  "title": "Remote Employee Onboarding",
267
- "description": "SOP for remote hires including IT setup, HR docs, culture onboarding.",
268
  "industry": "HR"
269
  },
270
  "IT Outage Response": {
271
  "title": "IT Outage Response",
272
- "description": "Major outage response: detection, triage, escalation, comms, restoration, post-mortem.",
273
  "industry": "IT"
274
- },
275
  }
276
 
277
- def load_sample(name):
278
- if name not in SAMPLES:
279
  return "", "", "General"
280
- s = SAMPLES[name]
281
- return s["title"], s["description"], s["industry"]
282
 
283
 
284
  # ============================================================
285
- # MAIN GENERATOR
286
  # ============================================================
287
 
288
  def generate_sop(
289
  api_key_state,
290
- api_key_input,
291
  base_url,
292
  model,
293
  title,
@@ -297,12 +400,18 @@ def generate_sop(
297
  detail
298
  ):
299
 
300
- api_key = api_key_input or api_key_state
 
301
  if not api_key:
302
- return ("⚠️ Enter an API key.",
303
- "",
304
- create_sop_steps_figure({"steps": []}),
305
- api_key_state)
 
 
 
 
 
306
 
307
  try:
308
  user_prompt = build_user_prompt(title, desc, industry, tone, detail)
@@ -312,8 +421,8 @@ def generate_sop(
312
  base_url=base_url,
313
  system_prompt=SOP_SYSTEM_PROMPT,
314
  user_prompt=user_prompt,
315
- model="gpt-4.1", # 🔥 Forced stable model
316
- max_completion_tokens=2000
317
  )
318
 
319
  sop = parse_sop_json(raw)
@@ -324,50 +433,107 @@ def generate_sop(
324
  return md, json_out, fig, api_key
325
 
326
  except Exception as e:
327
- return (f"❌ Error generating SOP:\n{e}",
328
- "",
329
- create_sop_steps_figure({"steps": []}),
330
- api_key_state)
 
 
331
 
332
 
333
  # ============================================================
334
- # GRADIO UI
335
  # ============================================================
336
 
337
  with gr.Blocks(title="ZEN Simple SOP Builder") as demo:
338
 
339
  gr.Markdown("""
340
- # 🧭 ZEN Simple SOP Builder
341
- Generate clean SOPs + auto diagrams using **GPT-4.1**.
 
342
  """)
343
 
344
  api_key_state = gr.State("")
345
 
346
  with gr.Row():
 
347
  with gr.Column(scale=1):
348
- api_input = gr.Textbox("API Key", type="password")
349
- base_url = gr.Textbox("Base URL", value="https://api.openai.com")
350
- model_name = gr.Textbox("Model (GPT-4.1 only)", value="gpt-4.1")
351
 
352
- sample = gr.Dropdown("Sample SOP", choices=list(SAMPLES.keys()))
353
- load_btn = gr.Button("Load Sample")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
354
 
 
355
  with gr.Column(scale=2):
356
- title = gr.Textbox("SOP Title")
357
- desc = gr.Textbox("Description", lines=5)
358
- industry = gr.Textbox("Industry", value="General")
359
- tone = gr.Dropdown("Tone", ["Professional","Executive","Supportive"], value="Professional")
360
- detail = gr.Dropdown("Detail Level", ["Standard","High detail","Checklist"], value="Standard")
361
 
362
- gen_btn = gr.Button("🚀 Generate SOP", variant="primary")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
363
 
 
364
  sop_md = gr.Markdown()
365
  sop_json = gr.Code(language="json")
 
 
 
366
  sop_fig = gr.Plot()
367
 
 
368
  load_btn.click(load_sample, sample, [title, desc, industry])
369
 
370
- gen_btn.click(
371
  generate_sop,
372
  [api_key_state, api_input, base_url, model_name, title, desc, industry, tone, detail],
373
  [sop_md, sop_json, sop_fig, api_key_state],
 
8
  import matplotlib.pyplot as plt
9
  from matplotlib.figure import Figure
10
 
11
+
12
  # ============================================================
13
+ # LLM REQUEST HANDLER — GPT-4.1 ENFORCED
14
  # ============================================================
15
 
16
  def call_chat_completion(
 
19
  system_prompt: str,
20
  user_prompt: str,
21
  model: str = "gpt-4.1",
22
+ max_completion_tokens: int = 2400,
23
  ) -> str:
24
+ """
25
+ Universal OpenAI-compatible ChatCompletion caller.
26
+
27
+ - Enforces GPT-4.1 (stable, JSON-safe, supports large context)
28
+ - Removes temperature, top_p, etc. for compatibility
29
+ - Uses the new OpenAI spec: max_completion_tokens
30
+ - Falls back to legacy max_tokens for compatible APIs
31
+ """
32
 
33
  if not api_key:
34
+ raise ValueError("⚠️ Missing API key.")
35
+
36
+ if not base_url:
37
+ base_url = "https://api.openai.com"
38
 
39
  url = base_url.rstrip("/") + "/v1/chat/completions"
40
 
 
43
  "Content-Type": "application/json",
44
  }
45
 
46
+ # Primary OpenAI 2024+ payload
47
  payload = {
48
  "model": model,
49
  "messages": [
 
53
  "max_completion_tokens": max_completion_tokens,
54
  }
55
 
56
+ # First attempt
57
+ response = requests.post(url, headers=headers, json=payload, timeout=60)
58
 
59
+ # Some models require legacy name: max_tokens
60
+ if response.status_code == 400 and "max_completion_tokens" in response.text:
 
61
  payload.pop("max_completion_tokens", None)
62
+ payload["max_tokens"] = max_completion_tokens
63
+ response = requests.post(url, headers=headers, json=payload, timeout=60)
64
 
65
+ if response.status_code != 200:
66
  raise RuntimeError(
67
+ f"LLM Error {response.status_code}:\n{response.text[:500]}"
68
  )
69
 
70
+ data = response.json()
71
+
72
  try:
73
  return data["choices"][0]["message"]["content"]
74
+ except Exception as error:
75
+ raise RuntimeError(
76
+ "❌ Unexpected LLM response format:\n\n"
77
+ f"{json.dumps(data, indent=2)}"
78
+ ) from error
79
 
80
 
81
  # ============================================================
82
+ # SOP PROMPTS + PARSING
83
  # ============================================================
84
 
85
  SOP_SYSTEM_PROMPT = """
86
+ You are an elite SOP architect. Produce SOPs strictly in JSON.
87
+
88
+ Use this exact schema:
89
 
90
  {
91
  "title": "",
 
110
  "versioning": {"version": "1.0","owner": "","last_updated": ""}
111
  }
112
 
113
+ Return ONLY JSON — no prose, no commentary.
114
  """
115
 
116
  def build_user_prompt(title, desc, industry, tone, detail):
 
120
  Industry: {industry}
121
  Tone: {tone}
122
  Detail Level: {detail}
123
+ Audience: mid-career professionals needing clarity, accountability, and structure.
124
  """
125
 
 
126
def parse_sop_json(raw: str) -> Dict[str, Any]:
    """Extract and decode the JSON object embedded in an LLM reply.

    Tolerates markdown code fences and surrounding prose.  Raises
    ValueError when no JSON object boundaries can be located.
    """
    text = raw.strip()

    # If fenced, keep the first fence segment that contains a brace
    # (falling back to the last segment, as the original did).
    if text.startswith("```"):
        segments = text.split("```")
        candidate = None
        for segment in segments:
            if "{" in segment:
                candidate = segment
                break
        text = candidate if candidate is not None else segments[-1]

    # Locate the outermost object boundaries.
    opening = text.find("{")
    closing = text.rfind("}")

    if opening == -1 or closing == -1:
        raise ValueError("❌ No valid JSON detected.")

    return json.loads(text[opening:closing + 1])
146
 
147
 
148
  def sop_to_markdown(sop: Dict[str, Any]) -> str:
149
+ """
150
+ Converts SOP JSON to clean, readable Markdown.
151
+ """
152
 
153
  def bullet(items):
154
  if not items:
 
156
  return "\n".join(f"- {i}" for i in items)
157
 
158
  md = []
 
159
 
160
+ # Header
161
+ md.append(f"# {sop.get('title','Untitled SOP')}")
162
+
163
+ # Purpose
164
+ md.append("\n## 1. Purpose")
165
+ md.append(sop.get("purpose", "N/A"))
166
 
167
+ # Scope
168
+ md.append("\n## 2. Scope")
169
+ md.append(sop.get("scope", "N/A"))
170
 
171
+ # Definitions
172
+ md.append("\n## 3. Definitions")
173
+ md.append(bullet(sop.get("definitions", [])))
174
+
175
+ # Roles
176
+ md.append("\n## 4. Roles & Responsibilities")
177
  for r in sop.get("roles", []):
178
+ md.append(f"### {r.get('name', 'Role')}")
179
  md.append(bullet(r.get("responsibilities", [])))
180
 
181
+ # Prerequisites
182
+ md.append("\n## 5. Prerequisites")
183
+ md.append(bullet(sop.get("prerequisites", [])))
184
 
185
+ # Steps
186
+ md.append("\n## 6. Procedure — Step-by-Step")
187
  for st in sop.get("steps", []):
188
  md.append(f"### Step {st['step_number']}: {st['title']}")
189
  md.append(f"**Owner:** {st['owner_role']}")
190
  md.append(st["description"])
191
+ md.append("\n**Inputs:**\n" + bullet(st["inputs"]))
192
  md.append("**Outputs:**\n" + bullet(st["outputs"]))
193
 
194
+ # Escalation
195
+ md.append("\n## 7. Escalation")
196
+ md.append(bullet(sop.get("escalation", [])))
197
+
198
+ # Metrics
199
+ md.append("\n## 8. Metrics")
200
+ md.append(bullet(sop.get("metrics", [])))
201
 
202
+ # Risks
203
+ md.append("\n## 9. Risks")
204
+ md.append(bullet(sop.get("risks", [])))
205
+
206
+ # Version
207
  v = sop.get("versioning", {})
208
+ md.append("\n## 10. Version Control")
209
  md.append(f"- Version: {v.get('version','1.0')}")
210
  md.append(f"- Owner: {v.get('owner','N/A')}")
211
  md.append(f"- Last Updated: {v.get('last_updated','N/A')}")
 
214
 
215
 
216
  # ============================================================
217
+ # PERFECTED DIAGRAM RENDERING — AUTO-SIZED CARDS
218
  # ============================================================
219
 
220
def create_sop_steps_figure(sop: Dict[str, Any]) -> Figure:
    """
    Render the SOP's steps as a vertical stack of auto-sized cards.

    Each card shows the step number (boxed on the left edge), the step
    title, the owning role, and the wrapped description text.

    Parameters
    ----------
    sop : dict
        Parsed SOP JSON; only the "steps" list is read.  Each step is
        expected to provide "step_number", "title", "owner_role" and
        "description" (schema per SOP_SYSTEM_PROMPT — confirm upstream).

    Returns
    -------
    Figure
        A matplotlib figure, or a small placeholder figure when the SOP
        has no steps.
    """
    steps = sop.get("steps", [])
    if not steps:
        fig, ax = plt.subplots(figsize=(8, 3))
        ax.text(
            0.5, 0.5,
            "No steps available.",
            ha="center", va="center", fontsize=14
        )
        ax.axis("off")
        return fig

    # Single wrap width used BOTH for measuring and for drawing.  The
    # previous version measured heights at width=70 but drew at width=72,
    # so the computed card height did not match the rendered text.
    wrap_width = 70

    # --------------------------------------------------------------------
    # Measure: wrap each description once and derive per-card heights.
    # --------------------------------------------------------------------
    wrapped_descriptions = []
    block_heights = []
    total_height = 0

    for st in steps:
        desc_lines = textwrap.wrap(st["description"], width=wrap_width)
        wrapped_descriptions.append(desc_lines)

        # At least 3 lines of height (title + owner + one spare line).
        num_lines = max(2 + len(desc_lines), 3)
        block_height = num_lines * 0.35 + 0.5  # tuned for readability

        block_heights.append(block_height)
        total_height += block_height + 0.5  # inter-card spacing

    fig_height = min(25, max(6, total_height))
    fig, ax = plt.subplots(figsize=(12, fig_height))

    # Cards are laid out top-down in data coordinates, starting at the top.
    y = total_height

    # --------------------------------------------------------------------
    # Draw each step card.
    # --------------------------------------------------------------------
    for idx, st in enumerate(steps):
        block_h = block_heights[idx]
        desc_lines = wrapped_descriptions[idx]  # reuse measured wrapping

        # Card horizontal bounds (fixed margins on both sides).
        x0 = 0.05
        x1 = 0.95

        # Card outline
        ax.add_patch(
            plt.Rectangle(
                (x0, y - block_h),
                x1 - x0,
                block_h,
                fill=False,
                linewidth=1.8
            )
        )

        # Step-number box flush against the card's left edge.
        num_box_w = 0.085
        ax.add_patch(
            plt.Rectangle(
                (x0, y - block_h),
                num_box_w,
                block_h,
                fill=False,
                linewidth=1.6
            )
        )

        # Step number, centered in its box.
        ax.text(
            x0 + num_box_w / 2,
            y - block_h / 2,
            str(st["step_number"]),
            fontsize=14,
            fontweight="bold",
            ha="center",
            va="center"
        )

        # Text content starts just right of the number box.
        text_x = x0 + num_box_w + 0.02

        # Title
        ax.text(
            text_x,
            y - 0.35,
            st["title"],
            fontsize=13,
            fontweight="bold",
            ha="left",
            va="top"
        )

        # Owner
        ax.text(
            text_x,
            y - 0.75,
            f"Owner: {st['owner_role']}",
            fontsize=11,
            style="italic",
            ha="left",
            va="top"
        )

        # Wrapped description: one text artist per line.
        text_y = y - 1.15
        for line in desc_lines:
            ax.text(
                text_x,
                text_y,
                line,
                fontsize=10,
                ha="left",
                va="top"
            )
            text_y -= 0.32

        # Advance past this card plus spacing.
        y -= (block_h + 0.5)

    ax.axis("off")
    fig.tight_layout()
    return fig
 
356
 
357
 
358
  # ============================================================
359
+ # SAMPLE PRESETS
360
  # ============================================================
361
 
362
# Preset SOP scenarios selectable from the UI dropdown.
SAMPLE_SOPS = {
    "Volunteer Onboarding": {
        "title": "Volunteer Onboarding",
        "description": "Onboarding volunteers: background check, orientation, training, placement.",
        "industry": "Nonprofit",
    },
    "Remote Employee Onboarding": {
        "title": "Remote Employee Onboarding",
        "description": "SOP for a hybrid workforce: setup, HR docs, orientation, training.",
        "industry": "HR",
    },
    "IT Outage Response": {
        "title": "IT Outage Response",
        "description": "Major IT incident response: triage, escalation, remediation, communication.",
        "industry": "IT",
    },
}

def load_sample(name: str):
    """Return (title, description, industry) for a named preset.

    Unknown names yield blank title/description and the "General" industry.
    """
    preset = SAMPLE_SOPS.get(name)
    if preset is None:
        return "", "", "General"
    return preset["title"], preset["description"], preset["industry"]
385
 
386
 
387
  # ============================================================
388
+ # MAIN GENERATION HANDLER
389
  # ============================================================
390
 
391
  def generate_sop(
392
  api_key_state,
393
+ api_input,
394
  base_url,
395
  model,
396
  title,
 
400
  detail
401
  ):
402
 
403
+ api_key = api_input or api_key_state
404
+
405
  if not api_key:
406
+ return (
407
+ "⚠️ Please enter an API key.",
408
+ "",
409
+ create_sop_steps_figure({"steps": []}),
410
+ api_key_state
411
+ )
412
+
413
+ # Always force GPT-4.1 for safety
414
+ model = "gpt-4.1"
415
 
416
  try:
417
  user_prompt = build_user_prompt(title, desc, industry, tone, detail)
 
421
  base_url=base_url,
422
  system_prompt=SOP_SYSTEM_PROMPT,
423
  user_prompt=user_prompt,
424
+ model=model,
425
+ max_completion_tokens=2400,
426
  )
427
 
428
  sop = parse_sop_json(raw)
 
433
  return md, json_out, fig, api_key
434
 
435
  except Exception as e:
436
+ return (
437
+ f"❌ Error generating SOP:\n{e}",
438
+ "",
439
+ create_sop_steps_figure({"steps": []}),
440
+ api_key_state
441
+ )
442
 
443
 
444
  # ============================================================
445
+ # UI — FULL GRADIO BLOCKS
446
  # ============================================================
447
 
448
  with gr.Blocks(title="ZEN Simple SOP Builder") as demo:
449
 
450
  gr.Markdown("""
451
+ # 🧭 ZEN Simple SOP Builder
452
+ Professional workflows SOP + Diagram
453
+ Powered by **GPT-4.1**
454
  """)
455
 
456
  api_key_state = gr.State("")
457
 
458
  with gr.Row():
459
+ # Left config column
460
  with gr.Column(scale=1):
 
 
 
461
 
462
+ gr.Markdown("### Step 1 — API Configuration")
463
+ api_input = gr.Textbox(
464
+ "API Key",
465
+ type="password",
466
+ placeholder="Enter OpenAI API key"
467
+ )
468
+
469
+ base_url = gr.Textbox(
470
+ "Base URL",
471
+ value="https://api.openai.com"
472
+ )
473
+
474
+ model_name = gr.Textbox(
475
+ "Model (forced to GPT-4.1)",
476
+ value="gpt-4.1"
477
+ )
478
+
479
+ # Samples
480
+ gr.Markdown("### Load a Sample SOP")
481
+ sample = gr.Dropdown(
482
+ "Sample SOP",
483
+ choices=list(SAMPLE_SOPS.keys())
484
+ )
485
+ load_btn = gr.Button("Load Example")
486
 
487
+ # Right SOP config column
488
  with gr.Column(scale=2):
 
 
 
 
 
489
 
490
+ gr.Markdown("### Step 2 — Describe Your SOP")
491
+
492
+ title = gr.Textbox(
493
+ "SOP Title",
494
+ placeholder="e.g. Volunteer Onboarding Workflow"
495
+ )
496
+
497
+ desc = gr.Textbox(
498
+ "Context / Summary",
499
+ lines=5,
500
+ placeholder="Describe what this SOP needs to cover..."
501
+ )
502
+
503
+ industry = gr.Textbox(
504
+ "Industry",
505
+ value="General"
506
+ )
507
+
508
+ tone = gr.Dropdown(
509
+ "Tone",
510
+ choices=["Professional", "Executive", "Supportive"],
511
+ value="Professional"
512
+ )
513
+
514
+ detail = gr.Dropdown(
515
+ "Detail Level",
516
+ choices=["Standard", "High detail", "Checklist"],
517
+ value="Standard"
518
+ )
519
+
520
+ generate_btn = gr.Button(
521
+ "🚀 Generate SOP",
522
+ variant="primary"
523
+ )
524
 
525
+ # SOP results
526
  sop_md = gr.Markdown()
527
  sop_json = gr.Code(language="json")
528
+
529
+ # Diagram
530
+ gr.Markdown("### Visual Workflow Diagram")
531
  sop_fig = gr.Plot()
532
 
533
+ # Events
534
  load_btn.click(load_sample, sample, [title, desc, industry])
535
 
536
+ generate_btn.click(
537
  generate_sop,
538
  [api_key_state, api_input, base_url, model_name, title, desc, industry, tone, detail],
539
  [sop_md, sop_json, sop_fig, api_key_state],