ZENLLC committed on
Commit
80afc4b
·
verified ·
1 Parent(s): 0b59de7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +194 -88
app.py CHANGED
@@ -1,136 +1,242 @@
 
 
 
 
 
1
  import gradio as gr
2
  from openai import OpenAI
3
- from typing import List, Dict
4
 
5
- # ----------------------------
 
 
 
 
 
6
  # Client
7
- # ----------------------------
8
  def get_client(key: str) -> OpenAI:
9
  key = (key or "").strip()
10
  if not key:
11
  raise gr.Error("Please enter your OpenAI API key.")
12
  return OpenAI(api_key=key)
13
 
14
- # ----------------------------
15
- # GPT-5 Chat (messages format)
16
- # ----------------------------
17
- def chat_handle(api_key: str, user_input: str, history: List[Dict]):
18
-
19
- # Ensure history is a list of {"role": "...", "content": "..."}
20
  history = history or []
21
  user_input = (user_input or "").strip()
22
  if not user_input:
23
- # no change; just return existing state and clear the input
24
  return history, history, gr.update(value="")
25
 
26
- client = get_client(api_key)
27
-
28
- # Append the user message to conversation state used for the API call
29
  msgs = history + [{"role": "user", "content": user_input}]
30
-
31
  try:
32
  stream = client.chat.completions.create(
33
  model="gpt-5",
34
  messages=msgs,
35
  stream=True,
36
  )
37
-
38
- # Stream assistant tokens and update the UI
39
- assistant_accum = ""
40
- # While streaming we show a *temporary* assistant message
41
  for chunk in stream:
42
  delta = chunk.choices[0].delta.content or ""
43
- assistant_accum += delta
44
- # Show msgs plus the in-progress assistant message
45
- yield (msgs + [{"role": "assistant", "content": assistant_accum}]), msgs, gr.update(value="")
46
-
47
- # On completion, commit the assistant message to history
48
- final_history = msgs + [{"role": "assistant", "content": assistant_accum}]
49
- yield final_history, final_history, gr.update(value="")
50
-
51
  except Exception as e:
52
  err = f"[Error] {e}"
53
- final_history = msgs + [{"role": "assistant", "content": err}]
54
- yield final_history, final_history, gr.update(value="")
55
-
56
- # ----------------------------
57
- # Code Assistant (Explain / Refactor)
58
- # ----------------------------
59
- def analyze_code(api_key: str, code_snippet: str, mode: str) -> str:
60
- client = get_client(api_key)
61
- code = (code_snippet or "").strip()
62
- if not code:
63
- raise gr.Error("Paste some code first.")
64
-
65
- if mode == "Explain & Improve":
66
- prompt = (
67
- "You are an expert software engineer. Explain the following code clearly, "
68
- "point out issues, and propose concrete improvements with rationale. "
69
- "If relevant, show a revised snippet.\n\n"
70
- f"{code}"
71
- )
72
- else:
73
- prompt = (
74
- "You are an expert software engineer. Refactor the following code for clarity, "
75
- "performance, error handling, and maintainability. Keep the same behavior. "
76
- "Return the improved code first, then a short summary of changes.\n\n"
77
- f"{code}"
78
- )
79
-
80
  try:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
81
  resp = client.chat.completions.create(
82
  model="gpt-5",
83
- messages=[
84
- {"role": "system", "content": "You communicate clearly and concisely to advanced developers."},
85
- {"role": "user", "content": prompt},
86
- ],
87
  )
88
- return resp.choices[0].message.content
89
- except Exception as e:
90
- raise gr.Error(f"Code analysis failed: {e}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
91
 
92
- # ----------------------------
93
  # UI
94
- # ----------------------------
95
- with gr.Blocks(title="ZEN GPT-5 Production SDK") as demo:
96
  gr.Markdown("### 🔐 Enter your OpenAI API key (not stored)")
97
  api_key = gr.Textbox(placeholder="sk-...", type="password", label="OpenAI API Key")
98
 
99
  with gr.Tab("💬 Chat"):
100
  chatbox = gr.Chatbot(label="GPT-5 Chat", height=420, type="messages")
101
- history_state = gr.State([]) # list[dict(role, content)]
102
  user_in = gr.Textbox(placeholder="Say hi…", label="Message")
103
  send_btn = gr.Button("Send", variant="primary")
104
  clear_btn = gr.Button("Clear Chat")
105
 
106
- # Wire up: send triggers streaming; we update chatbox + state + clear input
107
- send_btn.click(
108
- chat_handle,
109
- inputs=[api_key, user_in, history_state],
110
- outputs=[chatbox, history_state, user_in],
111
- queue=True,
112
- )
113
- user_in.submit(
114
- chat_handle,
115
- inputs=[api_key, user_in, history_state],
116
- outputs=[chatbox, history_state, user_in],
117
- queue=True,
118
- )
119
- clear_btn.click(lambda: ([], []), inputs=None, outputs=[chatbox, history_state])
120
 
121
- with gr.Tab("💻 Code Assistant"):
122
- gr.Markdown("Use GPT-5 to **explain or refactor** any code.")
 
 
 
 
123
  mode = gr.Radio(
124
- ["Explain & Improve", "Refactor Only"],
125
- value="Explain & Improve",
126
- label="Mode"
127
  )
128
- code_input = gr.Code(label="Paste your code", language="python", lines=20)
129
- run_btn = gr.Button("Analyze Code", variant="primary")
130
- result = gr.Textbox(label="Result", lines=20)
131
- run_btn.click(analyze_code, [api_key, code_input, mode], result)
132
 
133
- # Subtle program stamp (bottom-right)
134
  gr.HTML(
135
  "<div style='text-align:right; font-size:12px; opacity:0.55; margin-top:10px;'>"
136
  "Module 3 – ZEN SDK Production"
 
1
+ import io
2
+ import os
3
+ import base64
4
+ from typing import List, Dict, Tuple
5
+
6
  import gradio as gr
7
  from openai import OpenAI
 
8
 
9
+ # Optional parsers
10
+ import pandas as pd
11
+ from pypdf import PdfReader
12
+ from docx import Document as DocxDocument
13
+
14
+ # ===============================
15
  # Client
16
+ # ===============================
17
def get_client(key: str) -> OpenAI:
    """Build an OpenAI client from a user-supplied API key.

    Raises:
        gr.Error: when the key is blank, so Gradio shows a friendly message.
    """
    cleaned = (key or "").strip()
    if not cleaned:
        raise gr.Error("Please enter your OpenAI API key.")
    return OpenAI(api_key=cleaned)
22
 
23
+ # ===============================
24
+ # Chat (messages format + streaming)
25
+ # ===============================
26
def stream_chat(api_key: str, user_input: str, history: List[Dict]):
    """Stream a GPT-5 chat completion into the Gradio chatbox.

    Yields ``(chatbox_messages, history_state, input_update)`` tuples so the
    UI updates token by token; the final yield commits the assistant message
    to the history state and clears the input box.

    Args:
        api_key: user-supplied OpenAI key (validated by get_client).
        user_input: raw text from the message box.
        history: prior conversation as a list of {"role", "content"} dicts.
    """
    client = get_client(api_key)

    history = history or []
    user_input = (user_input or "").strip()
    if not user_input:
        # BUG FIX: this is a generator function, so a bare `return value`
        # discards the outputs (they land on StopIteration and Gradio never
        # sees them). Yield the unchanged state first, then stop.
        yield history, history, gr.update(value="")
        return

    msgs = history + [{"role": "user", "content": user_input}]
    try:
        stream = client.chat.completions.create(
            model="gpt-5",
            messages=msgs,
            stream=True,
        )
        acc = ""
        for chunk in stream:
            # delta.content may be None on role/stop chunks; coerce to "".
            delta = chunk.choices[0].delta.content or ""
            acc += delta
            # Show the in-progress assistant message without committing it.
            yield msgs + [{"role": "assistant", "content": acc}], msgs, gr.update(value="")
        # Commit the completed assistant turn to the history state.
        final_hist = msgs + [{"role": "assistant", "content": acc}]
        yield final_hist, final_hist, gr.update(value="")
    except Exception as e:
        # Surface API failures inline in the chat instead of crashing the UI.
        err = f"[Error] {e}"
        final_hist = msgs + [{"role": "assistant", "content": err}]
        yield final_hist, final_hist, gr.update(value="")
51
+
52
+ # ===============================
53
+ # Pro Brief File ingestion
54
+ # ===============================
55
# Supported upload extensions, bucketed by the parser that handles them.
TEXT_EXTS = {".txt", ".md", ".markdown"}  # read verbatim (read_text_file)
DOCX_EXTS = {".docx"}                     # parsed via python-docx (read_docx)
PDF_EXTS = {".pdf"}                       # parsed via pypdf (read_pdf)
CSV_EXTS = {".csv"}                       # summarized via pandas (summarize_csv)
59
+
60
+ def _ext(path: str) -> str:
61
+ return os.path.splitext(path.lower())[1]
62
+
63
def read_text_file(fp: str) -> str:
    """Read a plain-text file, preferring UTF-8 with a latin-1 fallback.

    latin-1 maps every byte, so the fallback cannot itself fail to decode.
    """
    try:
        with open(fp, "r", encoding="utf-8") as handle:
            return handle.read()
    except UnicodeDecodeError:
        pass
    with open(fp, "r", encoding="latin-1") as handle:
        return handle.read()
71
+
72
def read_pdf(fp: str) -> str:
    """Extract the text of every PDF page, newline-joined and stripped."""
    pages = []
    with open(fp, "rb") as handle:
        for page in PdfReader(handle).pages:
            # extract_text() may return None for image-only pages.
            pages.append(page.extract_text() or "")
    return "\n".join(pages).strip()
80
+
81
def read_docx(fp: str) -> str:
    """Concatenate all paragraph text from a .docx document."""
    paragraphs = DocxDocument(fp).paragraphs
    return "\n".join(p.text for p in paragraphs).strip()
84
+
85
def summarize_csv(fp: str) -> str:
    """Build a plain-text digest of a CSV for the LLM.

    Includes shape, column dtypes, describe() statistics, and the first 10
    rows. Tries comma, then semicolon, then tab delimiters; a failure on the
    final attempt propagates to the caller.
    """
    try:
        df = pd.read_csv(fp)
    except Exception:
        try:
            df = pd.read_csv(fp, sep=";")
        except Exception:
            df = pd.read_csv(fp, sep="\t")

    n_rows, n_cols = df.shape
    shape_info = f"Rows: {n_rows}, Columns: {n_cols}"
    column_types = ", ".join(f"{name} ({df[name].dtype})" for name in df.columns)
    stats = df.describe(include="all").transpose().fillna("").to_string()
    preview = df.head(10).to_string(index=False)

    sections = [
        "CSV SUMMARY",
        f"{shape_info}\n",
        f"COLUMNS & TYPES:\n{column_types}\n",
        f"DESCRIBE():\n{stats}\n",
        f"FIRST 10 ROWS:\n{preview}\n",
    ]
    return "\n".join(sections)
108
+
109
def load_files(files: List[gr.File]) -> Tuple[str, List[str]]:
    """Parse every uploaded file into text and return (joined_text, basenames).

    Accepts whatever Gradio passes for a File component: plain path strings
    (the UI uses ``type="filepath"``) or tempfile wrappers exposing ``.name``.

    Returns:
        A tuple of the parsed texts joined with "-----" separators and the
        list of uploaded file basenames, in upload order.

    Raises:
        gr.Error: when no files were uploaded or an extension is unsupported.
    """
    if not files:
        raise gr.Error("Please upload at least one file (PDF, DOCX, TXT, MD, or CSV).")
    texts = []
    names = []
    for f in files:
        # BUG FIX: with type="filepath" Gradio hands back plain strings, so
        # unconditionally reading `f.name` raised AttributeError. Support
        # both strings and file-wrapper objects.
        path = f if isinstance(f, str) else f.name
        names.append(os.path.basename(path))
        ext = _ext(path)
        if ext in TEXT_EXTS:
            texts.append(read_text_file(path))
        elif ext in PDF_EXTS:
            texts.append(read_pdf(path))
        elif ext in DOCX_EXTS:
            texts.append(read_docx(path))
        elif ext in CSV_EXTS:
            texts.append(summarize_csv(path))
        else:
            raise gr.Error(f"Unsupported file type: {ext}")
    return "\n\n-----\n\n".join(texts), names
129
+
130
+ # ===============================
131
+ # Pro Brief – Chunking & synthesis
132
+ # ===============================
133
def chunk_text(s: str, max_chars: int = 12000) -> List[str]:
    """Split *s* into chunks of at most *max_chars* characters.

    Prefers breaking on a paragraph boundary ("\\n\\n"), but only when that
    boundary leaves a reasonably sized chunk (more than 2000 characters past
    the current position); otherwise it cuts hard at the limit.
    """
    s = s.strip()
    if len(s) <= max_chars:
        return [s]

    pieces: List[str] = []
    pos = 0
    total = len(s)
    while pos < total:
        limit = min(pos + max_chars, total)
        split_at = s.rfind("\n\n", pos, limit)
        if split_at == -1 or split_at <= pos + 2000:
            split_at = limit
        pieces.append(s[pos:split_at])
        pos = split_at
    return pieces
148
+
149
def llm_summarize_chunks(client: OpenAI, chunks: List[str], mode: str, custom_note: str) -> List[str]:
    """Map step: summarize each chunk independently according to *mode*.

    Args:
        client: configured OpenAI client.
        chunks: source text pieces produced by chunk_text().
        mode: one of the five output modes offered by the UI radio.
        custom_note: optional extra guidance appended to each prompt.

    Returns:
        One summary string per input chunk, in order.

    Raises:
        KeyError: if *mode* is not a known mode (the UI radio prevents this).
    """
    # Hoisted out of the loop: the prompt table, mode lookup, and system
    # message are loop-invariant, so build them once per call instead of
    # once per chunk.
    mode_prompts = {
        "Executive Brief": (
            "Create a crisp executive brief with sections: Context, Key Findings, Metrics, Implications, Decisions Needed."
        ),
        "Action Items": (
            "Extract actionable tasks with owners (if available), deadlines (if implied), dependencies, and priority."
        ),
        "Risks & Mitigations": (
            "Identify key risks, likelihood, impact, and concrete mitigations. Include watchpoints and triggers."
        ),
        "Meeting Minutes": (
            "Produce clean, structured minutes: Attendees (if inferable), Agenda, Discussion, Decisions, Action Items."
        ),
        "JSON Summary": (
            "Return a compact JSON with keys: context, findings[], metrics{}, actions[], risks[], decisions[]."
        ),
    }
    mode_prompt = mode_prompts[mode]
    # Renamed from `sys`/`usr`: `sys` shadows the stdlib module name.
    system_msg = "You are a senior analyst. Write succinctly; use bullet points where appropriate."

    summaries = []
    total = len(chunks)
    for i, ch in enumerate(chunks, start=1):
        user_msg = f"{mode_prompt}\n\n{('Additional guidance: ' + custom_note) if custom_note else ''}\n\n---\nSOURCE CHUNK {i}/{total}:\n{ch}\n"
        resp = client.chat.completions.create(
            model="gpt-5",
            messages=[{"role": "system", "content": system_msg},
                      {"role": "user", "content": user_msg}],
        )
        summaries.append(resp.choices[0].message.content.strip())
    return summaries
179
+
180
def llm_synthesize_final(client: OpenAI, mode: str, names: List[str], partials: List[str], custom_note: str) -> str:
    """Reduce step: merge per-chunk summaries into one final deliverable."""
    system_msg = "You are a chief of staff producing board-ready output. Tight, accurate, and well-structured."
    labeled = [f"[PART {idx + 1}]\n{part}" for idx, part in enumerate(partials)]
    corpus = "\n\n---\n\n".join(labeled)
    guidance = ('Additional guidance: ' + custom_note) if custom_note else ''
    user_msg = (
        f"Files analyzed: {', '.join(names)}\n\n"
        f"Mode: {mode}\n"
        f"{guidance}\n\n"
        "Synthesize the PARTS into a single cohesive deliverable. If JSON mode, return only JSON."
        "\n\n---\nCORPUS (SUMMARIES):\n" + corpus
    )
    resp = client.chat.completions.create(
        model="gpt-5",
        messages=[{"role": "system", "content": system_msg},
                  {"role": "user", "content": user_msg}],
    )
    return resp.choices[0].message.content.strip()
196
+
197
def pro_brief(api_key: str, files: List[gr.File], mode: str, custom_note: str) -> str:
    """End-to-end Pro Brief pipeline: ingest → chunk → map-summarize → synthesize."""
    client = get_client(api_key)
    combined_text, file_names = load_files(files)
    pieces = chunk_text(combined_text, max_chars=12000)
    partial_summaries = llm_summarize_chunks(client, pieces, mode, custom_note)
    return llm_synthesize_final(client, mode, file_names, partial_summaries, custom_note)
204
 
205
+ # ===============================
206
  # UI
207
+ # ===============================
208
+ with gr.Blocks(title="ZEN GPT-5 Production Tools") as demo:
209
  gr.Markdown("### 🔐 Enter your OpenAI API key (not stored)")
210
  api_key = gr.Textbox(placeholder="sk-...", type="password", label="OpenAI API Key")
211
 
212
  with gr.Tab("💬 Chat"):
213
  chatbox = gr.Chatbot(label="GPT-5 Chat", height=420, type="messages")
214
+ history_state = gr.State([])
215
  user_in = gr.Textbox(placeholder="Say hi…", label="Message")
216
  send_btn = gr.Button("Send", variant="primary")
217
  clear_btn = gr.Button("Clear Chat")
218
 
219
+ send_btn.click(stream_chat, [api_key, user_in, history_state], [chatbox, history_state, user_in], queue=True)
220
+ user_in.submit(stream_chat, [api_key, user_in, history_state], [chatbox, history_state, user_in], queue=True)
221
+ clear_btn.click(lambda: ([], []), None, [chatbox, history_state])
 
 
 
 
 
 
 
 
 
 
 
222
 
223
+ with gr.Tab("📄 Pro Brief (Docs → Executive Output)"):
224
+ gr.Markdown(
225
+ "Upload PDFs, DOCX, TXT, MD, or CSV. Get an **Executive Brief**, **Action Items**, "
226
+ "**Risks & Mitigations**, **Meeting Minutes**, or a **JSON Summary**."
227
+ )
228
+ files = gr.File(label="Upload files", file_count="multiple", type="filepath")
229
  mode = gr.Radio(
230
+ ["Executive Brief", "Action Items", "Risks & Mitigations", "Meeting Minutes", "JSON Summary"],
231
+ value="Executive Brief",
232
+ label="Output Mode",
233
  )
234
+ custom = gr.Textbox(label="Optional guidance (tone, audience, focus areas)", lines=3, placeholder="e.g., Board-ready; focus on budget impact and timeline risk.")
235
+ run = gr.Button("Generate Pro Brief", variant="primary")
236
+ out = gr.Markdown(label="Output", show_copy_button=True)
237
+ run.click(pro_brief, [api_key, files, mode, custom], out)
238
 
239
+ # Subtle program stamp
240
  gr.HTML(
241
  "<div style='text-align:right; font-size:12px; opacity:0.55; margin-top:10px;'>"
242
  "Module 3 – ZEN SDK Production"