iammraat committed on
Commit
ca97bcb
·
verified ·
1 Parent(s): 5a52bd1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +109 -116
app.py CHANGED
@@ -1,79 +1,3 @@
1
- # import gradio as gr
2
-
3
- # # 1. Add the CSS string at the top of your file
4
- # CSS = """
5
- # .container { max-width: 1200px; margin: auto; }
6
- # /* This forces tables to scroll horizontally instead of squishing */
7
- # .prose table {
8
- # display: block;
9
- # overflow-x: auto;
10
- # white-space: nowrap;
11
- # width: 100%;
12
- # }
13
- # .prose th, .prose td {
14
- # padding: 10px;
15
- # border: 1px solid #444;
16
- # min-width: 150px;
17
- # }
18
- # """
19
-
20
- # print("Initializing AI trained Models... (This will run core_engine.py)")
21
-
22
- # # 2. Just import it!
23
- # # Because your engine initializations are at the bottom of core_engine.py,
24
- # # this single import line will automatically trigger them to load.
25
- # from core_engine import (
26
- # run_multi_step_workflow,
27
- # chat_memory,
28
- # engine_derby,
29
- # engine_influx
30
- # )
31
-
32
- # print("Engine Ready! Launching UI...")
33
-
34
- # # 3. Define the Gradio Chat Function
35
- # def respond_to_coworker(message, history):
36
- # try:
37
- # # Call your orchestrator directly
38
- # response = run_multi_step_workflow(
39
- # message,
40
- # engine_derby,
41
- # engine_influx,
42
- # chat_memory,
43
- # debug=True # Set to False if you want to hide terminal logs
44
- # )
45
- # return response
46
- # except Exception as e:
47
- # import traceback
48
- # traceback.print_exc()
49
- # return f"🚨 Engine Error: {str(e)}"
50
-
51
- # # 4. Build the UI using your Blocks structure
52
- # with gr.Blocks(css=CSS) as demo:
53
- # gr.Markdown("# πŸ“Š DBA Diagnostic Copilot")
54
- # gr.ChatInterface(
55
- # fn=respond_to_coworker,
56
- # examples=[
57
- # "Identify the single worst spike and show me its execution plan.",
58
- # "List all targets running on Linux.",
59
- # "What are the top sql issues on the production target?",
60
- # "What is the current status of all my targets?"
61
- # ]
62
- # )
63
-
64
- # # 5. Launch unconditionally to prevent the "Exit code: 0" bug on HF Spaces
65
- # print("Starting Gradio server...")
66
- # demo.launch()
67
-
68
-
69
-
70
-
71
-
72
-
73
-
74
-
75
-
76
-
77
 
78
  # import gradio as gr
79
 
@@ -96,7 +20,7 @@
96
 
97
  # print("Initializing AI trained Models... (This will run core_engine.py)")
98
 
99
- # # 2. Import the CLASS (DialogueStateTracker), not the global instance
100
  # from core_engine import (
101
  # run_multi_step_workflow,
102
  # DialogueStateTracker,
@@ -124,7 +48,7 @@
124
  # # The main brain: runs the engine using the cleanly saved message string
125
  # def bot_response(saved_msg, history, session_memory):
126
  # try:
127
- # # 1. Run the engine using the pure string, avoiding any 'list' object errors
128
  # response = run_multi_step_workflow(
129
  # saved_msg,
130
  # engine_derby,
@@ -133,18 +57,25 @@
133
  # debug=True
134
  # )
135
 
 
 
 
 
 
 
 
 
 
 
136
  # # Safely update the blank assistant placeholder regardless of Gradio version
137
  # if isinstance(history[-1], dict):
138
- # history[-1]["content"] = str(response)
139
  # elif hasattr(history[-1], "content"): # For Gradio dataclasses
140
- # history[-1].content = str(response)
141
  # else: # Fallback for old Gradio versions
142
- # history[-1] = (history[-1][0], str(response))
143
-
144
- # # 2. Extract the current context to update the UI Status Bar
145
- # state = session_memory.get_state()
146
- # active_target = state.get("active_target")
147
 
 
148
  # if active_target:
149
  # status = f"🎯 **Current Context:** `{active_target}`"
150
  # else:
@@ -166,17 +97,17 @@
166
 
167
  # return history, gr.update()
168
 
169
- # # 3. Build the UI
170
  # with gr.Blocks() as demo:
171
  # gr.Markdown("# πŸ“Š DBA Diagnostic Copilot")
172
 
173
  # status_bar = gr.Markdown("🌐 **Current Context:** Global (Derby/Influx)")
174
 
175
- # chatbot = gr.Chatbot(height=550) # No 'type' argument needed here anymore
176
 
177
- # # State variables
178
  # chat_memory = gr.State(init_memory)
179
- # saved_msg = gr.State("") # NEW: Safely holds the pure string message
180
 
181
  # with gr.Row():
182
  # msg = gr.Textbox(
@@ -239,13 +170,13 @@
239
 
240
 
241
 
242
-
243
  import gradio as gr
244
 
245
- # 1. Add the CSS string at the top of your file
246
  CSS = """
247
  .container { max-width: 1200px; margin: auto; }
248
- /* This forces tables to scroll horizontally instead of squishing */
 
249
  .prose table {
250
  display: block;
251
  overflow-x: auto;
@@ -257,11 +188,80 @@ CSS = """
257
  border: 1px solid #444;
258
  min-width: 150px;
259
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
260
  """
261
 
262
  print("Initializing AI trained Models... (This will run core_engine.py)")
263
 
264
- # 2. Import the components from your engine
265
  from core_engine import (
266
  run_multi_step_workflow,
267
  DialogueStateTracker,
@@ -271,25 +271,19 @@ from core_engine import (
271
 
272
  print("Engine Ready! Launching UI...")
273
 
274
- # Helper to initialize a fresh, private memory for every new user
275
  def init_memory():
276
  return DialogueStateTracker()
277
 
278
- # Prepares the UI instantly and safely saves the raw string typed by the user
279
  def prepare_msg(message, history):
280
- # Force the string cast just to be absolutely safe
281
  clean_message = str(message)
282
-
283
  new_history = history + [
284
  {"role": "user", "content": clean_message},
285
  {"role": "assistant", "content": ""}
286
  ]
287
  return "", new_history, clean_message
288
 
289
- # The main brain: runs the engine using the cleanly saved message string
290
  def bot_response(saved_msg, history, session_memory):
291
  try:
292
- # 1. Run the engine using the pure string
293
  response = run_multi_step_workflow(
294
  saved_msg,
295
  engine_derby,
@@ -298,29 +292,28 @@ def bot_response(saved_msg, history, session_memory):
298
  debug=True
299
  )
300
 
301
- # 2. Grab the state from the engine
302
  state = session_memory.get_state()
303
  active_target = state.get("active_target")
304
 
305
- # 3. Inject the visual context indicator directly into the chat bubble!
306
  if active_target:
307
  response = f"> 🎯 **Context Locked:** `{active_target}`\n\n" + str(response)
308
  else:
309
  response = str(response)
310
 
311
- # Safely update the blank assistant placeholder regardless of Gradio version
312
  if isinstance(history[-1], dict):
313
  history[-1]["content"] = response
314
- elif hasattr(history[-1], "content"): # For Gradio dataclasses
315
  history[-1].content = response
316
- else: # Fallback for old Gradio versions
317
  history[-1] = (history[-1][0], response)
318
 
319
- # 4. Extract the current context to update the UI Status Bar as well
320
  if active_target:
321
- status = f"🎯 **Current Context:** `{active_target}`"
322
  else:
323
- status = f"🌐 **Current Context:** Global (Derby/Influx)"
324
 
325
  return history, status
326
 
@@ -338,17 +331,18 @@ def bot_response(saved_msg, history, session_memory):
338
 
339
  return history, gr.update()
340
 
341
- # 5. Build the UI
342
- with gr.Blocks() as demo:
343
  gr.Markdown("# πŸ“Š DBA Diagnostic Copilot")
344
 
345
- status_bar = gr.Markdown("🌐 **Current Context:** Global (Derby/Influx)")
 
346
 
347
- chatbot = gr.Chatbot(height=550)
348
-
349
- # State variables (Private per user tab)
350
  chat_memory = gr.State(init_memory)
351
- saved_msg = gr.State("") # Safely holds the pure string message
 
 
 
352
 
353
  with gr.Row():
354
  msg = gr.Textbox(
@@ -363,12 +357,11 @@ with gr.Blocks() as demo:
363
 
364
  def reset_context(memory):
365
  memory.clear_target_context()
366
- return memory, "🌐 **Current Context:** Global (Derby/Influx)"
367
 
368
  reset_ctx_btn = gr.Button("πŸ”„ Clear Target Context", scale=2)
369
  reset_ctx_btn.click(reset_context, inputs=[chat_memory], outputs=[chat_memory, status_bar])
370
 
371
- # Event wiring: Pass the saved string state to the bot_response
372
  msg.submit(
373
  prepare_msg,
374
  inputs=[msg, chatbot],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
 
2
  # import gradio as gr
3
 
 
20
 
21
  # print("Initializing AI trained Models... (This will run core_engine.py)")
22
 
23
+ # # 2. Import the components from your engine
24
  # from core_engine import (
25
  # run_multi_step_workflow,
26
  # DialogueStateTracker,
 
48
  # # The main brain: runs the engine using the cleanly saved message string
49
  # def bot_response(saved_msg, history, session_memory):
50
  # try:
51
+ # # 1. Run the engine using the pure string
52
  # response = run_multi_step_workflow(
53
  # saved_msg,
54
  # engine_derby,
 
57
  # debug=True
58
  # )
59
 
60
+ # # 2. Grab the state from the engine
61
+ # state = session_memory.get_state()
62
+ # active_target = state.get("active_target")
63
+
64
+ # # 3. Inject the visual context indicator directly into the chat bubble!
65
+ # if active_target:
66
+ # response = f"> 🎯 **Context Locked:** `{active_target}`\n\n" + str(response)
67
+ # else:
68
+ # response = str(response)
69
+
70
  # # Safely update the blank assistant placeholder regardless of Gradio version
71
  # if isinstance(history[-1], dict):
72
+ # history[-1]["content"] = response
73
  # elif hasattr(history[-1], "content"): # For Gradio dataclasses
74
+ # history[-1].content = response
75
  # else: # Fallback for old Gradio versions
76
+ # history[-1] = (history[-1][0], response)
 
 
 
 
77
 
78
+ # # 4. Extract the current context to update the UI Status Bar as well
79
  # if active_target:
80
  # status = f"🎯 **Current Context:** `{active_target}`"
81
  # else:
 
97
 
98
  # return history, gr.update()
99
 
100
+ # # 5. Build the UI
101
  # with gr.Blocks() as demo:
102
  # gr.Markdown("# πŸ“Š DBA Diagnostic Copilot")
103
 
104
  # status_bar = gr.Markdown("🌐 **Current Context:** Global (Derby/Influx)")
105
 
106
+ # chatbot = gr.Chatbot(height=550)
107
 
108
+ # # State variables (Private per user tab)
109
  # chat_memory = gr.State(init_memory)
110
+ # saved_msg = gr.State("") # Safely holds the pure string message
111
 
112
  # with gr.Row():
113
  # msg = gr.Textbox(
 
170
 
171
 
172
 
 
173
  import gradio as gr
174
 
175
+ # 1. CSS for Horizontal Tables & Terminal Styling
176
  CSS = """
177
  .container { max-width: 1200px; margin: auto; }
178
+
179
+ /* Forces tables to scroll horizontally */
180
  .prose table {
181
  display: block;
182
  overflow-x: auto;
 
188
  border: 1px solid #444;
189
  min-width: 150px;
190
  }
191
+
192
+ /* Terminal-like Status Badge above the input box */
193
+ .terminal-status {
194
+ padding: 6px 12px;
195
+ background-color: #1e1e1e;
196
+ color: #10B981; /* Emerald Green */
197
+ border-radius: 4px;
198
+ font-family: 'Courier New', Courier, monospace;
199
+ font-size: 14px;
200
+ font-weight: bold;
201
+ display: inline-block;
202
+ border: 1px solid #333;
203
+ margin-bottom: -10px;
204
+ }
205
+ """
206
+
207
+ # 2. Custom JavaScript for Terminal 'Up/Down' Arrow History
208
+ # This script waits for the Gradio textarea to render, then attaches a keydown listener
209
+ # to cycle through previously sent messages exactly like a bash terminal.
210
+ JS_HEAD = """
211
+ <script>
212
+ document.addEventListener('DOMContentLoaded', function() {
213
+ let setupDone = false;
214
+ setInterval(() => {
215
+ if (setupDone) return;
216
+ const textareas = document.querySelectorAll('textarea');
217
+ if(textareas.length > 0) {
218
+ const inputField = textareas[0];
219
+ let history = [];
220
+ let historyIndex = -1;
221
+ let currentDraft = "";
222
+
223
+ inputField.addEventListener('keydown', (e) => {
224
+ // Save to history on Enter (without Shift)
225
+ if (e.key === 'Enter' && !e.shiftKey) {
226
+ const val = inputField.value.trim();
227
+ if (val && (history.length === 0 || history[0] !== val)) {
228
+ history.unshift(val); // Add to beginning of history
229
+ }
230
+ historyIndex = -1;
231
+ }
232
+ // Navigate up in history
233
+ else if (e.key === 'ArrowUp') {
234
+ if (history.length > 0) {
235
+ e.preventDefault();
236
+ if (historyIndex === -1) currentDraft = inputField.value;
237
+ historyIndex = Math.min(historyIndex + 1, history.length - 1);
238
+ inputField.value = history[historyIndex];
239
+ inputField.dispatchEvent(new Event('input', { bubbles: true })); // Tell Gradio it changed
240
+ }
241
+ }
242
+ // Navigate down in history
243
+ else if (e.key === 'ArrowDown') {
244
+ if (historyIndex > -1) {
245
+ e.preventDefault();
246
+ historyIndex--;
247
+ if (historyIndex === -1) {
248
+ inputField.value = currentDraft;
249
+ } else {
250
+ inputField.value = history[historyIndex];
251
+ }
252
+ inputField.dispatchEvent(new Event('input', { bubbles: true }));
253
+ }
254
+ }
255
+ });
256
+ setupDone = true;
257
+ }
258
+ }, 500); // Check every 500ms until Gradio renders the textbox
259
+ });
260
+ </script>
261
  """
262
 
263
  print("Initializing AI trained Models... (This will run core_engine.py)")
264
 
 
265
  from core_engine import (
266
  run_multi_step_workflow,
267
  DialogueStateTracker,
 
271
 
272
  print("Engine Ready! Launching UI...")
273
 
 
274
  def init_memory():
275
  return DialogueStateTracker()
276
 
 
277
def prepare_msg(message, history):
    """Instantly acknowledge a user submission in the chat UI.

    Clears the textbox, appends the user's turn plus an empty assistant
    placeholder to the chat history, and stashes the raw message as a pure
    string for the slower ``bot_response`` step.

    Returns:
        tuple: ("" to clear the textbox, updated history list, saved message string)
    """
    # Force a string cast so downstream code never sees a list/None payload.
    text = str(message)
    turns = list(history)
    turns.append({"role": "user", "content": text})
    turns.append({"role": "assistant", "content": ""})
    return "", turns, text
284
 
 
285
  def bot_response(saved_msg, history, session_memory):
286
  try:
 
287
  response = run_multi_step_workflow(
288
  saved_msg,
289
  engine_derby,
 
292
  debug=True
293
  )
294
 
 
295
  state = session_memory.get_state()
296
  active_target = state.get("active_target")
297
 
298
+ # Keep the visual context indicator in the chat bubble for historical logging
299
  if active_target:
300
  response = f"> 🎯 **Context Locked:** `{active_target}`\n\n" + str(response)
301
  else:
302
  response = str(response)
303
 
304
+ # Safely update the history
305
  if isinstance(history[-1], dict):
306
  history[-1]["content"] = response
307
+ elif hasattr(history[-1], "content"):
308
  history[-1].content = response
309
+ else:
310
  history[-1] = (history[-1][0], response)
311
 
312
+ # Update the Terminal Prompt Status Bar
313
  if active_target:
314
+ status = f"<div class='terminal-status'>[ Target : {active_target} ] $ </div>"
315
  else:
316
+ status = f"<div class='terminal-status'>[ Target : Global (Derby/Influx) ] $ </div>"
317
 
318
  return history, status
319
 
 
331
 
332
  return history, gr.update()
333
 
334
+ # 3. Build the UI with the JS Header Injection
335
+ with gr.Blocks(head=JS_HEAD) as demo:
336
  gr.Markdown("# πŸ“Š DBA Diagnostic Copilot")
337
 
338
+ # SHORTER CHATBOT HEIGHT (400px instead of 550px)
339
+ chatbot = gr.Chatbot(height=400)
340
 
 
 
 
341
  chat_memory = gr.State(init_memory)
342
+ saved_msg = gr.State("")
343
+
344
+ # NEW LOCATION: Status bar is now right above the input box!
345
+ status_bar = gr.Markdown("<div class='terminal-status'>[ Target : Global (Derby/Influx) ] $ </div>")
346
 
347
  with gr.Row():
348
  msg = gr.Textbox(
 
357
 
358
  def reset_context(memory):
359
  memory.clear_target_context()
360
+ return memory, "<div class='terminal-status'>[ Target : Global (Derby/Influx) ] $ </div>"
361
 
362
  reset_ctx_btn = gr.Button("πŸ”„ Clear Target Context", scale=2)
363
  reset_ctx_btn.click(reset_context, inputs=[chat_memory], outputs=[chat_memory, status_bar])
364
 
 
365
  msg.submit(
366
  prepare_msg,
367
  inputs=[msg, chatbot],