prernajeet01 commited on
Commit
783e7ab
·
verified ·
1 Parent(s): b0392ce

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +101 -37
app.py CHANGED
@@ -8,7 +8,6 @@ from langchain.text_splitter import RecursiveCharacterTextSplitter
8
  from langchain_community.embeddings import OpenAIEmbeddings
9
  from langchain_community.vectorstores import FAISS
10
  from langchain.chains import RetrievalQA
11
- # Use existing imports since langchain_aws is not available
12
  from langchain_community.chat_models import BedrockChat
13
  from langchain_openai import ChatOpenAI
14
  from langchain_community.llms import Ollama
@@ -246,12 +245,15 @@ def create_interface():
246
  """)
247
  return demo
248
 
249
- # Initialize agents - changed to initialize lazily to avoid startup errors
250
  audit_agents = {}
251
 
252
  with gr.Blocks(theme=gr.themes.Base()) as demo:
253
  gr.Markdown("# 🔍 Amy - Your Audit Copilot")
254
 
 
 
 
255
  with gr.Row():
256
  with gr.Column(scale=1):
257
  file_upload = gr.File(
@@ -293,9 +295,6 @@ def create_interface():
293
  )
294
  query_button = gr.Button("Query")
295
  query_output = gr.Markdown(label="Response")
296
-
297
- # Status indicator for initialization and operations
298
- status_message = gr.Textbox(label="Status", value="Ready")
299
 
300
  # Track the selected model
301
  selected_model = gr.State("claude-3-sonnet")
@@ -309,54 +308,119 @@ def create_interface():
309
 
310
  model_tabs.select(update_selected_model, outputs=[selected_model])
311
 
312
- # Lazy initialization of models when first used
313
  def get_or_initialize_agent(model_name):
314
- if model_name not in audit_agents:
315
- try:
316
- status_message.update(value=f"Initializing {model_name}...")
317
- config = llm_configs[model_name]
318
- audit_agents[model_name] = AuditAgent(config["name"], config["provider"])
319
- status_message.update(value=f"{model_name} initialized successfully")
320
- except Exception as e:
321
- status_message.update(value=f"Error initializing {model_name}: {str(e)}")
322
- return None
323
- return audit_agents[model_name]
 
 
 
 
 
 
 
 
 
 
324
 
 
325
  def handle_chat(query, model_name):
326
- agent = get_or_initialize_agent(model_name)
327
- if not agent:
328
- return f"Could not initialize {model_name}. Please check logs for details."
329
- return agent.process_query(query)
 
 
 
 
 
 
 
 
 
 
 
 
 
330
 
 
331
  def handle_problem(problem, model_name):
332
- agent = get_or_initialize_agent(model_name)
333
- if not agent:
334
- return f"Could not initialize {model_name}. Please check logs for details."
335
- return agent.process_query(problem)
 
 
 
 
 
 
 
 
 
 
 
 
336
 
 
337
  def handle_file_upload(file, model_name):
338
- agent = get_or_initialize_agent(model_name)
339
- if not agent:
340
- return f"Could not initialize {model_name}. Please check logs for details."
341
- return agent.process_documents(file)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
342
 
 
343
  def handle_query(query, model_name):
344
- agent = get_or_initialize_agent(model_name)
345
- if not agent:
346
- return f"Could not initialize {model_name}. Please check logs for details."
347
- return agent.query_documents(query)
 
 
 
 
 
 
 
 
 
 
 
 
348
 
349
- # Set up event handlers
350
  chat_button.click(
351
  handle_chat,
352
  inputs=[chat_input, selected_model],
353
- outputs=[chat_output]
354
  )
355
 
356
  solve_button.click(
357
  handle_problem,
358
  inputs=[problem_input, selected_model],
359
- outputs=[solution_output]
360
  )
361
 
362
  file_upload.upload(
@@ -368,11 +432,11 @@ def create_interface():
368
  query_button.click(
369
  handle_query,
370
  inputs=[query_input, selected_model],
371
- outputs=[query_output]
372
  )
373
 
374
  return demo
375
 
376
  if __name__ == "__main__":
377
  demo = create_interface()
378
- demo.launch(share=True)
 
8
  from langchain_community.embeddings import OpenAIEmbeddings
9
  from langchain_community.vectorstores import FAISS
10
  from langchain.chains import RetrievalQA
 
11
  from langchain_community.chat_models import BedrockChat
12
  from langchain_openai import ChatOpenAI
13
  from langchain_community.llms import Ollama
 
245
  """)
246
  return demo
247
 
248
+ # Initialize agents dictionary - will be initialized on demand
249
  audit_agents = {}
250
 
251
  with gr.Blocks(theme=gr.themes.Base()) as demo:
252
  gr.Markdown("# 🔍 Amy - Your Audit Copilot")
253
 
254
+ # Status indicator for initialization and operations
255
+ status_message = gr.Textbox(label="Status", value="Ready")
256
+
257
  with gr.Row():
258
  with gr.Column(scale=1):
259
  file_upload = gr.File(
 
295
  )
296
  query_button = gr.Button("Query")
297
  query_output = gr.Markdown(label="Response")
 
 
 
298
 
299
  # Track the selected model
300
  selected_model = gr.State("claude-3-sonnet")
 
308
 
309
  model_tabs.select(update_selected_model, outputs=[selected_model])
310
 
311
+ # COMPLETELY REVISED: Initialize an agent and return both agent and status message
312
def get_or_initialize_agent(model_name):
    """Look up the cached agent for *model_name*, creating it on first use.

    Returns a ``(agent, status)`` pair: ``agent`` is ``None`` when
    construction fails, and ``status`` is a human-readable message
    suitable for the UI status box.
    """
    # Cache hit: hand back the existing agent immediately.
    if model_name in audit_agents:
        return audit_agents[model_name], f"{model_name} ready"

    init_message = f"Initializing {model_name}..."
    try:
        # NOTE: the config lookup happens before the info log on purpose —
        # an unknown model name raises KeyError straight into the handler
        # without logging a misleading "Initializing" line first.
        config = llm_configs[model_name]
        logging.info(init_message)
        new_agent = AuditAgent(config["name"], config["provider"])
        audit_agents[model_name] = new_agent
        success_message = f"{model_name} initialized successfully"
        logging.info(success_message)
        return new_agent, success_message
    except Exception as e:
        error_message = f"Error initializing {model_name}: {str(e)}"
        logging.error(error_message)
        return None, error_message
333
 
334
+ # Handle chat separately
335
def handle_chat(query, model_name):
    """Answer a chat *query* with the currently selected model.

    Returns a ``(response, status)`` pair feeding the ``chat_output``
    and ``status_message`` Gradio components.
    """
    # Fix: dropped the dead local `status` the original assigned but
    # never used.
    agent, init_status = get_or_initialize_agent(model_name)
    if agent is None:
        # Initialization failed — surface the reason in both outputs.
        return f"Could not initialize {model_name}. {init_status}", init_status
    try:
        result = agent.process_query(query)
        return result, f"Query processed with {model_name}"
    except Exception as e:
        # Mirror the error into both the chat pane and the status box.
        error_msg = f"Error processing query: {str(e)}"
        return error_msg, error_msg
353
 
354
+ # Handle numerical problem
355
def handle_problem(problem, model_name):
    """Solve a numerical *problem* with the currently selected model.

    Returns a ``(solution, status)`` pair feeding the
    ``solution_output`` and ``status_message`` Gradio components.
    """
    # Fix: dropped the dead local `status` the original assigned but
    # never used.
    agent, init_status = get_or_initialize_agent(model_name)
    if agent is None:
        # Initialization failed — surface the reason in both outputs.
        return f"Could not initialize {model_name}. {init_status}", init_status
    try:
        result = agent.process_query(problem)
        return result, f"Problem solved with {model_name}"
    except Exception as e:
        # Mirror the error into both the solution pane and the status box.
        error_msg = f"Error solving problem: {str(e)}"
        return error_msg, error_msg
372
 
373
+ # Handle file upload
374
def handle_file_upload(file, model_name):
    """Ingest an uploaded *file* with the currently selected model.

    Returns a single message string — unlike the other handlers, the
    upload event appears to be wired to one output component only, so
    no ``(result, status)`` tuple is produced here.
    """
    # Guard: Gradio passes None when no file is present.
    if file is None:
        return "No file uploaded. Please upload a file."
    # Fix: dropped the dead local `status` the original assigned but
    # never used.
    agent, init_status = get_or_initialize_agent(model_name)
    if agent is None:
        return init_status
    try:
        return agent.process_documents(file)
    except Exception as e:
        return f"Error processing document: {str(e)}"
393
 
394
+ # Handle document query
395
def handle_query(query, model_name):
    """Run *query* against the previously ingested documents.

    Returns a ``(response, status)`` pair feeding the ``query_output``
    and ``status_message`` Gradio components.
    """
    # Fix: dropped the dead local `status` the original assigned but
    # never used.
    agent, init_status = get_or_initialize_agent(model_name)
    if agent is None:
        # Initialization failed — surface the reason in both outputs.
        return f"Could not initialize {model_name}. {init_status}", init_status
    try:
        result = agent.query_documents(query)
        return result, f"Documents queried with {model_name}"
    except Exception as e:
        # Mirror the error into both the response pane and the status box.
        error_msg = f"Error querying documents: {str(e)}"
        return error_msg, error_msg
412
 
413
+ # Set up event handlers - UPDATED to include status_message updates
414
  chat_button.click(
415
  handle_chat,
416
  inputs=[chat_input, selected_model],
417
+ outputs=[chat_output, status_message]
418
  )
419
 
420
  solve_button.click(
421
  handle_problem,
422
  inputs=[problem_input, selected_model],
423
+ outputs=[solution_output, status_message]
424
  )
425
 
426
  file_upload.upload(
 
432
  query_button.click(
433
  handle_query,
434
  inputs=[query_input, selected_model],
435
+ outputs=[query_output, status_message]
436
  )
437
 
438
  return demo
439
 
440
  if __name__ == "__main__":
441
  demo = create_interface()
442
+ demo.launch(share=True)