Prathamesh1420 committed on
Commit
83d682e
·
verified ·
1 Parent(s): 23f378c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +37 -12
app.py CHANGED
@@ -389,11 +389,10 @@ def rag_pipeline(question):
389
  mlflow.log_dict(error_info, "artifacts/pipeline_errors.json")
390
  return f"Error: {str(e)}"
391
 
392
- # ----------- 7. Gradio UI with Enhanced Tracking -----------
393
  with gr.Blocks() as demo:
394
  gr.Markdown("# 🛠 Maintenance AI Assistant")
395
 
396
- # Track additional UI metrics
397
  usage_counter = gr.State(value=0)
398
  session_start = gr.State(value=datetime.now().isoformat())
399
 
@@ -402,36 +401,62 @@ with gr.Blocks() as demo:
402
  ask_button = gr.Button("Get Answer")
403
  feedback = gr.Radio(["Helpful", "Not Helpful"], label="Was this response helpful?")
404
 
405
- def track_usage(question, count, session_start, feedback=None):
406
- """Wrapper to track usage metrics with feedback"""
407
  count += 1
408
 
409
- # Start tracking context
410
  with mlflow.start_run(run_name=f"User-Interaction-{count}", nested=True):
411
  mlflow.log_param("question", question)
412
  mlflow.log_param("session_start", session_start)
413
 
414
- # Get response
415
- response = rag_pipeline(question)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
416
 
417
  # Log feedback if provided
418
  if feedback:
419
  mlflow.log_param("user_feedback", feedback)
420
  mlflow.log_metric("helpful_responses", 1 if feedback == "Helpful" else 0)
421
 
422
- # Update metrics
423
  mlflow.log_metric("total_queries", count)
424
 
425
- return response, count, session_start
426
 
427
  ask_button.click(
428
- track_usage,
429
- inputs=[question_input, usage_counter, session_start],
430
  outputs=[answer_output, usage_counter, session_start]
431
  )
432
 
433
  feedback.change(
434
- lambda feedback, question, count, session_start: track_usage(question, count, session_start, feedback),
435
  inputs=[feedback, question_input, usage_counter, session_start],
436
  outputs=[answer_output, usage_counter, session_start]
437
  )
 
389
  mlflow.log_dict(error_info, "artifacts/pipeline_errors.json")
390
  return f"Error: {str(e)}"
391
 
392
+ # ----------- 7. Gradio UI with Streaming Response -----------
393
  with gr.Blocks() as demo:
394
  gr.Markdown("# 🛠 Maintenance AI Assistant")
395
 
 
396
  usage_counter = gr.State(value=0)
397
  session_start = gr.State(value=datetime.now().isoformat())
398
 
 
401
  ask_button = gr.Button("Get Answer")
402
  feedback = gr.Radio(["Helpful", "Not Helpful"], label="Was this response helpful?")
403
 
404
+ def track_usage_stream(question, count, session_start, feedback=None):
405
+ """Generator for streaming response token by token"""
406
  count += 1
407
 
 
408
  with mlflow.start_run(run_name=f"User-Interaction-{count}", nested=True):
409
  mlflow.log_param("question", question)
410
  mlflow.log_param("session_start", session_start)
411
 
412
+ # Retrieve context first
413
+ retrieved_context = get_retrieved_context(question)
414
+ mlflow.log_text(retrieved_context, "artifacts/retrieved_context.txt")
415
+
416
+ # Stream response in chunks
417
+ def response_generator():
418
+ # Here, simulate streaming by splitting into words or chunks
419
+ response_full = llm_chain.invoke({
420
+ "context": retrieved_context,
421
+ "question": question
422
+ })["text"].strip()
423
+
424
+ if "Answer:" in response_full:
425
+ response_full = response_full.split("Answer:", 1)[-1].strip()
426
+
427
+ # Yield response word by word (or token by token)
428
+ words = response_full.split()
429
+ current_text = ""
430
+ for word in words:
431
+ current_text += word + " "
432
+ yield current_text
433
+
434
+ # After streaming is done, log evaluation
435
+ evaluation_metrics = evaluator.evaluate_all(
436
+ question=question,
437
+ response=response_full,
438
+ reference=retrieved_context
439
+ )
440
+ for metric_name, metric_value in evaluation_metrics.items():
441
+ mlflow.log_metric(metric_name, metric_value)
442
 
443
  # Log feedback if provided
444
  if feedback:
445
  mlflow.log_param("user_feedback", feedback)
446
  mlflow.log_metric("helpful_responses", 1 if feedback == "Helpful" else 0)
447
 
 
448
  mlflow.log_metric("total_queries", count)
449
 
450
+ return response_generator(), count, session_start
451
 
452
  ask_button.click(
453
+ track_usage_stream,
454
+ inputs=[question_input, usage_counter, session_start, feedback],
455
  outputs=[answer_output, usage_counter, session_start]
456
  )
457
 
458
  feedback.change(
459
+ lambda feedback, question, count, session_start: track_usage_stream(question, count, session_start, feedback),
460
  inputs=[feedback, question_input, usage_counter, session_start],
461
  outputs=[answer_output, usage_counter, session_start]
462
  )