LYL1015 committed on
Commit
f1a711b
·
verified ·
1 Parent(s): 48f21b8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +117 -123
app.py CHANGED
@@ -369,154 +369,148 @@ def process_full_pipeline(image):
369
  if image is None:
370
  return [], None
371
 
372
- try:
373
- # Get original image size for later restoration
374
- original_img = Image.open(image)
375
- original_size = original_img.size
376
-
377
- # Initialize chat history for UI
378
- chat_history = [("Image uploaded for analysis", None)]
379
-
380
- # Step 1: Get streaming LLM response
381
- streamer = get_llm_response_streaming(image)
382
-
383
- # Stream the response to UI with real-time updates
384
- full_response = ""
385
- in_reason = False
386
- in_answer = False
387
- reason_displayed = False
388
- answer_displayed = False
389
- reasoning_added = False # Track if reasoning entry was added
 
 
 
390
 
391
- for new_text in streamer:
392
- full_response += new_text
 
 
 
 
 
 
 
 
 
 
393
 
394
- # Check if we're entering reason section or if we need to start showing content
395
- if ('<reason>' in full_response and not in_reason and not reason_displayed) or (not reasoning_added and not in_reason and not reason_displayed):
396
- in_reason = True
397
- reasoning_added = True
 
 
 
 
 
 
 
 
398
 
 
 
 
 
 
 
 
399
  if '<reason>' in full_response:
400
- # Extract content after <reason>
401
  reason_start = full_response.find('<reason>') + len('<reason>')
402
  reason_content = full_response[reason_start:].strip()
403
  else:
404
- # Show all content as reasoning if no tag yet
405
  reason_content = full_response.strip()
406
 
407
- # Add reasoning to chat history
408
- chat_history.append((None, f"**🤔 Analysis & Reasoning:**\n\n{reason_content}"))
409
  yield chat_history, None
 
 
 
 
 
 
 
410
 
411
- # If we're in reason section, update content
412
- elif in_reason and not reason_displayed:
413
- # Check if reason section is complete
414
- if '</reason>' in full_response:
415
- # Extract complete reason content
416
- reason_start = full_response.find('<reason>') + len('<reason>')
417
- reason_end = full_response.find('</reason>')
418
- reason_content = full_response[reason_start:reason_end].strip()
419
-
420
- # Update chat history with complete reason
421
- chat_history[1] = (None, f"**🤔 Analysis & Reasoning:**\n\n{reason_content}")
422
- reason_displayed = True
423
- in_reason = False
424
- yield chat_history, None
425
- else:
426
- # Continue streaming reason content
427
- if '<reason>' in full_response:
428
- reason_start = full_response.find('<reason>') + len('<reason>')
429
- reason_content = full_response[reason_start:].strip()
430
- else:
431
- reason_content = full_response.strip()
432
-
433
- # Update chat history with partial reason
434
- chat_history[1] = (None, f"**🤔 Analysis & Reasoning:**\n\n{reason_content}")
435
- yield chat_history, None
436
-
437
- # Check if we're entering answer section
438
- elif '<answer>' in full_response and not in_answer and not answer_displayed and reason_displayed:
439
- in_answer = True
440
- # Extract content after <answer>
441
  answer_start = full_response.find('<answer>') + len('<answer>')
442
- answer_content = full_response[answer_start:]
 
443
 
444
- # Add partial answer to chat history
445
  models = extract_models_from_answer(answer_content)
446
  beautified = beautify_recommended_actions(answer_content, models)
447
- chat_history.append((None, beautified))
 
 
448
  yield chat_history, None
449
-
450
- # If we're in answer section, update content
451
- elif in_answer and not answer_displayed:
452
- # Check if answer section is complete
453
- if '</answer>' in full_response:
454
- # Extract complete answer content
455
- answer_start = full_response.find('<answer>') + len('<answer>')
456
- answer_end = full_response.find('</answer>')
457
- answer_content = full_response[answer_start:answer_end].strip()
458
-
459
- # Parse and process final answer
460
- models = extract_models_from_answer(answer_content)
461
- beautified = beautify_recommended_actions(answer_content, models)
462
- chat_history[2] = (None, beautified)
463
- answer_displayed = True
464
- in_answer = False
465
  yield chat_history, None
466
 
467
- # Process image with tools
468
- if models:
469
- chat_history.append((None, "**🔄 Processing image...**"))
470
- yield chat_history, None
471
-
472
- processed_image = process_image_with_tools(image, models, original_size)
473
- chat_history[-1] = (None, "**✅ Processing Complete!**")
474
- yield chat_history, processed_image
475
- return
476
- else:
477
- chat_history.append((None, "**❌ No valid models found in the response**"))
478
- yield chat_history, None
479
- return
480
  else:
481
- # Continue streaming answer content
482
- answer_start = full_response.find('<answer>') + len('<answer>')
483
- answer_content = full_response[answer_start:].strip()
484
-
485
- # Update chat history with partial answer
486
- models = extract_models_from_answer(answer_content)
487
- beautified = beautify_recommended_actions(answer_content, models)
488
- chat_history[2] = (None, beautified)
489
  yield chat_history, None
490
-
491
- # Fallback if streaming completes without proper tags
492
- if not answer_displayed:
493
- reason, answer = parse_llm_response(full_response)
494
- models = extract_models_from_answer(answer)
495
-
496
- chat_history = [
497
- ("Image uploaded for analysis", None),
498
- (None, f"**🤔 Analysis & Reasoning:**\n\n{reason}"),
499
- (None, beautify_recommended_actions(answer, models))
500
- ]
501
-
502
- if models:
503
- chat_history.append((None, "**🔄 Processing image...**"))
504
- yield chat_history, None
505
-
506
- processed_image = process_image_with_tools(image, models, original_size)
507
- chat_history[-1] = (None, "**✅ Processing Complete!**")
508
- yield chat_history, processed_image
509
  else:
510
- chat_history.append((None, "**❌ No valid models found in the response**"))
 
 
 
 
 
 
 
511
  yield chat_history, None
512
-
513
- except Exception as e:
514
- error_msg = f"Error: {str(e)}"
 
 
 
515
  chat_history = [
516
  ("Image uploaded for analysis", None),
517
- (None, f"** Error occurred:**\n\n{error_msg}")
 
518
  ]
519
- yield chat_history, None
 
 
 
 
 
 
 
 
 
 
 
 
520
 
521
  # Create Gradio interface
522
  def create_interface():
 
369
  if image is None:
370
  return [], None
371
 
372
+
373
+ # Get original image size for later restoration
374
+ original_img = Image.open(image)
375
+ original_size = original_img.size
376
+
377
+ # Initialize chat history for UI
378
+ chat_history = [("Image uploaded for analysis", None)]
379
+
380
+ # Step 1: Get streaming LLM response
381
+ streamer = get_llm_response_streaming(image)
382
+
383
+ # Stream the response to UI with real-time updates
384
+ full_response = ""
385
+ in_reason = False
386
+ in_answer = False
387
+ reason_displayed = False
388
+ answer_displayed = False
389
+ reasoning_added = False # Track if reasoning entry was added
390
+
391
+ for new_text in streamer:
392
+ full_response += new_text
393
 
394
+ # Check if we're entering reason section or if we need to start showing content
395
+ if ('<reason>' in full_response and not in_reason and not reason_displayed) or (not reasoning_added and not in_reason and not reason_displayed):
396
+ in_reason = True
397
+ reasoning_added = True
398
+
399
+ if '<reason>' in full_response:
400
+ # Extract content after <reason>
401
+ reason_start = full_response.find('<reason>') + len('<reason>')
402
+ reason_content = full_response[reason_start:].strip()
403
+ else:
404
+ # Show all content as reasoning if no tag yet
405
+ reason_content = full_response.strip()
406
 
407
+ # Add reasoning to chat history
408
+ chat_history.append((None, f"**🤔 Analysis & Reasoning:**\n\n{reason_content}"))
409
+ yield chat_history, None
410
+
411
+ # If we're in reason section, update content
412
+ elif in_reason and not reason_displayed:
413
+ # Check if reason section is complete
414
+ if '</reason>' in full_response:
415
+ # Extract complete reason content
416
+ reason_start = full_response.find('<reason>') + len('<reason>')
417
+ reason_end = full_response.find('</reason>')
418
+ reason_content = full_response[reason_start:reason_end].strip()
419
 
420
+ # Update chat history with complete reason
421
+ chat_history[1] = (None, f"**🤔 Analysis & Reasoning:**\n\n{reason_content}")
422
+ reason_displayed = True
423
+ in_reason = False
424
+ yield chat_history, None
425
+ else:
426
+ # Continue streaming reason content
427
  if '<reason>' in full_response:
 
428
  reason_start = full_response.find('<reason>') + len('<reason>')
429
  reason_content = full_response[reason_start:].strip()
430
  else:
 
431
  reason_content = full_response.strip()
432
 
433
+ # Update chat history with partial reason
434
+ chat_history[1] = (None, f"**🤔 Analysis & Reasoning:**\n\n{reason_content}")
435
  yield chat_history, None
436
+
437
+ # Check if we're entering answer section
438
+ elif '<answer>' in full_response and not in_answer and not answer_displayed and reason_displayed:
439
+ in_answer = True
440
+ # Extract content after <answer>
441
+ answer_start = full_response.find('<answer>') + len('<answer>')
442
+ answer_content = full_response[answer_start:]
443
 
444
+ # Add partial answer to chat history
445
+ models = extract_models_from_answer(answer_content)
446
+ beautified = beautify_recommended_actions(answer_content, models)
447
+ chat_history.append((None, beautified))
448
+ yield chat_history, None
449
+
450
+ # If we're in answer section, update content
451
+ elif in_answer and not answer_displayed:
452
+ # Check if answer section is complete
453
+ if '</answer>' in full_response:
454
+ # Extract complete answer content
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
455
  answer_start = full_response.find('<answer>') + len('<answer>')
456
+ answer_end = full_response.find('</answer>')
457
+ answer_content = full_response[answer_start:answer_end].strip()
458
 
459
+ # Parse and process final answer
460
  models = extract_models_from_answer(answer_content)
461
  beautified = beautify_recommended_actions(answer_content, models)
462
+ chat_history[2] = (None, beautified)
463
+ answer_displayed = True
464
+ in_answer = False
465
  yield chat_history, None
466
+
467
+ # Process image with tools
468
+ if models:
469
+ chat_history.append((None, "**🔄 Processing image...**"))
 
 
 
 
 
 
 
 
 
 
 
 
470
  yield chat_history, None
471
 
472
+ processed_image = process_image_with_tools(image, models, original_size)
473
+ chat_history[-1] = (None, "**✅ Processing Complete!**")
474
+ yield chat_history, processed_image
475
+ return
 
 
 
 
 
 
 
 
 
476
  else:
477
+ chat_history.append((None, "**❌ No valid models found in the response**"))
 
 
 
 
 
 
 
478
  yield chat_history, None
479
+ return
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
480
  else:
481
+ # Continue streaming answer content
482
+ answer_start = full_response.find('<answer>') + len('<answer>')
483
+ answer_content = full_response[answer_start:].strip()
484
+
485
+ # Update chat history with partial answer
486
+ models = extract_models_from_answer(answer_content)
487
+ beautified = beautify_recommended_actions(answer_content, models)
488
+ chat_history[2] = (None, beautified)
489
  yield chat_history, None
490
+
491
+ # Fallback if streaming completes without proper tags
492
+ if not answer_displayed:
493
+ reason, answer = parse_llm_response(full_response)
494
+ models = extract_models_from_answer(answer)
495
+
496
  chat_history = [
497
  ("Image uploaded for analysis", None),
498
+ (None, f"**🤔 Analysis & Reasoning:**\n\n{reason}"),
499
+ (None, beautify_recommended_actions(answer, models))
500
  ]
501
+
502
+ if models:
503
+ chat_history.append((None, "**🔄 Processing image...**"))
504
+ yield chat_history, None
505
+
506
+ processed_image = process_image_with_tools(image, models, original_size)
507
+ chat_history[-1] = (None, "**✅ Processing Complete!**")
508
+ yield chat_history, processed_image
509
+ else:
510
+ chat_history.append((None, "**❌ No valid models found in the response**"))
511
+ yield chat_history, None
512
+
513
+
514
 
515
  # Create Gradio interface
516
  def create_interface():