msIntui committed on
Commit
39d0031
·
1 Parent(s): 06070a9

Update DeepLSD installation and add error handling

Browse files
Files changed (2) hide show
  1. gradioChatApp.py +184 -125
  2. requirements.txt +8 -19
gradioChatApp.py CHANGED
@@ -525,137 +525,188 @@ custom_css = """
525
  }
526
  """
527
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
528
  def create_ui():
529
- with gr.Blocks(css=custom_css) as demo:
530
- # Logo row
531
- with gr.Row(elem_classes=["logo-row"]):
532
- try:
533
- logo_path = os.path.join(os.path.dirname(__file__), "assets", "intuigence.png")
534
- if os.path.exists(logo_path):
535
- with open(logo_path, "rb") as f:
536
- logo_base64 = base64.b64encode(f.read()).decode()
537
- gr.HTML(f"""
538
- <div style="text-align: center; padding: 10px; background-color: #1a1a1a; width: 100%;">
539
- <img src="data:image/png;base64,{logo_base64}"
540
- alt="Intuigence Logo"
541
- style="height: 60px; object-fit: contain;">
542
- </div>
543
- """)
544
- else:
545
- logger.warning(f"Logo not found at {logo_path}")
546
- except Exception as e:
547
- logger.error(f"Error loading logo: {e}")
548
-
549
- # Main layout
550
- with gr.Row(equal_height=True, elem_classes=["full-height-row"]):
551
- # Left column
552
- with gr.Column(scale=2):
553
- # Upload area
554
- with gr.Column(elem_classes=["upload-box"]):
555
- image_input = gr.File(
556
- label="Upload P&ID Document",
557
- file_types=[".pdf", ".png", ".jpg", ".jpeg"],
558
- file_count="single",
559
- type="filepath"
560
- )
561
-
562
- # Status area
563
- with gr.Column(elem_classes=["status-box-container"]):
564
- gr.Markdown("### Processing Status")
565
- progress_status = gr.Textbox(
566
- label="Status",
567
- show_label=False,
568
- elem_classes=["status-box"],
569
- lines=15,
570
- max_lines=20,
571
- interactive=False,
572
- autoscroll=True,
573
- value="" # Initialize with empty value
574
- )
575
- json_path_state = gr.State()
576
-
577
- # Center column
578
- with gr.Column(scale=5):
579
- with gr.Tabs(elem_classes=["preview-tabs"]) as tabs:
580
- with gr.TabItem("P&ID"):
581
- original_image = gr.Image(label="Original P&ID", height=450) # Reduced height
582
- with gr.TabItem("Symbols"):
583
- symbol_image = gr.Image(label="Detected Symbols", height=450)
584
- with gr.TabItem("Tags"):
585
- text_image = gr.Image(label="Detected Tags", height=450)
586
- with gr.TabItem("Pipelines"):
587
- line_image = gr.Image(label="Detected Lines", height=450)
588
- with gr.TabItem("Aggregated"):
589
- aggregated_image = gr.Image(label="Aggregated Results", height=450)
590
- with gr.TabItem("Graph"):
591
- graph_image = gr.Image(label="Knowledge Graph", height=450)
592
-
593
- # Right column
594
- with gr.Column(scale=3):
595
- with gr.Column(elem_classes=["chat-container"]):
596
- gr.Markdown("### Chat Interface")
597
- # Initialize chat with a welcome message
598
- initial_chat = chat_message(
599
- "agent",
600
- "Ready to process P&ID documents and answer questions.",
601
- agent_avatar,
602
- get_timestamp()
603
- )
604
- chat_output = gr.HTML(
605
- label="Chat",
606
- elem_classes=["chatbox"],
607
- value=initial_chat
608
- )
609
- # Message input and send button in a fixed-height container
610
- with gr.Column(elem_classes=["chat-input-group"]):
611
- user_input = gr.Textbox(
612
  show_label=False,
613
- placeholder="Type your question here...",
614
- elem_classes=["chat-input"],
615
- lines=3
 
 
 
616
  )
617
- send_button = gr.Button(
618
- "Send",
619
- elem_classes=["send-button"]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
620
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
621
 
622
- # Set up event handlers inside the Blocks context
623
- image_input.upload(
624
- fn=process_pnid,
625
- inputs=[image_input, progress_status],
626
- outputs=[
627
- original_image,
628
- symbol_image,
629
- text_image,
630
- line_image,
631
- aggregated_image,
632
- graph_image,
633
- chat_output,
634
- progress_status,
635
- json_path_state
636
- ],
637
- show_progress="hidden" # Hide the default progress bar
638
- )
639
-
640
- # Add input clearing and enable/disable logic for chat
641
- def clear_and_handle_message(user_message, chat_history, json_path):
642
- response = handle_user_message(user_message, chat_history, json_path)
643
- return "", response # Clear input after sending
644
 
645
- send_button.click(
646
- fn=clear_and_handle_message,
647
- inputs=[user_input, chat_output, json_path_state],
648
- outputs=[user_input, chat_output]
649
- )
650
 
651
- # Also trigger on Enter key
652
- user_input.submit(
653
- fn=clear_and_handle_message,
654
- inputs=[user_input, chat_output, json_path_state],
655
- outputs=[user_input, chat_output]
656
- )
657
 
658
- return demo
 
 
 
 
 
 
 
 
 
659
 
660
  def main():
661
  demo = create_ui()
@@ -668,5 +719,13 @@ if __name__ == "__main__":
668
  main()
669
  else:
670
  # For Spaces deployment
671
- demo = create_ui()
672
- app = demo.app # Gradio requires 'app' variable for Spaces
 
 
 
 
 
 
 
 
 
525
  }
526
  """
527
 
528
+ def check_environment():
529
+ """Check required environment variables and model files."""
530
+ required_vars = {
531
+ 'OPENAI_API_KEY': 'OpenAI API key is required for chat functionality',
532
+ 'STORAGE_TYPE': 'Storage type must be specified (local or azure)',
533
+ 'USE_TORCH': 'Torch configuration must be specified'
534
+ }
535
+
536
+ missing_vars = []
537
+ for var, message in required_vars.items():
538
+ if not os.getenv(var):
539
+ missing_vars.append(f"{var}: {message}")
540
+
541
+ if missing_vars:
542
+ logger.error("Missing required environment variables:")
543
+ for msg in missing_vars:
544
+ logger.error(f" - {msg}")
545
+ return False
546
+
547
+ # Check for DeepLSD
548
+ try:
549
+ import deeplsd
550
+ logger.info("DeepLSD package found")
551
+ except ImportError:
552
+ logger.error("DeepLSD package not found. Please install with: pip install git+https://github.com/cvg/DeepLSD.git")
553
+ return False
554
+
555
+ # Check for model files
556
+ model_path = "models/deeplsd_md.tar"
557
+ if not os.path.exists(model_path):
558
+ logger.error(f"Required model file not found: {model_path}")
559
+ return False
560
+
561
+ return True
562
+
563
  def create_ui():
564
+ """Create the Gradio interface with error handling."""
565
+ try:
566
+ # Check environment before creating UI
567
+ if not check_environment():
568
+ raise EnvironmentError("Missing required configuration. Check logs for details.")
569
+
570
+ # Create UI components
571
+ with gr.Blocks(css=custom_css) as demo:
572
+ # Logo row
573
+ with gr.Row(elem_classes=["logo-row"]):
574
+ try:
575
+ logo_path = os.path.join(os.path.dirname(__file__), "assets", "intuigence.png")
576
+ if os.path.exists(logo_path):
577
+ with open(logo_path, "rb") as f:
578
+ logo_base64 = base64.b64encode(f.read()).decode()
579
+ gr.HTML(f"""
580
+ <div style="text-align: center; padding: 10px; background-color: #1a1a1a; width: 100%;">
581
+ <img src="data:image/png;base64,{logo_base64}"
582
+ alt="Intuigence Logo"
583
+ style="height: 60px; object-fit: contain;">
584
+ </div>
585
+ """)
586
+ else:
587
+ logger.warning(f"Logo not found at {logo_path}")
588
+ except Exception as e:
589
+ logger.error(f"Error loading logo: {e}")
590
+
591
+ # Main layout
592
+ with gr.Row(equal_height=True, elem_classes=["full-height-row"]):
593
+ # Left column
594
+ with gr.Column(scale=2):
595
+ # Upload area
596
+ with gr.Column(elem_classes=["upload-box"]):
597
+ image_input = gr.File(
598
+ label="Upload P&ID Document",
599
+ file_types=[".pdf", ".png", ".jpg", ".jpeg"],
600
+ file_count="single",
601
+ type="filepath"
602
+ )
603
+
604
+ # Status area
605
+ with gr.Column(elem_classes=["status-box-container"]):
606
+ gr.Markdown("### Processing Status")
607
+ progress_status = gr.Textbox(
608
+ label="Status",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
609
  show_label=False,
610
+ elem_classes=["status-box"],
611
+ lines=15,
612
+ max_lines=20,
613
+ interactive=False,
614
+ autoscroll=True,
615
+ value="" # Initialize with empty value
616
  )
617
+ json_path_state = gr.State()
618
+
619
+ # Center column
620
+ with gr.Column(scale=5):
621
+ with gr.Tabs(elem_classes=["preview-tabs"]) as tabs:
622
+ with gr.TabItem("P&ID"):
623
+ original_image = gr.Image(label="Original P&ID", height=450) # Reduced height
624
+ with gr.TabItem("Symbols"):
625
+ symbol_image = gr.Image(label="Detected Symbols", height=450)
626
+ with gr.TabItem("Tags"):
627
+ text_image = gr.Image(label="Detected Tags", height=450)
628
+ with gr.TabItem("Pipelines"):
629
+ line_image = gr.Image(label="Detected Lines", height=450)
630
+ with gr.TabItem("Aggregated"):
631
+ aggregated_image = gr.Image(label="Aggregated Results", height=450)
632
+ with gr.TabItem("Graph"):
633
+ graph_image = gr.Image(label="Knowledge Graph", height=450)
634
+
635
+ # Right column
636
+ with gr.Column(scale=3):
637
+ with gr.Column(elem_classes=["chat-container"]):
638
+ gr.Markdown("### Chat Interface")
639
+ # Initialize chat with a welcome message
640
+ initial_chat = chat_message(
641
+ "agent",
642
+ "Ready to process P&ID documents and answer questions.",
643
+ agent_avatar,
644
+ get_timestamp()
645
  )
646
+ chat_output = gr.HTML(
647
+ label="Chat",
648
+ elem_classes=["chatbox"],
649
+ value=initial_chat
650
+ )
651
+ # Message input and send button in a fixed-height container
652
+ with gr.Column(elem_classes=["chat-input-group"]):
653
+ user_input = gr.Textbox(
654
+ show_label=False,
655
+ placeholder="Type your question here...",
656
+ elem_classes=["chat-input"],
657
+ lines=3
658
+ )
659
+ send_button = gr.Button(
660
+ "Send",
661
+ elem_classes=["send-button"]
662
+ )
663
+
664
+ # Set up event handlers inside the Blocks context
665
+ image_input.upload(
666
+ fn=process_pnid,
667
+ inputs=[image_input, progress_status],
668
+ outputs=[
669
+ original_image,
670
+ symbol_image,
671
+ text_image,
672
+ line_image,
673
+ aggregated_image,
674
+ graph_image,
675
+ chat_output,
676
+ progress_status,
677
+ json_path_state
678
+ ],
679
+ show_progress="hidden" # Hide the default progress bar
680
+ )
681
 
682
+ # Add input clearing and enable/disable logic for chat
683
+ def clear_and_handle_message(user_message, chat_history, json_path):
684
+ response = handle_user_message(user_message, chat_history, json_path)
685
+ return "", response # Clear input after sending
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
686
 
687
+ send_button.click(
688
+ fn=clear_and_handle_message,
689
+ inputs=[user_input, chat_output, json_path_state],
690
+ outputs=[user_input, chat_output]
691
+ )
692
 
693
+ # Also trigger on Enter key
694
+ user_input.submit(
695
+ fn=clear_and_handle_message,
696
+ inputs=[user_input, chat_output, json_path_state],
697
+ outputs=[user_input, chat_output]
698
+ )
699
 
700
+ return demo
701
+ except Exception as e:
702
+ logger.error(f"Error creating UI: {str(e)}")
703
+ logger.error(traceback.format_exc())
704
+ # Create a minimal UI showing the error
705
+ with gr.Blocks() as error_demo:
706
+ gr.Markdown("# ⚠️ Configuration Error")
707
+ gr.Markdown(f"Error: {str(e)}")
708
+ gr.Markdown("Please check the logs and configuration.")
709
+ return error_demo
710
 
711
  def main():
712
  demo = create_ui()
 
719
  main()
720
  else:
721
  # For Spaces deployment
722
+ try:
723
+ demo = create_ui()
724
+ app = demo.app
725
+ except Exception as e:
726
+ logger.error(f"Failed to initialize app: {str(e)}")
727
+ # Create minimal error app
728
+ with gr.Blocks() as error_demo:
729
+ gr.Markdown("# ⚠️ Deployment Error")
730
+ gr.Markdown("Failed to initialize the application.")
731
+ app = error_demo.app
requirements.txt CHANGED
@@ -2,40 +2,29 @@
2
  gradio>=4.0.0
3
  numpy>=1.24.0
4
  Pillow>=8.0.0
5
- opencv-python-headless>=4.8.0
6
- PyMuPDF>=1.18.0 # for PDF processing
7
 
8
  # OCR Engines
9
  pytesseract>=0.3.8
10
  easyocr>=1.7.1
11
- python-doctr>=0.7.0 # For DocTR OCR
12
- tensorflow>=2.8.0 # Required by DocTR
13
 
14
  # Deep Learning
15
  torch>=2.1.0
16
  torchvision>=0.15.0
17
- ultralytics>=8.0.0 # for YOLO models
18
- deeplsd # Add this for line detection
19
- omegaconf>=2.3.0 # Required by DeepLSD
20
 
21
  # Graph Processing
22
  networkx>=2.6.0
23
  plotly>=5.3.0
24
 
25
  # Utilities
26
- tqdm>=4.66.0
27
  python-dotenv>=0.19.0
28
- uuid>=1.30
29
- shapely>=1.8.0 # for geometry operations
30
-
31
- # Azure Storage
32
- azure-storage-blob>=12.0.0
33
- azure-core>=1.24.0
34
-
35
- # AI/Chat
36
- openai>=1.0.0 # For ChatGPT integration
37
  loguru>=0.7.0
38
  matplotlib>=3.4.0
39
 
40
- # Added from the code block
41
- requests>=2.31.0
 
2
  gradio>=4.0.0
3
  numpy>=1.24.0
4
  Pillow>=8.0.0
5
+ opencv-python-headless>=4.8.0 # Headless version for Spaces
6
+ PyMuPDF>=1.18.0
7
 
8
  # OCR Engines
9
  pytesseract>=0.3.8
10
  easyocr>=1.7.1
11
+ python-doctr>=0.7.0
12
+ tensorflow>=2.8.0
13
 
14
  # Deep Learning
15
  torch>=2.1.0
16
  torchvision>=0.15.0
17
+ ultralytics>=8.0.0
18
+ git+https://github.com/cvg/DeepLSD.git # Install DeepLSD from GitHub
 
19
 
20
  # Graph Processing
21
  networkx>=2.6.0
22
  plotly>=5.3.0
23
 
24
  # Utilities
 
25
  python-dotenv>=0.19.0
 
 
 
 
 
 
 
 
 
26
  loguru>=0.7.0
27
  matplotlib>=3.4.0
28
 
29
+ # AI/Chat
30
+ openai>=1.0.0