# NOTE(review): removed scraped Hugging Face Spaces page chrome ("Spaces: Sleeping Sleeping")
# app.py
import streamlit as st  # third-party UI framework; must be installed in the Space
import os
from pathlib import Path
import tempfile  # NOTE(review): appears unused in this file — confirm before removing
# Import local modules
from components.notebook_ui import NotebookUI
from utils.document_processor import DocumentProcessor
from utils.llm_gateway import LLMGateway
from utils.ontology_manager import LegalOntology
class LegalAIApp:
    """Streamlit app for AI-assisted legal document analysis.

    Wires together the document processor, an Anthropic-backed LLM gateway,
    a legal ontology (analysis templates), and a notebook-style results UI.
    """

    def __init__(self):
        # FIX: st.set_page_config() must be the *first* Streamlit command
        # executed in a script run, otherwise Streamlit raises
        # StreamlitAPIException. setup_environment() may call
        # st.error()/st.stop(), so configure the page before anything else.
        self.setup_page_config()
        self.setup_environment()
        self.setup_session_state()
        self.init_components()

    def setup_environment(self):
        """Read the API key from the environment and ensure the data dir exists.

        Stops the app with a visible error if ANTHROPIC_API_KEY is unset.
        """
        self.anthropic_api_key = os.environ.get('ANTHROPIC_API_KEY')
        if not self.anthropic_api_key:
            st.error("Please set the ANTHROPIC_API_KEY environment variable")
            st.stop()
        # Keep the data directory on the instance so other methods can use it
        # (the original created it but discarded the handle).
        self.data_dir = Path("data")  # local directory instead of root
        self.data_dir.mkdir(parents=True, exist_ok=True)

    def setup_session_state(self):
        """Initialize per-session containers (idempotent across Streamlit reruns)."""
        if 'processed_docs' not in st.session_state:
            st.session_state.processed_docs = {}
        if 'conversation_history' not in st.session_state:
            st.session_state.conversation_history = []

    def init_components(self):
        """Instantiate the LLM gateway, document processor, notebook UI and ontology."""
        self.llm = LLMGateway(self.anthropic_api_key)
        self.doc_processor = DocumentProcessor()
        self.notebook_ui = NotebookUI()
        self.legal_ontology = LegalOntology()

    def setup_page_config(self):
        """Configure the Streamlit page (title, icon, wide layout, open sidebar)."""
        st.set_page_config(
            page_title="Legal AI Assistant",
            page_icon="⚖️",
            layout="wide",
            initial_sidebar_state="expanded"
        )

    def show_disclaimer(self):
        """Render the collapsible legal disclaimer."""
        with st.expander("ℹ️ Legal Disclaimer", expanded=False):
            st.warning(
                "This tool is for research assistance only and does not constitute "
                "legal advice. Always consult with a qualified legal professional."
            )

    def render_sidebar(self):
        """Render the sidebar (jurisdiction picker + file uploader).

        Returns:
            str: the selected jurisdiction ("UK", "India" or "UAE").
        """
        st.sidebar.header("Document Upload")
        jurisdiction = st.sidebar.selectbox(
            "Select Jurisdiction",
            ["UK", "India", "UAE"]
        )
        uploaded_files = st.sidebar.file_uploader(
            "Upload Documents",
            accept_multiple_files=True,
            type=['pdf', 'docx', 'txt', 'jpg', 'png']
        )
        if uploaded_files:
            self.process_uploaded_files(uploaded_files)
        return jurisdiction

    def process_uploaded_files(self, files):
        """Extract text from and chunk each newly uploaded file.

        Files already present in session state (keyed by name) are skipped so
        Streamlit reruns do not re-process the same upload. Per-file failures
        are reported in the sidebar without aborting the rest of the batch.
        """
        for file in files:
            if file.name in st.session_state.processed_docs:
                continue  # already handled on a previous rerun
            with st.spinner(f"Processing {file.name}..."):
                try:
                    # Process document directly from the uploaded file object
                    doc_content = self.doc_processor.process_document(file)
                    if doc_content:
                        chunks = self.doc_processor.chunk_document(doc_content)
                        st.session_state.processed_docs[file.name] = {
                            'content': doc_content,
                            'chunks': chunks,
                            'status': 'processed'
                        }
                        st.sidebar.success(f"✔️ Processed {file.name}")
                    else:
                        st.sidebar.error(f"❌ Could not extract text from {file.name}")
                except Exception as e:
                    # Best-effort batch processing: surface the error, keep going.
                    st.sidebar.error(f"❌ Error processing {file.name}: {str(e)}")

    def render_analysis_options(self, jurisdiction):
        """Render analysis controls and run the LLM analysis on button click.

        Args:
            jurisdiction: jurisdiction string selected in the sidebar.
        """
        st.subheader("Analysis Options")
        analysis_type = st.selectbox(
            "Select Analysis Type",
            [
                "Document Summary",
                "Key Elements Analysis",
                "Risk Assessment",
                "Chronological Summary",
                "Stakeholder Analysis"
            ]
        )
        # Case type drives which ontology template is generated.
        case_type = st.selectbox(
            "Select Case Type",
            ["contract", "commercial", "employment", "dispute_resolution"]
        )
        if st.button("Generate Analysis"):
            # Guard: without documents the LLM would be asked to analyse nothing.
            if not st.session_state.processed_docs:
                st.warning("Please upload at least one document before generating an analysis.")
                return
            template = self.legal_ontology.generate_analysis_template(case_type, jurisdiction)
            # str.join instead of quadratic `+=` concatenation over all docs.
            doc_content = "\n\n".join(
                doc['content'] for doc in st.session_state.processed_docs.values()
            )
            analysis = self.llm.generate(
                f"Based on the following template and document content, provide a detailed legal analysis:\n\n"
                f"Template:\n{template}\n\n"
                f"Document Content:\n{doc_content}"
            )
            # Append the result to the notebook-style results area.
            self.notebook_ui.add_cell("analysis", {
                'template': analysis_type,
                'content': analysis,
                'metadata': {
                    'jurisdiction': jurisdiction,
                    'case_type': case_type
                }
            })

    def render_main_content(self):
        """Render the title, sidebar, analysis options and notebook cells."""
        st.title("Legal AI Assistant")
        jurisdiction = self.render_sidebar()
        self.render_analysis_options(jurisdiction)
        st.header("Analysis Results")
        self.notebook_ui.render_cells()

    def run(self):
        """Application entry point: disclaimer first, then the main content."""
        self.show_disclaimer()
        self.render_main_content()
if __name__ == "__main__":
    # Build and launch the Streamlit application.
    LegalAIApp().run()