# Standard library
import json
import os
import shutil
import subprocess
from datetime import datetime
from pathlib import Path
from typing import Tuple, List, Dict, Optional

# Third-party
import chardet
import docx
import nltk
import pytesseract
import spacy
import streamlit as st
from nltk.corpus import stopwords
from nltk.tokenize import sent_tokenize
from pdf2image import convert_from_bytes

class DocumentProcessor:
    def __init__(self, base_path: Optional[str] = None):
"""Initialize Document Processor with proper data directory handling."""
self.base_path = self._setup_data_directories(base_path)
self.ontology_path = os.path.join(self.base_path, "ontology", "legal_ontology.json")
# Initialize NLP components
self._initialize_nlp()
# Ensure ontology exists
self._ensure_ontology_exists()
# Load ontology
self.ontology = self._load_ontology()
# Create processing directories
self.processed_path = os.path.join(self.base_path, "processed")
self.temp_path = os.path.join(self.base_path, "temp")
os.makedirs(self.processed_path, exist_ok=True)
os.makedirs(self.temp_path, exist_ok=True)
def _setup_data_directories(self, base_path: Optional[str] = None) -> str:
"""Set up data directories with error handling."""
data_path = base_path or os.path.join(os.getcwd(), "data")
subdirs = ["ontology", "processed", "temp", "indexes"]
for subdir in subdirs:
os.makedirs(os.path.join(data_path, subdir), exist_ok=True)
return data_path
def _initialize_nlp(self):
"""Initialize NLP components."""
try:
# Load spaCy model
try:
self.nlp = spacy.load("en_core_web_sm")
except OSError:
st.info("Downloading spaCy model...")
                spacy.cli.download("en_core_web_sm")
self.nlp = spacy.load("en_core_web_sm")
# Initialize NLTK
nltk_data_dir = os.path.join(self.base_path, "nltk_data")
os.makedirs(nltk_data_dir, exist_ok=True)
nltk.data.path.append(nltk_data_dir)
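            # punkt backs sent_tokenize and stopwords backs self.stop_words;
            # the remaining resources support NLTK tagging/chunking downstream.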
required_resources = ['punkt', 'averaged_perceptron_tagger', 'maxent_ne_chunker', 'words', 'stopwords']
for resource in required_resources:
try:
nltk.download(resource, download_dir=nltk_data_dir, quiet=True)
except Exception as e:
st.warning(f"Could not download {resource}: {str(e)}")
            self.stop_words = set(stopwords.words('english'))
except Exception as e:
st.error(f"Error initializing NLP components: {str(e)}")
raise
def _ensure_ontology_exists(self):
"""Ensure the legal ontology file exists, create if not."""
if not os.path.exists(self.ontology_path):
default_ontology = {
"@graph": [
{
"@id": "concept:Contract",
"@type": "vocab:LegalConcept",
"rdfs:label": "Contract",
"rdfs:comment": "A legally binding agreement between parties",
"vocab:relatedConcepts": ["Offer", "Acceptance", "Consideration"]
},
{
"@id": "concept:Judgment",
"@type": "vocab:LegalConcept",
"rdfs:label": "Judgment",
"rdfs:comment": "A court's final determination",
"vocab:relatedConcepts": ["Court Order", "Decision", "Ruling"]
}
]
}
with open(self.ontology_path, 'w') as f:
json.dump(default_ontology, f, indent=2)
def _load_ontology(self) -> Dict:
"""Load legal ontology with error handling."""
try:
with open(self.ontology_path, 'r') as f:
return json.load(f)
except Exception as e:
st.error(f"Error loading ontology: {str(e)}")
return {"@graph": []}
def process_and_tag_document(self, file) -> Tuple[str, List[Dict], Dict]:
"""Process document and generate metadata."""
try:
doc_id = datetime.now().strftime('%Y%m%d_%H%M%S')
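            # NOTE: second-resolution timestamps can collide if two uploads
            # arrive within the same second; a UUID suffix would avoid this.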
doc_dir = os.path.join(self.processed_path, doc_id)
os.makedirs(doc_dir, exist_ok=True)
original_path = os.path.join(doc_dir, "original" + Path(file.name).suffix)
with open(original_path, 'wb') as f:
f.write(file.getvalue())
# Extract text and process document
text, chunks = self.process_document(original_path)
metadata = self._extract_metadata(text, file.name)
metadata.update({"doc_id": doc_id, "original_path": original_path})
# Save processed data
self._save_processed_data(doc_dir, text, chunks, metadata)
return text, chunks, metadata
except Exception as e:
st.error(f"Error in document processing pipeline: {str(e)}")
raise
def _tokenize_text(self, text: str) -> List[str]:
"""Tokenize text into sentences using NLTK."""
try:
return sent_tokenize(text)
except Exception:
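            # Fallback: naive split on periods when punkt data is unavailable.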
return [sentence.strip() for sentence in text.split('.') if sentence.strip()]
def process_document(self, file_path: str) -> Tuple[str, List[Dict]]:
"""Process a document based on its type."""
file_type = Path(file_path).suffix.lower()
        if file_type == '.pdf':
            text = self._process_pdf(file_path)
            if text is None:
                raise ValueError("PDF text extraction failed")
elif file_type == '.docx':
text = self._process_docx(file_path)
elif file_type in ['.txt', '.csv']:
text = self._process_text(file_path)
else:
raise ValueError(f"Unsupported file type: {file_type}")
chunks = self._create_chunks(text)
return text, chunks
    def _process_pdf(self, file_path: str) -> Optional[str]:
        """Extract text from a PDF by rendering pages and running OCR."""
        try:
            # First verify that the poppler binaries (pdftoppm) are on PATH
            try:
subprocess.check_output(['pdftoppm', '-v'], stderr=subprocess.STDOUT)
st.success("✓ Poppler found on system")
except (subprocess.CalledProcessError, FileNotFoundError):
# If not in default path, check common installation directories
poppler_paths = [
"/usr/bin",
"/usr/local/bin",
"/opt/poppler/bin",
"/app/.apt/usr/bin", # Common HF Spaces path
os.path.expanduser("~/.local/bin")
]
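                # for/else: the else branch runs only if no directory contained pdftoppm.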
for poppler_dir in poppler_paths:
if os.path.exists(os.path.join(poppler_dir, "pdftoppm")):
# Update PATH
os.environ["PATH"] = f"{poppler_dir}:{os.environ.get('PATH', '')}"
st.success(f"✓ Found Poppler in {poppler_dir}")
break
else:
st.error("❌ Poppler not found. Please ensure 'poppler-utils' is in packages.txt")
return None
# Attempt to read and convert the PDF
try:
with open(file_path, 'rb') as pdf_file:
pdf_bytes = pdf_file.read()
# Convert PDF to images
images = convert_from_bytes(
pdf_bytes,
dpi=300, # Increase DPI for better OCR quality
fmt='png'
)
# Process each page
text = ""
                total_pages = len(images)
                progress_bar = st.progress(0.0)  # one bar, updated in place per page
                for page_num, image in enumerate(images, 1):
                    progress_bar.progress(page_num / total_pages)
                    st.info(f"📄 Processing page {page_num}/{total_pages}")
# Perform OCR with custom configuration
page_text = pytesseract.image_to_string(
image,
config='--psm 3 --oem 3' # Use default page segmentation and OCR Engine Mode
)
text += f"\n{'='*20} Page {page_num} {'='*20}\n{page_text}\n"
return text.strip()
except Exception as e:
st.error(f"Error processing PDF content: {str(e)}")
return None
except Exception as e:
st.error(f"Unexpected error: {str(e)}")
return None
def _process_docx(self, file_path: str) -> str:
"""Extract text from DOCX files."""
try:
doc = docx.Document(file_path)
return "\n".join(para.text for para in doc.paragraphs if para.text.strip())
except Exception as e:
st.error(f"Error processing DOCX: {str(e)}")
raise
def _process_text(self, file_path: str) -> str:
"""Process plain text files."""
try:
with open(file_path, 'rb') as f:
raw_data = f.read()
            encoding = chardet.detect(raw_data).get('encoding') or 'utf-8'
return raw_data.decode(encoding)
except Exception as e:
st.error(f"Error processing text file: {str(e)}")
raise
def _create_chunks(self, text: str) -> List[Dict]:
"""Chunk text for further processing."""
sentences = self._tokenize_text(text)
chunk_size = 500
chunks = []
current_chunk, current_length = [], 0
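        # Greedily pack whole sentences into ~chunk_size-character chunks so
        # no sentence is split across chunk boundaries.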
for sentence in sentences:
if current_length + len(sentence) > chunk_size and current_chunk:
chunks.append(self._process_chunk(' '.join(current_chunk), len(chunks)))
current_chunk, current_length = [], 0
current_chunk.append(sentence)
current_length += len(sentence)
if current_chunk:
chunks.append(self._process_chunk(' '.join(current_chunk), len(chunks)))
return chunks
def _link_to_ontology(self, text: str) -> List[Dict]:
"""Link text to legal ontology concepts."""
relevant_concepts = []
text_lower = text.lower()
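        # Naive case-insensitive substring match; only the first occurrence
        # of each ontology label in the text is linked.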
for concept in self.ontology.get("@graph", []):
if "rdfs:label" not in concept:
continue
label = concept["rdfs:label"].lower()
if label in text_lower:
# Get surrounding context
start_idx = text_lower.index(label)
context_start = max(0, start_idx - 100)
context_end = min(len(text), start_idx + len(label) + 100)
relevant_concepts.append({
"concept": concept["rdfs:label"],
"type": concept.get("@type", "Unknown"),
"description": concept.get("rdfs:comment", ""),
"context": text[context_start:context_end].strip(),
"location": {"start": start_idx, "end": start_idx + len(label)}
})
return relevant_concepts
def _process_chunk(self, text: str, chunk_id: int) -> Dict:
"""Process individual chunks with NLP and ontology linking."""
doc = self.nlp(text)
return {
'chunk_id': chunk_id,
'text': text,
'entities': [(ent.text, ent.label_) for ent in doc.ents],
'noun_phrases': [np.text for np in doc.noun_chunks],
'ontology_links': self._link_to_ontology(text)
}
def _extract_metadata(self, text: str, file_name: str) -> Dict:
"""Extract metadata from text."""
doc = self.nlp(text[:10000])
return {
'filename': file_name,
'file_type': Path(file_name).suffix.lower(),
'processed_at': datetime.now().isoformat(),
'entities': [(ent.text, ent.label_) for ent in doc.ents],
'document_type': 'Legal Document'
}
def _save_processed_data(self, doc_dir: str, text: str, chunks: List[Dict], metadata: Dict):
"""Save processed data to disk."""
with open(os.path.join(doc_dir, "processed.txt"), 'w', encoding='utf-8') as f:
f.write(text)
with open(os.path.join(doc_dir, "chunks.json"), 'w') as f:
json.dump(chunks, f, indent=2)
with open(os.path.join(doc_dir, "metadata.json"), 'w') as f:
json.dump(metadata, f, indent=2)
def cleanup(self):
"""Clean up temporary files."""
shutil.rmtree(self.temp_path, ignore_errors=True)
os.makedirs(self.temp_path, exist_ok=True)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.cleanup()
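
# Example usage — a minimal sketch, assuming this module is imported by a
# Streamlit app and `uploaded_file` comes from st.file_uploader (hypothetical
# variable name):
#
#     uploaded_file = st.file_uploader("Upload a document", type=["pdf", "docx", "txt"])
#     if uploaded_file is not None:
#         with DocumentProcessor() as processor:
#             text, chunks, metadata = processor.process_and_tag_document(uploaded_file)
#             st.json(metadata)
#
# The context manager guarantees cleanup() removes temp files even on failure.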