"""
Document Ingestion Service

Handles ingestion of various document types (PDF, DOCX, TXT, Markdown, URL,
raw text) with metadata support and automatic source-type detection.
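
Typical usage (illustrative sketch; assumes a `rag_client` object exposing an
async `ingest_with_metadata` method, which is provided elsewhere in this project):

    payload = await prepare_ingestion_payload(
        tenant_id="tenant-123",
        content="Quarterly report text ...",
        filename="report.txt",
    )
    result = await process_ingestion(payload, rag_client)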
"""

import re
import logging
from typing import Dict, Any, Optional
from urllib.parse import urlparse
import httpx
from io import BytesIO

logger = logging.getLogger("document_ingestion")


def detect_source_type(content: str, filename: Optional[str] = None, url: Optional[str] = None) -> str:
    """
    Detect the source type from content, filename, or URL.
    Returns: 'pdf', 'docx', 'txt', 'url', 'raw_text', 'markdown'
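
    Examples (illustrative, not exhaustive):
        >>> detect_source_type("some text", filename="report.pdf")
        'pdf'
        >>> detect_source_type("https://example.com/page")
        'url'
        >>> detect_source_type("plain prose with no other hints")
        'raw_text'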
    """
    if url:
        return "url"
    
    if filename:
        ext = filename.lower().split('.')[-1] if '.' in filename else ''
        if ext in ['pdf']:
            return 'pdf'
        elif ext in ['docx', 'doc']:
            return 'docx'
        elif ext in ['txt', 'text']:
            return 'txt'
        elif ext in ['md', 'markdown']:
            return 'markdown'
    
    # Heuristic detection from content: classify as a URL only when the content
    # itself is a single URL, not when it merely mentions one.
    stripped = content.strip()
    if re.match(r'^(https?://|www\.)\S+$', stripped, flags=re.IGNORECASE):
        return 'url'
    
    return 'raw_text'


async def extract_text_from_url(url: str, timeout: int = 30) -> str:
    """
    Fetch and extract text content from a URL (async).
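
    Example (illustrative; the URL is a placeholder):
        text = await extract_text_from_url("https://example.com/article")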
    """
    try:
        async with httpx.AsyncClient(timeout=timeout, follow_redirects=True) as client:
            response = await client.get(url)
            response.raise_for_status()
            
            # Basic HTML stripping (for simple pages)
            text = response.text
            # Remove script and style tags
            text = re.sub(r'<script[^>]*>.*?</script>', '', text, flags=re.DOTALL | re.IGNORECASE)
            text = re.sub(r'<style[^>]*>.*?</style>', '', text, flags=re.DOTALL | re.IGNORECASE)
            # Remove HTML tags
            text = re.sub(r'<[^>]+>', ' ', text)
            # Normalize whitespace
            text = re.sub(r'\s+', ' ', text).strip()
            
            return text
    except Exception as e:
        logger.error(f"Failed to fetch URL {url}: {e}")
        raise ValueError(f"Failed to fetch URL: {str(e)}")


def extract_text_from_file_bytes(file_bytes: bytes, filename: str) -> str:
    """
    Extract text from binary file data (PDF, DOCX, etc.).
    
    Args:
        file_bytes: Binary file content
        filename: Original filename (for type detection)
    
    Returns:
        Extracted text content
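
    Example (illustrative; `pdf_bytes` is a placeholder, and PDF input requires PyPDF2):
        text = extract_text_from_file_bytes(pdf_bytes, "report.pdf")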
    """
    ext = filename.lower().split('.')[-1] if '.' in filename else ''
    
    # PDF extraction
    if ext == 'pdf':
        try:
            import PyPDF2
            pdf_file = BytesIO(file_bytes)
            pdf_reader = PyPDF2.PdfReader(pdf_file)
            text_parts = []
            for page in pdf_reader.pages:
                text_parts.append(page.extract_text())
            return '\n'.join(text_parts)
        except ImportError:
            logger.warning("PyPDF2 not installed, cannot extract PDF text")
            raise ValueError("PDF extraction requires PyPDF2. Install with: pip install PyPDF2")
        except Exception as e:
            logger.error(f"PDF extraction failed: {e}")
            raise ValueError(f"Failed to extract text from PDF: {str(e)}")
    
    # DOCX extraction
    elif ext in ['docx', 'doc']:
        try:
            from docx import Document
            doc_file = BytesIO(file_bytes)
            doc = Document(doc_file)
            return '\n'.join(paragraph.text for paragraph in doc.paragraphs)
        except ImportError:
            logger.warning("python-docx not installed, cannot extract DOCX text")
            raise ValueError("DOCX extraction requires python-docx. Install with: pip install python-docx")
        except Exception as e:
            logger.error(f"DOCX extraction failed: {e}")
            raise ValueError(f"Failed to extract text from DOCX: {str(e)}")
    
    # Text files (TXT, MD)
    elif ext in ['txt', 'md', 'markdown', 'text']:
        try:
            return file_bytes.decode('utf-8', errors='ignore')
        except Exception as e:
            logger.error(f"Text file decoding failed: {e}")
            raise ValueError(f"Failed to decode text file: {str(e)}")
    
    else:
        # Fall back to strict UTF-8 decoding so genuinely binary files raise
        # instead of silently returning garbage (errors='ignore' never raises).
        try:
            return file_bytes.decode('utf-8')
        except UnicodeDecodeError:
            raise ValueError(f"Unsupported file type: {ext}. Supported: pdf, docx, txt, md")


def normalize_text(text: str) -> str:
    """
    Sanitize and normalize text before ingestion.
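
    Example (illustrative):
        >>> normalize_text("  Hello   world!  ")
        'Hello world!'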
    """
    # Collapse all whitespace (including newlines and tabs) to single spaces
    text = re.sub(r'\s+', ' ', text)
    # Remove remaining control characters
    text = re.sub(r'[\x00-\x08\x0B-\x0C\x0E-\x1F\x7F]', '', text)
    # Strip leading/trailing whitespace
    text = text.strip()
    return text


async def prepare_ingestion_payload(
    tenant_id: str,
    content: str,
    source_type: Optional[str] = None,
    filename: Optional[str] = None,
    url: Optional[str] = None,
    doc_id: Optional[str] = None,
    metadata: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
    """
    Prepare ingestion payload according to the system prompt specification.
    
    Returns:
        {
            "action": "ingest_document",
            "tenant_id": "...",
            "source_type": "pdf | docx | txt | url | raw_text",
            "content": "...",
            "metadata": {
                "filename": "...",
                "url": "...",
                "doc_id": "..."
            }
        }
    """
    # Auto-detect source type if not provided
    if not source_type:
        source_type = detect_source_type(content, filename, url)
    
    # Handle URL: fetch content (async)
    if source_type == "url" and url:
        try:
            content = await extract_text_from_url(url)
        except Exception as e:
            logger.warning(f"URL fetch failed, using provided content: {e}")
    
    # Normalize content
    content = normalize_text(content)
    
    if not content:
        raise ValueError("Content is empty after normalization")
    
    # Generate doc_id if not provided
    if not doc_id:
        if filename:
            doc_id = filename
        elif url:
            parsed = urlparse(url)
            doc_id = f"{parsed.netloc}{parsed.path}".replace('/', '_')[:100]
        else:
            import hashlib
            doc_id = hashlib.md5(content.encode()).hexdigest()[:16]
    
    # Build metadata
    ingestion_metadata = {
        "doc_id": doc_id,
        **(metadata or {})
    }
    
    if filename:
        ingestion_metadata["filename"] = filename
    if url:
        ingestion_metadata["url"] = url
    
    return {
        "action": "ingest_document",
        "tenant_id": tenant_id,
        "source_type": source_type,
        "content": content,
        "metadata": ingestion_metadata
    }


async def process_ingestion(
    payload: Dict[str, Any],
    rag_client,
    extract_metadata: bool = True,
    user_role: Optional[str] = None
) -> Dict[str, Any]:
    """
    Process the ingestion payload by sending it to the RAG MCP server.
    
    Args:
        payload: The ingestion payload from prepare_ingestion_payload
        rag_client: RAGClient instance
        extract_metadata: Whether to extract AI-generated metadata (default: True)
        user_role: Optional role of the requesting user, forwarded to the RAG client
    
    Returns:
        Result from RAG ingestion with extracted metadata
    """
    tenant_id = payload["tenant_id"]
    content = payload["content"]
    metadata = payload.get("metadata", {})
    source_type = payload.get("source_type", "raw_text")
    filename = metadata.get("filename")
    url = metadata.get("url")
    doc_id = metadata.get("doc_id")
    
    # Extract AI-generated metadata
    extracted_metadata = {}
    if extract_metadata:
        try:
            from ..services.metadata_extractor import MetadataExtractor
            extractor = MetadataExtractor()
            extracted_metadata = await extractor.extract_metadata(
                content=content,
                filename=filename,
                url=url,
                source_type=source_type
            )
        except Exception as e:
            logger.warning(f"Metadata extraction failed: {e}, continuing without metadata")
    
    # Merge extracted metadata with provided metadata
    final_metadata = {
        **metadata,
        **extracted_metadata
    }
    
    # Send to RAG MCP server with metadata
    try:
        result = await rag_client.ingest_with_metadata(
            content=content,
            tenant_id=tenant_id,
            metadata=final_metadata,
            doc_id=doc_id,
            user_role=user_role
        )
        
        # Check if result indicates an error (multiple ways the RAG server can signal errors)
        if isinstance(result, dict):
            # Check for explicit error status
            if result.get("status") == "error":
                error_msg = result.get("message") or result.get("error") or "Unknown error from RAG server"
                error_type = result.get("error_type", "unknown_error")
                logger.error(f"RAG ingestion error ({error_type}): {error_msg}")
                
                # For permission errors, raise a specific exception that can be caught and converted to HTTPException
                if "permission" in error_msg.lower() or "not permitted" in error_msg.lower() or error_type == "validation_error":
                    # Raise the builtin PermissionError with an HTTP status attached,
                    # so the caller can convert it to an HTTPException (avoids
                    # shadowing the builtin with a local class).
                    perm_err = PermissionError(f"Permission denied: {error_msg}")
                    perm_err.status_code = 403
                    perm_err.detail = (
                        f"Permission denied: {error_msg}\n\n"
                        "Please change your role to 'editor', 'admin', or 'owner' "
                        "in the User Role dropdown in app.py."
                    )
                    raise perm_err
                
                raise ValueError(f"RAG server error ({error_type}): {error_msg}")
            
            # Check for error field
            if "error" in result:
                error_msg = result.get("error", "Unknown error from RAG server")
                logger.error(f"RAG ingestion error: {error_msg}")
                raise ValueError(f"RAG server error: {error_msg}")
        
        chunks_stored = result.get("chunks_stored", 0) if isinstance(result, dict) else 0
        
        # Enhance result with metadata
        response = {
            "status": "ok",
            "tenant_id": tenant_id,
            "source_type": source_type,
            "doc_id": doc_id,
            "chunks_stored": chunks_stored,
            "metadata": final_metadata,
            "extracted_metadata": extracted_metadata,  # Include extracted metadata in response
        }
        
        # Add any additional fields from result if it's a dict
        if isinstance(result, dict):
            response.update(result)
        
        return response
    except Exception as e:
        # Re-raise permission errors as-is (they'll be caught and converted to HTTPException)
        if hasattr(e, 'status_code') and e.status_code == 403:
            raise
        
        logger.error(f"Failed to ingest document to RAG server: {e}", exc_info=True)
        # Re-raise with more context
        raise RuntimeError(
            f"Failed to send document to RAG MCP server: {str(e)}\n\n"
            f"Please check:\n"
            f"1. RAG_MCP_URL is set correctly (default: http://localhost:8900/rag)\n"
            f"2. RAG MCP server is running\n"
            f"3. Database connection (POSTGRESQL_URL) is configured in the RAG server"
        ) from e