# utils/__init__.py - Core utilities for PDF Analysis & Orchestrator
import os
import asyncio
import tempfile
import hashlib
import json
import time
from pathlib import Path
import pdfplumber
from uuid import uuid4
import openai
import shutil
from typing import List, Dict, Any, Optional

# ------------------------
# OpenAI setup
# ------------------------
OPENAI_KEY = os.environ.get("OPENAI_API_KEY")
if OPENAI_KEY is None:
    raise RuntimeError("Set OPENAI_API_KEY environment variable before running.")

openai.api_key = OPENAI_KEY


def uuid4_hex():
    """Return a random UUID as a 32-character hex string."""
    return uuid4().hex

# ------------------------
# Async OpenAI Chat Wrapper
# ------------------------
async def call_openai_chat(model: str, messages: list, temperature=0.2, max_tokens=800):
    """
    Async wrapper for the OpenAI (>= 1.0.0) Chat Completions API.
    Runs the blocking client call in a worker thread via asyncio.to_thread.
    """
    def _call():
        resp = openai.chat.completions.create(
            model=model,
            messages=messages,
            temperature=temperature,
            max_tokens=max_tokens,
        )
        return resp.choices[0].message.content.strip()
    return await asyncio.to_thread(_call)
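
# Usage sketch: "gpt-4o-mini" below is an assumption standing in for
# whichever Chat Completions model the caller actually uses.
#
#   reply = asyncio.run(call_openai_chat(
#       model="gpt-4o-mini",
#       messages=[{"role": "user", "content": "Say hello."}],
#   ))
#   print(reply)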

# ------------------------
# PDF Utilities
# ------------------------
def load_pdf_text(path: str) -> str:
    """Extract comprehensive content from PDF using pdfplumber"""
    content = []
    with pdfplumber.open(path) as pdf:
        for page_num, page in enumerate(pdf.pages, 1):
            page_content = []
            
            # Extract text
            text = page.extract_text()
            if text:
                page_content.append(f"=== PAGE {page_num} TEXT ===")
                page_content.append(text)
            
            # Extract tables
            tables = page.extract_tables()
            if tables:
                page_content.append(f"\n=== PAGE {page_num} TABLES ===")
                for table_num, table in enumerate(tables, 1):
                    page_content.append(f"\n--- TABLE {table_num} ---")
                    for row in table:
                        if row:  # Skip empty rows
                            # Clean and format table row
                            clean_row = [cell.strip() if cell else "" for cell in row]
                            page_content.append(" | ".join(clean_row))
            
            # Extract images info (metadata only)
            images = page.images
            if images:
                page_content.append(f"\n=== PAGE {page_num} IMAGES ===")
                for img_num, img in enumerate(images, 1):
                    page_content.append(f"Image {img_num}: {img.get('width', 'unknown')}x{img.get('height', 'unknown')} pixels")
            
            # Extract metadata
            page_content.append(f"\n=== PAGE {page_num} METADATA ===")
            page_content.append(f"Page size: {page.width}x{page.height}")
            page_content.append(f"Rotation: {page.rotation}")
            
            if page_content:
                content.append("\n".join(page_content))
    
    return "\n\n".join(content)

def save_text_as_file(text: str, suffix=".txt") -> str:
    """Save text to a temporary file"""
    fp = Path(tempfile.gettempdir()) / f"analysis_{uuid4().hex}{suffix}"
    fp.write_text(text, encoding="utf-8")
    return str(fp)

def save_uploaded_file(uploaded) -> str:
    """
    Save uploaded file to temporary location
    """
    dst = Path(tempfile.gettempdir()) / f"upload_{uuid4().hex}.pdf"
    with open(dst, "wb") as f:
        shutil.copyfileobj(uploaded, f)
    return str(dst)

# ------------------------
# Document Chunking
# ------------------------
def chunk_text(text: str, chunk_size: int = 15000, overlap: int = 1000) -> List[str]:
    """
    Split text into overlapping chunks for processing large documents
    """
    if len(text) <= chunk_size:
        return [text]
    
    chunks = []
    start = 0
    
    while start < len(text):
        end = start + chunk_size
        
        # Try to break at sentence boundary
        if end < len(text):
            # Look for sentence endings within the last 200 characters
            search_start = max(start, end - 200)
            sentence_end = text.rfind('.', search_start, end)
            if sentence_end > search_start:
                end = sentence_end + 1
        
        chunk = text[start:end].strip()
        if chunk:
            chunks.append(chunk)
        
        # Move start position back by the overlap, guarding against
        # non-positive progress when overlap >= chunk_size
        next_start = end - overlap
        start = next_start if next_start > start else end
        if start >= len(text):
            break
    
    return chunks
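
# Example with hypothetical numbers: a 40,000-character document and the
# defaults (chunk_size=15000, overlap=1000) yield three chunks, each new
# chunk starting 1,000 characters before the previous one ended:
#
#   chunks = chunk_text(text)              # 3 chunks for len(text) == 40000
#   all(len(c) <= 15000 for c in chunks)   # True (modulo sentence snapping)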


def get_file_hash(file_path: str) -> str:
    """Generate an MD5 content hash for cache keys (not a security hash)"""
    md5 = hashlib.md5()
    with open(file_path, 'rb') as f:
        # Stream in blocks so large PDFs are not read into memory at once
        for block in iter(lambda: f.read(8192), b''):
            md5.update(block)
    return md5.hexdigest()

# ------------------------
# Token Counting Utilities
# ------------------------
def estimate_tokens(text: str) -> int:
    """Rough estimation of token count (1 token ≈ 4 characters for English)"""
    return len(text) // 4

def is_within_token_limit(text: str, max_tokens: int = 6000) -> bool:
    """Check if text is within token limit for API calls"""
    return estimate_tokens(text) <= max_tokens

def truncate_to_token_limit(text: str, max_tokens: int = 6000) -> str:
    """Truncate text to fit within token limit"""
    if is_within_token_limit(text, max_tokens):
        return text
    
    # Rough character limit based on token estimation
    char_limit = max_tokens * 4
    return text[:char_limit] + "\n\n[Content truncated due to length...]"
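
# Worked example of the heuristic: a 30,000-character string estimates to
# 30000 // 4 = 7500 tokens, over the default 6000-token limit, so
# truncate_to_token_limit() keeps the first 6000 * 4 = 24000 characters
# and appends the truncation notice.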

# ------------------------
# Hierarchical Summarization
# ------------------------
async def create_hierarchical_summary(chunk_results: List[str], prompt: str, model: str, max_tokens: int = 6000) -> str:
    """Create a summary using hierarchical approach to avoid token limits"""
    
    # First, create intermediate summaries of groups of chunks
    intermediate_summaries = []
    group_size = 3  # Process 3 chunks at a time
    
    for i in range(0, len(chunk_results), group_size):
        group = chunk_results[i:i + group_size]
        group_text = "\n\n".join(group)
        
        # Truncate if too long
        if not is_within_token_limit(group_text, max_tokens):
            group_text = truncate_to_token_limit(group_text, max_tokens)
        
        group_prompt = f"Summarize the following chunk analyses, focusing on key insights and findings:\n\n{group_text}"
        
        try:
            summary = await call_openai_chat(
                model=model,
                messages=[
                    {"role": "system", "content": "You are an expert analyst creating sophisticated summaries. Focus on:\n- Identifying strategic opportunities and competitive advantages\n- Extracting specific, actionable insights with real-world applications\n- Highlighting unique value propositions and market implications\n- Connecting insights to broader business themes and opportunities\n- Providing concrete examples and implementation considerations"},
                    {"role": "user", "content": group_prompt}
                ],
                temperature=0.2,
                max_tokens=800
            )
            intermediate_summaries.append(f"Group {i//group_size + 1} Summary:\n{summary}")
        except Exception as e:
            intermediate_summaries.append(f"Group {i//group_size + 1} Summary:\nError: {str(e)}")
    
    # Now create final summary from intermediate summaries
    if len(intermediate_summaries) == 1:
        return intermediate_summaries[0]
    
    final_text = "\n\n".join(intermediate_summaries)
    
    # If still too long, create another level of summarization
    if not is_within_token_limit(final_text, max_tokens):
        final_text = truncate_to_token_limit(final_text, max_tokens)
    
    final_prompt = f"Create a comprehensive final summary based on the following intermediate summaries. Original prompt: {prompt}\n\n{final_text}"
    
    try:
        final_summary = await call_openai_chat(
            model=model,
            messages=[
                {"role": "system", "content": "You are a strategic business analyst creating comprehensive, actionable insights. Your final summary should:\n- Synthesize insights into a coherent strategic narrative\n- Prioritize opportunities by potential impact and feasibility\n- Provide specific, actionable recommendations with clear next steps\n- Include quantifiable insights where possible (market size, ROI, timelines)\n- Address implementation challenges and mitigation strategies\n- Connect all insights to create a unified strategic vision\n- Focus on what matters most for business success"},
                {"role": "user", "content": final_prompt}
            ],
            temperature=0.2,
            max_tokens=1000
        )
        return final_summary
    except Exception as e:
        return f"Error creating final summary: {str(e)}\n\nIntermediate summaries:\n{final_text}"

# ------------------------
# Enhanced Caching System
# ------------------------
CACHE_DIR = Path(tempfile.gettempdir()) / "pdf_analysis_cache"
CACHE_DIR.mkdir(exist_ok=True)

def get_cached_analysis(file_path: str, prompt: str) -> Optional[Dict[str, Any]]:
    """Retrieve cached analysis if available - exact prompt match"""
    file_hash = get_file_hash(file_path)
    prompt_hash = hashlib.md5(prompt.encode()).hexdigest()
    cache_file = CACHE_DIR / f"{file_hash}_{prompt_hash}.json"
    
    if cache_file.exists():
        try:
            with open(cache_file, 'r', encoding='utf-8') as f:
                cache_data = json.load(f)
                # Check if file hasn't been modified and cache is still valid (24 hours)
                if (cache_data.get('file_hash') == file_hash and 
                    cache_data.get('prompt_hash') == prompt_hash and
                    time.time() - cache_data.get('cached_at', 0) < 86400):  # 24 hours
                    return cache_data.get('analysis')
        except Exception:
            pass
    return None

def get_cached_document_content(file_path: str) -> Optional[str]:
    """Retrieve cached document content for any prompt - document-only match"""
    file_hash = get_file_hash(file_path)
    cache_file = CACHE_DIR / f"{file_hash}_content.json"
    
    if cache_file.exists():
        try:
            with open(cache_file, 'r', encoding='utf-8') as f:
                cache_data = json.load(f)
                # Check if file hasn't been modified and cache is still valid (24 hours)
                if (cache_data.get('file_hash') == file_hash and
                    time.time() - cache_data.get('cached_at', 0) < 86400):  # 24 hours
                    return cache_data.get('content')
        except Exception:
            pass
    return None

def cache_analysis(file_path: str, prompt: str, analysis: Dict[str, Any]) -> None:
    """Cache analysis results for future use"""
    file_hash = get_file_hash(file_path)
    prompt_hash = hashlib.md5(prompt.encode()).hexdigest()
    cache_file = CACHE_DIR / f"{file_hash}_{prompt_hash}.json"
    
    try:
        cache_data = {
            'file_hash': file_hash,
            'prompt_hash': prompt_hash,
            'analysis': analysis,
            'cached_at': time.time()
        }
        with open(cache_file, 'w', encoding='utf-8') as f:
            json.dump(cache_data, f, ensure_ascii=False)
    except Exception:
        pass  # Fail silently if caching fails
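
# Cache round trip (sketch; "report.pdf" is a hypothetical path that must
# exist on disk, since the cache key is hashed from the file's bytes):
#
#   cache_analysis("report.pdf", prompt, {"summary": "..."})
#   hit = get_cached_analysis("report.pdf", prompt)  # dict, or None after 24h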

def cache_document_content(file_path: str, content: str) -> None:
    """Cache document content for reuse with any prompt"""
    file_hash = get_file_hash(file_path)
    cache_file = CACHE_DIR / f"{file_hash}_content.json"
    
    try:
        cache_data = {
            'file_hash': file_hash,
            'content': content,
            'cached_at': time.time()
        }
        with open(cache_file, 'w', encoding='utf-8') as f:
            json.dump(cache_data, f, ensure_ascii=False)
    except Exception:
        pass  # Fail silently if caching fails

def get_cached_text(file_path: str) -> Optional[str]:
    """Retrieve cached PDF text if available"""
    file_hash = get_file_hash(file_path)
    cache_file = CACHE_DIR / f"{file_hash}_text.json"
    
    if cache_file.exists():
        try:
            with open(cache_file, 'r', encoding='utf-8') as f:
                cache_data = json.load(f)
                # Check if file hasn't been modified
                if cache_data.get('file_hash') == file_hash:
                    return cache_data.get('text')
        except Exception:
            pass
    return None

def cache_text(file_path: str, text: str) -> None:
    """Cache PDF text for future use"""
    file_hash = get_file_hash(file_path)
    cache_file = CACHE_DIR / f"{file_hash}_text.json"
    
    try:
        cache_data = {
            'file_hash': file_hash,
            'text': text,
            'cached_at': time.time()
        }
        with open(cache_file, 'w', encoding='utf-8') as f:
            json.dump(cache_data, f, ensure_ascii=False)
    except Exception:
        pass  # Fail silently if caching fails

def load_pdf_text_cached(path: str) -> str:
    """Load PDF text with caching support"""
    # Try to get from cache first (explicit None check so a legitimately
    # empty extraction still counts as a cache hit)
    cached_text = get_cached_text(path)
    if cached_text is not None:
        return cached_text
    
    # Extract text if not cached
    text = load_pdf_text(path)
    
    # Cache the result
    cache_text(path, text)
    
    return text

# ------------------------
# Enhanced PDF Processing
# ------------------------
def load_pdf_text_chunked(path: str, chunk_size: int = 15000) -> List[str]:
    """Load PDF text and return as chunks for large documents"""
    text = load_pdf_text_cached(path)
    return chunk_text(text, chunk_size)

def get_document_metadata(path: str) -> Dict[str, Any]:
    """Extract basic metadata from PDF"""
    try:
        with pdfplumber.open(path) as pdf:
            return {
                'page_count': len(pdf.pages),
                'file_size': Path(path).stat().st_size,
                'extracted_at': time.time()
            }
    except Exception:
        return {'page_count': 0, 'file_size': 0, 'extracted_at': time.time()}
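
# ------------------------
# End-to-end sketch
# ------------------------
# A minimal driver tying the pieces together. `uploaded` is a hypothetical
# file-like object (e.g. from a web upload); the model name is an assumption;
# passing raw chunks to create_hierarchical_summary treats each chunk as its
# own "analysis", which is fine for a plain summarization pass.
#
#   path = save_uploaded_file(uploaded)
#   meta = get_document_metadata(path)           # page count, file size
#   chunks = load_pdf_text_chunked(path)         # cached extraction + chunking
#   summary = asyncio.run(create_hierarchical_summary(
#       chunk_results=chunks,
#       prompt="Summarize the document.",
#       model="gpt-4o-mini",
#   ))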