File size: 4,262 Bytes
792ad00
 
951d5c6
792ad00
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
951d5c6
792ad00
 
 
 
 
951d5c6
792ad00
 
 
 
 
951d5c6
 
792ad00
 
 
 
 
951d5c6
 
 
 
 
 
 
 
 
792ad00
 
 
 
 
 
 
 
 
 
 
 
 
 
951d5c6
 
 
792ad00
 
 
 
 
951d5c6
 
 
 
 
792ad00
 
 
 
 
951d5c6
792ad00
 
 
 
 
 
951d5c6
 
 
 
792ad00
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
import logging
import os
import asyncio
import tempfile
from typing import Optional
import openai
from core.config import settings
from core.prompts import get_mindmap_system_prompt
from services.s3_service import s3_service

logger = logging.getLogger(__name__)

class MindMapService:
    """Generates Mermaid mindmap source from a PDF stored in S3 or from raw text.

    All blocking I/O (S3 download, OpenAI upload/completion/delete, temp-file
    removal) is routed through ``asyncio.to_thread`` so the event loop is
    never blocked.
    """

    # Completion parameters shared by both input paths.
    _MODEL = "gpt-4o-mini"
    _TEMPERATURE = 0.7

    def __init__(self):
        # Synchronous client; every call site below wraps it in to_thread.
        self.openai_client = openai.OpenAI(api_key=settings.OPENAI_API_KEY)

    async def generate_mindmap(
        self,
        file_key: Optional[str] = None,
        text_input: Optional[str] = None
    ) -> str:
        """Generate a Mermaid mindmap from either an S3 PDF or direct text.

        Args:
            file_key: S3 object key of a PDF to summarize (takes precedence).
            text_input: Raw text to summarize when no ``file_key`` is given.

        Returns:
            Mermaid mindmap source with any markdown code fences stripped.

        Raises:
            ValueError: If neither ``file_key`` nor ``text_input`` is provided.
        """
        # Guard clause: fail fast before doing any prompt or network work.
        if not file_key and not text_input:
            raise ValueError("Either file_key or text_input must be provided")

        try:
            system_prompt = get_mindmap_system_prompt()
            if file_key:
                raw_content = await self._generate_from_file(system_prompt, file_key)
            else:
                raw_content = await self._generate_from_text(system_prompt, text_input)
            return self._strip_fences(raw_content)
        except Exception:
            # logger.exception records the traceback; re-raise for the caller.
            logger.exception("Mind map generation failed")
            raise

    async def _generate_from_file(self, system_prompt: str, file_key: str) -> str:
        """Download the PDF from S3, upload it to OpenAI, and request a mindmap."""
        tmp = tempfile.NamedTemporaryFile(delete=False, suffix=".pdf")
        tmp_path = tmp.name
        tmp.close()

        try:
            # Download PDF from S3 (non-blocking).
            await asyncio.to_thread(
                s3_service.s3_client.download_file,
                settings.AWS_S3_BUCKET,
                file_key,
                tmp_path,
            )

            # Upload to OpenAI (non-blocking).
            def _upload():
                with open(tmp_path, "rb") as f:
                    return self.openai_client.files.create(
                        file=f,
                        purpose="assistants",
                    )

            uploaded_file = await asyncio.to_thread(_upload)
            try:
                messages = [
                    {"role": "system", "content": system_prompt},
                    {
                        "role": "user",
                        "content": [
                            {
                                "type": "file",
                                "file": {"file_id": uploaded_file.id},
                            }
                        ],
                    },
                ]
                return await self._chat(messages)
            finally:
                # BUG FIX: previously the uploaded file was only deleted on the
                # success path, leaking OpenAI file storage whenever the
                # completion call raised. Always clean it up.
                await asyncio.to_thread(
                    self.openai_client.files.delete,
                    uploaded_file.id,
                )
        finally:
            # Always remove the local temp file, even on failure.
            if os.path.exists(tmp_path):
                await asyncio.to_thread(os.remove, tmp_path)

    async def _generate_from_text(self, system_prompt: str, text_input: str) -> str:
        """Request a mindmap directly from user-supplied text."""
        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": text_input},
        ]
        return await self._chat(messages)

    async def _chat(self, messages: list) -> str:
        """Run one chat completion off-thread and return its text content."""
        response = await asyncio.to_thread(
            self.openai_client.chat.completions.create,
            model=self._MODEL,
            messages=messages,
            temperature=self._TEMPERATURE,
        )
        # BUG FIX: message.content may be None (e.g. a refusal); normalize to
        # "" so downstream string operations cannot raise TypeError.
        return response.choices[0].message.content or ""

    @staticmethod
    def _strip_fences(raw_content: str) -> str:
        """Strip a surrounding markdown code fence from the model output, if any."""
        if "```mermaid" in raw_content:
            raw_content = raw_content.split("```mermaid")[1].split("```")[0].strip()
        elif "```" in raw_content:
            raw_content = raw_content.split("```")[1].split("```")[0].strip()
        return raw_content.strip()

# Module-level singleton: importers share one service (and one OpenAI client).
mindmap_service = MindMapService()