"""
Sistema RAG simplificado para Hugging Face Spaces
Version optimizada con Salamandra 7B Instruct
"""

import os
import time
from dataclasses import dataclass
from typing import Dict, List

import torch
from qdrant_client import QdrantClient
from sentence_transformers import SentenceTransformer
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig


@dataclass
class RAGResult:
    """Resultado de una consulta RAG."""
    query: str
    answer: str
    sources: List[Dict]
    retrieval_time: float
    generation_time: float
    total_time: float


class RAGLLMSystem:
    """Sistema RAG + Salamandra LLM."""

    def __init__(self):
        """Initialize the system."""

        # Configuration from environment variables
        self.qdrant_url = os.getenv("QDRANT_URL")
        self.qdrant_api_key = os.getenv("QDRANT_API_KEY")
        self.qdrant_collection = os.getenv("QDRANT_COLLECTION", "alia_turismo_docs")

        # Debug: verify that the variables are set
        print(f"[DEBUG] QDRANT_URL set: {self.qdrant_url is not None}")
        print(f"[DEBUG] QDRANT_API_KEY set: {self.qdrant_api_key is not None}")
        print(f"[DEBUG] QDRANT_COLLECTION: {self.qdrant_collection}")

        # LLM model
        self.llm_model_name = "BSC-LT/salamandra-7b-instruct"

        # Embedding model
        self.embedding_model_name = "sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2"

        # Pick the device
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        print(f"[RAG] Device: {self.device}")

        # Initialize components
        self._init_qdrant_client()
        self._init_embedding_model()
        self._init_salamandra_model()

    def _init_qdrant_client(self):
        """Initialize the Qdrant client."""
        print("[RAG] Connecting to Qdrant Cloud...")
        self.qdrant_client = QdrantClient(
            url=self.qdrant_url,
            api_key=self.qdrant_api_key
        )
        print("[RAG] Connected to Qdrant")

    def _init_embedding_model(self):
        """Initialize the embedding model."""
        print("[RAG] Loading embedding model...")
        self.embedding_model = SentenceTransformer(
            self.embedding_model_name,
            device=self.device
        )
        print("[RAG] Embeddings loaded")

    def _init_salamandra_model(self):
        """Initialize Salamandra 7B Instruct with 8-bit quantization."""
        print("[RAG] Loading Salamandra 7B Instruct (8-bit quantized)...")

        # Load the tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.llm_model_name)

        # On GPU, load the model 8-bit quantized (via bitsandbytes) to save memory;
        # BitsAndBytesConfig replaces the deprecated load_in_8bit kwarg
        if self.device == 'cuda':
            self.llm_model = AutoModelForCausalLM.from_pretrained(
                self.llm_model_name,
                quantization_config=BitsAndBytesConfig(load_in_8bit=True),
                device_map="auto",
                low_cpu_mem_usage=True
            )
            print("[RAG] Salamandra loaded on GPU (8-bit)")
        else:
            self.llm_model = AutoModelForCausalLM.from_pretrained(
                self.llm_model_name,
                torch_dtype=torch.float32,
                low_cpu_mem_usage=True
            )
            print("[RAG] Salamandra loaded on CPU")

        self.llm_model.eval()

    def retrieve_context(
        self,
        query: str,
        top_k: int = 5,
        score_threshold: float = 0.6
    ) -> List[Dict]:
        """Recuperar documentos relevantes."""

        # Generar embedding
        query_embedding = self.embedding_model.encode(
            query,
            convert_to_numpy=True
        )

        # Buscar en Qdrant
        results = self.qdrant_client.query_points(
            collection_name=self.qdrant_collection,
            query=query_embedding.tolist(),
            limit=top_k
        ).points

        # Filtrar y formatear
        documents = []
        for result in results:
            if result.score >= score_threshold:
                documents.append({
                    'content': result.payload.get('full_content', ''),
                    'filename': result.payload.get('filename', ''),
                    'category': result.payload.get('category', ''),
                    'score': result.score,
                    'id': result.id
                })

        return documents

    def generate_answer(
        self,
        query: str,
        context_docs: List[Dict],
        max_new_tokens: int = 512,
        temperature: float = 0.7,
        top_p: float = 0.9
    ) -> str:
        """Generar respuesta con Salamandra."""

        # Construir contexto (limitado para evitar timeouts)
        context_text = "\n\n---\n\n".join([
            f"[Doc: {doc['filename'][:30]}]\n{doc['content'][:1000]}"
            for doc in context_docs[:3]  # Solo top 3 docs
        ])

        # Prompt optimizado (más corto)
        prompt = f"""Eres ALIA, asistente de turismo de la Comunidad Valenciana.

Responde basandote en estos documentos:

{context_text}

PREGUNTA: {query}

RESPUESTA (sé conciso):"""

        # Tokenizar
        inputs = self.tokenizer(
            prompt,
            return_tensors="pt",
            truncation=True,
            max_length=4096
        )

        # Mover a dispositivo
        if self.device == 'cuda':
            inputs = {k: v.cuda() for k, v in inputs.items()}

        # Generar con parametros optimizados
        try:
            print(f"[GENERATE] Iniciando generacion en {self.device}...")

            with torch.no_grad():
                outputs = self.llm_model.generate(
                    **inputs,
                    max_new_tokens=min(max_new_tokens, 256),  # Limitar a 256 tokens max
                    temperature=temperature,
                    top_p=top_p,
                    do_sample=True,
                    num_beams=1,  # Greedy decoding para velocidad
                    pad_token_id=self.tokenizer.eos_token_id,
                    eos_token_id=self.tokenizer.eos_token_id,
                )

            print(f"[GENERATE] Generacion completada")

            # Decodificar
            response = self.tokenizer.decode(outputs[0], skip_special_tokens=True)

            # Extraer solo la respuesta generada
            if "RESPUESTA" in response:
                response = response.split("RESPUESTA")[-1].strip()
                response = response.replace("(sé conciso):", "").strip()

            return response[:2000]  # Limitar largo de respuesta

        except Exception as e:
            print(f"[ERROR] Error en generacion: {str(e)}")
            return f"Error generando respuesta: {str(e)}"

    def query(
        self,
        question: str,
        top_k: int = 5,
        score_threshold: float = 0.6,
        max_new_tokens: int = 1024,
        temperature: float = 0.7
    ) -> RAGResult:
        """Procesar consulta completa."""

        start_time = time.time()

        # Retrieve context
        retrieval_start = time.time()
        context_docs = self.retrieve_context(question, top_k, score_threshold)
        retrieval_time = time.time() - retrieval_start

        if not context_docs:
            # User-facing fallback message (the assistant replies in Spanish)
            return RAGResult(
                query=question,
                answer="No se encontraron documentos relevantes para responder tu pregunta.",
                sources=[],
                retrieval_time=retrieval_time,
                generation_time=0,
                total_time=time.time() - start_time
            )

        # Generate the answer
        generation_start = time.time()
        answer = self.generate_answer(
            question,
            context_docs,
            max_new_tokens=max_new_tokens,
            temperature=temperature
        )
        generation_time = time.time() - generation_start

        # Assemble the result
        sources = [{
            'filename': doc['filename'],
            'category': doc['category'],
            'score': doc['score']
        } for doc in context_docs]

        return RAGResult(
            query=question,
            answer=answer,
            sources=sources,
            retrieval_time=retrieval_time,
            generation_time=generation_time,
            total_time=time.time() - start_time
        )
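

# --- Usage sketch (illustrative addition, not part of the original module) ---
# Assumes QDRANT_URL and QDRANT_API_KEY are set in the environment and that the
# configured Qdrant collection already contains embedded documents; the sample
# question below is hypothetical.
if __name__ == "__main__":
    rag = RAGLLMSystem()
    result = rag.query("¿Qué fiestas tradicionales hay en la Comunidad Valenciana?")
    print(f"Answer: {result.answer}")
    print(f"Sources: {[src['filename'] for src in result.sources]}")
    print(f"Timing: retrieval {result.retrieval_time:.2f}s, "
          f"generation {result.generation_time:.2f}s, "
          f"total {result.total_time:.2f}s")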