davidtran999 committed on
Commit 4281be3 · verified · 1 Parent(s): 4821398

Upload backend/chatbot/llm_integration.py with huggingface_hub
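The commit message says the file was pushed with huggingface_hub but does not show the call. A minimal, hypothetical sketch of such an upload (the repo id and repo type below are assumptions inferred from the Space URL that appears later in the file):

from huggingface_hub import HfApi

api = HfApi()  # picks up HF_TOKEN from the environment if set
api.upload_file(
    path_or_fileobj="backend/chatbot/llm_integration.py",
    path_in_repo="backend/chatbot/llm_integration.py",
    repo_id="davidtran999/hue-portal-backend",  # assumed Space name
    repo_type="space",                          # assumed repo type
    commit_message="Upload backend/chatbot/llm_integration.py with huggingface_hub",
)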

Files changed (1)
  1. backend/chatbot/llm_integration.py +1110 -0
backend/chatbot/llm_integration.py ADDED
@@ -0,0 +1,1110 @@
1
+ """
2
+ LLM integration for natural answer generation.
3
+ Supports OpenAI GPT, Anthropic Claude, Ollama, Hugging Face Inference API, Local Hugging Face models, and API mode.
4
+ """
5
+ import os
6
+ import re
7
+ import json
8
+ import sys
9
+ import traceback
10
+ import logging
11
+ import time
12
+ from pathlib import Path
13
+ from typing import List, Dict, Any, Optional, Set, Tuple
14
+
15
+ from .structured_legal import (
16
+ build_structured_legal_prompt,
17
+ get_legal_output_parser,
18
+ parse_structured_output,
19
+ LegalAnswer,
20
+ )
21
+ from .legal_guardrails import get_legal_guard
22
+ try:
23
+ from dotenv import load_dotenv
24
+ load_dotenv()
25
+ except ImportError:
26
+ pass # dotenv is optional
27
+
28
+ logger = logging.getLogger(__name__)
29
+
30
+ BASE_DIR = Path(__file__).resolve().parents[2]
31
+ GUARDRAILS_LOG_DIR = BASE_DIR / "logs" / "guardrails"
32
+ GUARDRAILS_LOG_FILE = GUARDRAILS_LOG_DIR / "legal_structured.log"
33
+
34
+
35
+ def _write_guardrails_debug(label: str, content: Optional[str]) -> None:
36
+ """Persist raw Guardrails inputs/outputs for debugging."""
37
+ if not content:
38
+ return
39
+ try:
40
+ GUARDRAILS_LOG_DIR.mkdir(parents=True, exist_ok=True)
41
+ timestamp = time.strftime("%Y-%m-%d %H:%M:%S")
42
+ snippet = content.strip()
43
+ max_len = 4000
44
+ if len(snippet) > max_len:
45
+ snippet = snippet[:max_len] + "...[truncated]"
46
+ with GUARDRAILS_LOG_FILE.open("a", encoding="utf-8") as fp:
47
+ fp.write(f"[{timestamp}] [{label}] {snippet}\n{'-' * 80}\n")
48
+ except Exception as exc:
49
+ logger.debug("Unable to write guardrails log: %s", exc)
50
+
51
+
52
+ def _collect_doc_metadata(documents: List[Any]) -> Tuple[Set[str], Set[str]]:
53
+ titles: Set[str] = set()
54
+ sections: Set[str] = set()
55
+ for doc in documents:
56
+ document = getattr(doc, "document", None)
57
+ title = getattr(document, "title", None)
58
+ if title:
59
+ titles.add(title.strip())
60
+ section_code = getattr(doc, "section_code", None)
61
+ if section_code:
62
+ sections.add(section_code.strip())
63
+ return titles, sections
64
+
65
+
66
+ def _contains_any(text: str, tokens: Set[str]) -> bool:
67
+ if not tokens:
68
+ return True
69
+ normalized = text.lower()
70
+ return any(token.lower() in normalized for token in tokens if token)
71
+
72
+
73
+ def _validate_structured_answer(
74
+ answer: "LegalAnswer",
75
+ documents: List[Any],
76
+ ) -> Tuple[bool, str]:
77
+ """Ensure structured answer references actual documents/sections."""
78
+ allowed_titles, allowed_sections = _collect_doc_metadata(documents)
79
+ if allowed_titles and not _contains_any(answer.summary, allowed_titles):
80
+ return False, "Summary thiếu tên văn bản từ bảng tham chiếu"
81
+
82
+ for idx, bullet in enumerate(answer.details, 1):
83
+ if allowed_titles and not _contains_any(bullet, allowed_titles):
84
+ return False, f"Chi tiết {idx} thiếu tên văn bản"
85
+ if allowed_sections and not _contains_any(bullet, allowed_sections):
86
+ return False, f"Chi tiết {idx} thiếu mã điều/khoản"
87
+
88
+ allowed_title_lower = {title.lower() for title in allowed_titles}
89
+ allowed_section_lower = {section.lower() for section in allowed_sections}
90
+
91
+ for idx, citation in enumerate(answer.citations, 1):
92
+ if citation.document_title and citation.document_title.lower() not in allowed_title_lower:
93
+ return False, f"Citation {idx} chứa văn bản không có trong nguồn"
94
+ if (
95
+ citation.section_code
96
+ and allowed_section_lower
97
+ and citation.section_code.lower() not in allowed_section_lower
98
+ ):
99
+ return False, f"Citation {idx} chứa điều/khoản không có trong nguồn"
100
+
101
+ return True, ""
102
+
103
+ # Import download progress tracker (optional)
104
+ try:
105
+ from .download_progress import get_progress_tracker, DownloadProgress
106
+ PROGRESS_TRACKER_AVAILABLE = True
107
+ except ImportError:
108
+ PROGRESS_TRACKER_AVAILABLE = False
109
+ logger.warning("Download progress tracker not available")
110
+
111
+ # LLM Provider types
112
+ LLM_PROVIDER_OPENAI = "openai"
113
+ LLM_PROVIDER_ANTHROPIC = "anthropic"
114
+ LLM_PROVIDER_OLLAMA = "ollama"
115
+ LLM_PROVIDER_HUGGINGFACE = "huggingface" # Hugging Face Inference API
116
+ LLM_PROVIDER_LOCAL = "local" # Local Hugging Face Transformers model
117
+ LLM_PROVIDER_API = "api" # API mode - call HF Spaces API
118
+ LLM_PROVIDER_NONE = "none"
119
+
120
+ # Get provider from environment (default to local Qwen if none provided)
121
+ DEFAULT_LLM_PROVIDER = os.environ.get("DEFAULT_LLM_PROVIDER", LLM_PROVIDER_LOCAL).lower()
122
+ env_provider = os.environ.get("LLM_PROVIDER", "").strip().lower()
123
+ LLM_PROVIDER = env_provider or DEFAULT_LLM_PROVIDER
124
+ LEGAL_STRUCTURED_MAX_ATTEMPTS = max(
125
+ 1, int(os.environ.get("LEGAL_STRUCTURED_MAX_ATTEMPTS", "2"))
126
+ )
127
+
128
+
129
+ class LLMGenerator:
130
+ """Generate natural language answers using LLMs."""
131
+
132
+ def __init__(self, provider: Optional[str] = None):
133
+ """
134
+ Initialize LLM generator.
135
+
136
+ Args:
137
+ provider: LLM provider ('openai', 'anthropic', 'ollama', 'local', 'huggingface', 'api', or None for auto-detect).
138
+ """
139
+ self.provider = provider or LLM_PROVIDER
140
+ self.client = None
141
+ self.local_model = None
142
+ self.local_tokenizer = None
143
+ self.api_base_url = None
144
+ self._initialize_client()
145
+
146
+ def _initialize_client(self):
147
+ """Initialize LLM client based on provider."""
148
+ if self.provider == LLM_PROVIDER_OPENAI:
149
+ try:
150
+ import openai
151
+ api_key = os.environ.get("OPENAI_API_KEY")
152
+ if api_key:
153
+ self.client = openai.OpenAI(api_key=api_key)
154
+ print("✅ OpenAI client initialized")
155
+ else:
156
+ print("⚠️ OPENAI_API_KEY not found, OpenAI disabled")
157
+ except ImportError:
158
+ print("⚠️ openai package not installed, install with: pip install openai")
159
+
160
+ elif self.provider == LLM_PROVIDER_ANTHROPIC:
161
+ try:
162
+ import anthropic
163
+ api_key = os.environ.get("ANTHROPIC_API_KEY")
164
+ if api_key:
165
+ self.client = anthropic.Anthropic(api_key=api_key)
166
+ print("✅ Anthropic client initialized")
167
+ else:
168
+ print("⚠️ ANTHROPIC_API_KEY not found, Anthropic disabled")
169
+ except ImportError:
170
+ print("⚠️ anthropic package not installed, install with: pip install anthropic")
171
+
172
+ elif self.provider == LLM_PROVIDER_OLLAMA:
173
+ self.ollama_base_url = os.environ.get("OLLAMA_BASE_URL", "http://localhost:11434")
174
+ self.ollama_model = os.environ.get("OLLAMA_MODEL", "qwen2.5:7b")
175
+ print(f"✅ Ollama configured (base_url: {self.ollama_base_url}, model: {self.ollama_model})")
176
+
177
+ elif self.provider == LLM_PROVIDER_HUGGINGFACE:
178
+ self.hf_api_key = os.environ.get("HF_TOKEN") or os.environ.get("HUGGINGFACE_API_KEY")
179
+ self.hf_model = os.environ.get("HF_MODEL", "Qwen/Qwen2.5-7B-Instruct")
180
+ if self.hf_api_key:
181
+ print(f"✅ Hugging Face API configured (model: {self.hf_model})")
182
+ else:
183
+ print("⚠️ HF_TOKEN not found, Hugging Face may have rate limits")
184
+
185
+ elif self.provider == LLM_PROVIDER_API:
186
+ # API mode - call HF Spaces API
187
+ self.api_base_url = os.environ.get(
188
+ "HF_API_BASE_URL",
189
+ "https://davidtran999-hue-portal-backend.hf.space/api"
190
+ )
191
+ print(f"✅ API mode configured (base_url: {self.api_base_url})")
192
+
193
+ elif self.provider == LLM_PROVIDER_LOCAL:
194
+ self._initialize_local_model()
195
+
196
+ else:
197
+ print("ℹ️ No LLM provider configured, using template-based generation")
198
+
199
+ def _initialize_local_model(self):
200
+ """Initialize local Hugging Face Transformers model."""
201
+ try:
202
+ from transformers import AutoModelForCausalLM, AutoTokenizer
203
+ import torch
204
+
205
+ # Default to Qwen 2.5 7B with 8-bit quantization (fits in GPU RAM)
206
+ model_path = os.environ.get("LOCAL_MODEL_PATH", "Qwen/Qwen2.5-7B-Instruct")
207
+ device = os.environ.get("LOCAL_MODEL_DEVICE", "auto") # auto, cpu, cuda
208
+
209
+ print(f"[LLM] Loading local model: {model_path}", flush=True)
210
+ logger.info(f"[LLM] Loading local model: {model_path}")
211
+
212
+ # Determine device
213
+ if device == "auto":
214
+ device = "cuda" if torch.cuda.is_available() else "cpu"
215
+
216
+ # Start cache monitoring for download progress (optional)
217
+ try:
218
+ from .cache_monitor import get_cache_monitor
219
+ monitor = get_cache_monitor()
220
+ monitor.start_monitoring(model_path, interval=2.0)
221
+ print(f"[LLM] 📊 Started cache monitoring for {model_path}", flush=True)
222
+ logger.info(f"[LLM] 📊 Started cache monitoring for {model_path}")
223
+ except Exception as e:
224
+ logger.warning(f"Could not start cache monitoring: {e}")
225
+
226
+ # Load tokenizer
227
+ print("[LLM] Loading tokenizer...", flush=True)
228
+ logger.info("[LLM] Loading tokenizer...")
229
+ try:
230
+ self.local_tokenizer = AutoTokenizer.from_pretrained(
231
+ model_path,
232
+ trust_remote_code=True
233
+ )
234
+ print("[LLM] ✅ Tokenizer loaded successfully", flush=True)
235
+ logger.info("[LLM] ✅ Tokenizer loaded successfully")
236
+ except Exception as tokenizer_err:
237
+ error_trace = traceback.format_exc()
238
+ print(f"[LLM] ❌ Tokenizer load error: {tokenizer_err}", flush=True)
239
+ print(f"[LLM] ❌ Tokenizer trace: {error_trace}", flush=True)
240
+ logger.error(f"[LLM] ❌ Tokenizer load error: {tokenizer_err}\n{error_trace}")
241
+ print(f"[LLM] ❌ ERROR: {type(tokenizer_err).__name__}: {str(tokenizer_err)}", file=sys.stderr, flush=True)
242
+ traceback.print_exc(file=sys.stderr)
243
+ raise
244
+
245
+ # Load model with optional quantization and fallback mechanism
246
+ print(f"[LLM] Loading model to {device}...", flush=True)
247
+ logger.info(f"[LLM] Loading model to {device}...")
248
+
249
+ # Check for quantization config
250
+ # Default to 8-bit for 7B (better thinking), 4-bit for larger models
251
+ default_8bit = "7b" in model_path.lower() or "7B" in model_path
252
+ default_4bit = ("32b" in model_path.lower() or "32B" in model_path or "14b" in model_path.lower() or "14B" in model_path) and not default_8bit
253
+
254
+ # Check environment variable for explicit quantization preference
255
+ quantization_pref = os.environ.get("LOCAL_MODEL_QUANTIZATION", "").lower()
256
+ if quantization_pref == "4bit":
257
+ use_8bit = False
258
+ use_4bit = True
259
+ elif quantization_pref == "8bit":
260
+ use_8bit = True
261
+ use_4bit = False
262
+ elif quantization_pref == "none":
263
+ use_8bit = False
264
+ use_4bit = False
265
+ else:
266
+ # Use defaults based on model size
267
+ use_8bit = os.environ.get("LOCAL_MODEL_8BIT", "true" if default_8bit else "false").lower() == "true"
268
+ use_4bit = os.environ.get("LOCAL_MODEL_4BIT", "true" if default_4bit else "false").lower() == "true"
269
+
270
+ # Try loading with fallback: 8-bit → 4-bit → float16
271
+ model_loaded = False
272
+ quantization_attempts = []
273
+
274
+ if device == "cuda":
275
+ # Attempt 1: Try 8-bit quantization (if requested)
276
+ if use_8bit:
277
+ quantization_attempts.append(("8-bit", True, False))
278
+
279
+ # Attempt 2: Try 4-bit quantization (if 8-bit fails or not requested)
280
+ if use_4bit or (use_8bit and not model_loaded):
281
+ quantization_attempts.append(("4-bit", False, True))
282
+
283
+ # Attempt 3: Fallback to float16 (no quantization)
284
+ quantization_attempts.append(("float16", False, False))
285
+ else:
286
+ # CPU: only float32
287
+ quantization_attempts.append(("float32", False, False))
288
+
289
+ last_error = None
290
+ for attempt_name, try_8bit, try_4bit in quantization_attempts:
291
+ if model_loaded:
292
+ break
293
+
294
+ try:
295
+ load_kwargs = {
296
+ "trust_remote_code": True,
297
+ "low_cpu_mem_usage": True,
298
+ }
299
+
300
+ if device == "cuda":
301
+ load_kwargs["device_map"] = "auto"
302
+
303
+ if try_4bit:
304
+ # Check if bitsandbytes is available
305
+ try:
306
+ import bitsandbytes as bnb
307
+ from transformers import BitsAndBytesConfig
308
+ load_kwargs["quantization_config"] = BitsAndBytesConfig(
309
+ load_in_4bit=True,
310
+ bnb_4bit_compute_dtype=torch.float16
311
+ )
312
+ print(f"[LLM] Attempting to load with 4-bit quantization (~4-5GB VRAM for 7B)", flush=True)
313
+ except ImportError:
314
+ print(f"[LLM] ⚠️ bitsandbytes not available, skipping 4-bit quantization", flush=True)
315
+ raise ImportError("bitsandbytes not available")
316
+ elif try_8bit:
317
+ from transformers import BitsAndBytesConfig
318
+ # Fixed: Remove CPU offload to avoid Int8Params compatibility issue
319
+ load_kwargs["quantization_config"] = BitsAndBytesConfig(
320
+ load_in_8bit=True,
321
+ llm_int8_threshold=6.0
322
+ # Removed: llm_int8_enable_fp32_cpu_offload=True (causes compatibility issues)
323
+ )
324
+ # Removed: max_memory override - let accelerate handle it automatically
325
+ print(f"[LLM] Attempting to load with 8-bit quantization (~7GB VRAM for 7B)", flush=True)
326
+ else:
327
+ load_kwargs["torch_dtype"] = torch.float16
328
+ print(f"[LLM] Attempting to load with float16 (no quantization)", flush=True)
329
+ else:
330
+ load_kwargs["torch_dtype"] = torch.float32
331
+ print(f"[LLM] Attempting to load with float32 (CPU)", flush=True)
332
+
333
+ # Load model
334
+ self.local_model = AutoModelForCausalLM.from_pretrained(
335
+ model_path,
336
+ **load_kwargs
337
+ )
338
+
339
+ # Stop cache monitoring (download complete)
340
+ try:
341
+ from .cache_monitor import get_cache_monitor
342
+ monitor = get_cache_monitor()
343
+ monitor.stop_monitoring(model_path)
344
+ print(f"[LLM] ✅ Model download complete, stopped monitoring", flush=True)
345
+ except Exception:
346
+ pass
347
+
348
+ print(f"[LLM] ✅ Model loaded successfully with {attempt_name} quantization", flush=True)
349
+ logger.info(f"[LLM] ✅ Model loaded successfully with {attempt_name} quantization")
350
+
351
+ # Optional: Compile model for faster inference (PyTorch 2.0+)
352
+ try:
353
+ if hasattr(torch, "compile") and device == "cuda":
354
+ print(f"[LLM] ⚡ Compiling model for faster inference...", flush=True)
355
+ self.local_model = torch.compile(self.local_model, mode="reduce-overhead")
356
+ print(f"[LLM] ✅ Model compiled successfully", flush=True)
357
+ logger.info(f"[LLM] ✅ Model compiled for faster inference")
358
+ except Exception as compile_err:
359
+ print(f"[LLM] ⚠️ Model compilation skipped: {compile_err}", flush=True)
360
+ # Continue without compilation
361
+
362
+ model_loaded = True
363
+
364
+ except Exception as model_load_err:
365
+ last_error = model_load_err
366
+ error_trace = traceback.format_exc()
367
+ print(f"[LLM] ⚠️ Failed to load with {attempt_name}: {model_load_err}", flush=True)
368
+ logger.warning(f"[LLM] ⚠️ Failed to load with {attempt_name}: {model_load_err}")
369
+
370
+ # If this was the last attempt, raise the error
371
+ if attempt_name == quantization_attempts[-1][0]:
372
+ print(f"[LLM] ❌ All quantization attempts failed. Last error: {model_load_err}", flush=True)
373
+ print(f"[LLM] ❌ Model load trace: {error_trace}", flush=True)
374
+ logger.error(f"[LLM] ❌ Model load error: {model_load_err}\n{error_trace}")
375
+ print(f"[LLM] ❌ ERROR: {type(model_load_err).__name__}: {str(model_load_err)}", file=sys.stderr, flush=True)
376
+ traceback.print_exc(file=sys.stderr)
377
+ raise
378
+ else:
379
+ # Try next quantization method
380
+ print(f"[LLM] 🔄 Falling back to next quantization method...", flush=True)
381
+ continue
382
+
383
+ if not model_loaded:
384
+ raise RuntimeError("Failed to load model with any quantization method")
385
+
386
+ if device == "cpu":
387
+ try:
388
+ self.local_model = self.local_model.to(device)
389
+ print(f"[LLM] ✅ Model moved to {device}", flush=True)
390
+ logger.info(f"[LLM] ✅ Model moved to {device}")
391
+ except Exception as move_err:
392
+ error_trace = traceback.format_exc()
393
+ print(f"[LLM] ❌ Model move error: {move_err}", flush=True)
394
+ logger.error(f"[LLM] ❌ Model move error: {move_err}\n{error_trace}")
395
+ print(f"[LLM] ❌ ERROR: {type(move_err).__name__}: {str(move_err)}", file=sys.stderr, flush=True)
396
+ traceback.print_exc(file=sys.stderr)
397
+
398
+ self.local_model.eval() # Set to evaluation mode
399
+ print(f"[LLM] ✅ Local model loaded successfully on {device}", flush=True)
400
+ logger.info(f"[LLM] ✅ Local model loaded successfully on {device}")
401
+
402
+ except ImportError as import_err:
403
+ error_msg = "transformers package not installed, install with: pip install transformers torch"
404
+ print(f"[LLM] ⚠️ {error_msg}", flush=True)
405
+ logger.warning(f"[LLM] ⚠️ {error_msg}")
406
+ print(f"[LLM] ❌ ImportError: {import_err}", file=sys.stderr, flush=True)
407
+ self.local_model = None
408
+ self.local_tokenizer = None
409
+ except Exception as e:
410
+ error_trace = traceback.format_exc()
411
+ print(f"[LLM] ❌ Error loading local model: {e}", flush=True)
412
+ print(f"[LLM] ❌ Full trace: {error_trace}", flush=True)
413
+ logger.error(f"[LLM] ❌ Error loading local model: {e}\n{error_trace}")
414
+ print(f"[LLM] ❌ ERROR: {type(e).__name__}: {str(e)}", file=sys.stderr, flush=True)
415
+ traceback.print_exc(file=sys.stderr)
416
+ print("[LLM] 💡 Tip: Use smaller models like Qwen/Qwen2.5-1.5B-Instruct or Qwen/Qwen2.5-0.5B-Instruct", flush=True)
417
+ self.local_model = None
418
+ self.local_tokenizer = None
419
+
420
+ def is_available(self) -> bool:
421
+ """Check if LLM is available."""
422
+ return (
423
+ self.client is not None or
424
+ self.provider == LLM_PROVIDER_OLLAMA or
425
+ self.provider == LLM_PROVIDER_HUGGINGFACE or
426
+ self.provider == LLM_PROVIDER_API or
427
+ (self.provider == LLM_PROVIDER_LOCAL and self.local_model is not None)
428
+ )
429
+
430
+ def generate_answer(
431
+ self,
432
+ query: str,
433
+ context: Optional[List[Dict[str, Any]]] = None,
434
+ documents: Optional[List[Any]] = None
435
+ ) -> Optional[str]:
436
+ """
437
+ Generate natural language answer from documents.
438
+
439
+ Args:
440
+ query: User query.
441
+ context: Optional conversation context.
442
+ documents: Retrieved documents.
443
+
444
+ Returns:
445
+ Generated answer or None if LLM not available.
446
+ """
447
+ if not self.is_available():
448
+ return None
449
+
450
+ prompt = self._build_prompt(query, context, documents)
451
+ return self._generate_from_prompt(prompt, context=context)
452
+
453
+ def _build_prompt(
454
+ self,
455
+ query: str,
456
+ context: Optional[List[Dict[str, Any]]],
457
+ documents: Optional[List[Any]]
458
+ ) -> str:
459
+ """Build prompt for LLM."""
460
+ prompt_parts = [
461
+ "Bạn là chatbot tư vấn pháp lý của Công an thành phố Huế.",
462
+ "Nhiệm vụ: Trả lời câu hỏi của người dùng dựa trên các văn bản pháp luật và quy định được cung cấp.",
463
+ "",
464
+ f"Câu hỏi của người dùng: {query}",
465
+ ""
466
+ ]
467
+
468
+ if context:
469
+ prompt_parts.append("Ngữ cảnh cuộc hội thoại trước đó:")
470
+ for msg in context[-3:]: # Last 3 messages
471
+ role = "Người dùng" if msg.get("role") == "user" else "Bot"
472
+ content = msg.get("content", "")
473
+ prompt_parts.append(f"{role}: {content}")
474
+ prompt_parts.append("")
475
+
476
+ if documents:
477
+ prompt_parts.append("Các văn bản/quy định liên quan:")
478
+ for i, doc in enumerate(documents[:5], 1):
479
+ # Extract relevant fields based on document type
480
+ doc_text = self._format_document(doc)
481
+ prompt_parts.append(f"{i}. {doc_text}")
482
+ prompt_parts.append("")
483
+ # If documents exist, require strict adherence
484
+ prompt_parts.extend([
485
+ "Yêu cầu QUAN TRỌNG:",
486
+ "- CHỈ trả lời dựa trên thông tin trong 'Các văn bản/quy định liên quan' ở trên",
487
+ "- KHÔNG được tự tạo hoặc suy đoán thông tin không có trong tài liệu",
488
+ "- Khi đã có trích đoạn, phải tổng hợp theo cấu trúc rõ ràng:\n 1) Tóm tắt ngắn gọn nội dung chính\n 2) Liệt kê từng điều/khoản hoặc hình thức xử lý (dùng bullet/đánh số, ghi rõ Điều, Khoản, trang, tên văn bản)\n 3) Kết luận + khuyến nghị áp dụng.",
489
+ "- Luôn nhắc tên văn bản (ví dụ: Quyết định 69/QĐ-TW) và mã điều trong nội dung trả lời.",
490
+ "- Kết thúc phần trả lời bằng câu: '(Xem trích dẫn chi tiết bên dưới)'.",
491
+ "- Không dùng những câu chung chung như 'Rất tiếc' hay 'Tôi không thể giúp', hãy trả lời thẳng vào câu hỏi.",
492
+ "- Chỉ khi HOÀN TOÀN không có thông tin trong tài liệu mới được nói: 'Thông tin trong cơ sở dữ liệu chưa đủ để trả lời câu hỏi này'",
493
+ "- Nếu có mức phạt, phải ghi rõ số tiền (ví dụ: 200.000 - 400.000 VNĐ)",
494
+ "- Nếu có điều khoản, ghi rõ mã điều (ví dụ: Điều 5, Điều 10)",
495
+ "- Nếu có thủ tục, ghi rõ hồ sơ, lệ phí, thời hạn",
496
+ "- Trả lời bằng tiếng Việt, ngắn gọn, dễ hiểu",
497
+ "",
498
+ "Trả lời:"
499
+ ])
500
+ else:
501
+ # No documents - allow general conversation
502
+ prompt_parts.extend([
503
+ "Yêu cầu:",
504
+ "- Trả lời câu hỏi một cách tự nhiên và hữu ích như một chatbot AI thông thường.",
505
+ "- Phản hồi phải có ít nhất 2 đoạn (mỗi đoạn ≥ 2 câu) và tổng cộng ≥ 6 câu.",
506
+ "- Luôn có ít nhất 1 danh sách bullet hoặc đánh số để người dùng dễ làm theo.",
507
+ "- Với chủ đề đời sống (ẩm thực, sức khỏe, du lịch, công nghệ...), hãy đưa ra gợi ý thật đầy đủ, gồm tối thiểu 4-6 câu hoặc 2 đoạn nội dung.",
508
+ "- Nếu câu hỏi cần công thức/nấu ăn: liệt kê NGUYÊN LIỆU rõ ràng (dạng bullet) và CÁC BƯỚC chi tiết (đánh số 1,2,3...). Đề xuất thêm mẹo hoặc biến tấu phù hợp.",
509
+ "- Với các chủ đề mẹo vặt khác, hãy chia nhỏ câu trả lời thành từng phần (Ví dụ: Bối cảnh → Các bước → Lưu ý).",
510
+ "- Tuyệt đối không mở đầu bằng lời xin lỗi hoặc từ chối; hãy đi thẳng vào nội dung chính.",
511
+ "- Nếu câu hỏi liên quan đến pháp luật, thủ tục, mức phạt nhưng không có thông tin trong cơ sở dữ liệu, hãy nói: 'Tôi không tìm thấy thông tin này trong cơ sở dữ liệu. Bạn có thể liên hệ trực tiếp với Công an thành phố Huế để được tư vấn chi tiết hơn.'",
512
+ "- Giữ giọng điệu thân thiện, khích lệ, giống một người bạn hiểu biết.",
513
+ "- Trả lời bằng tiếng Việt, mạch lạc, dễ hiểu, ưu tiên trình bày có tiêu đề/phân đoạn để người đọc dễ làm theo.",
514
+ "",
515
+ "Trả lời:"
516
+ ])
517
+
518
+ return "\n".join(prompt_parts)
519
+
520
+ def _generate_from_prompt(
521
+ self,
522
+ prompt: str,
523
+ context: Optional[List[Dict[str, Any]]] = None
524
+ ) -> Optional[str]:
525
+ """Run current provider with a fully formatted prompt."""
526
+ if not self.is_available():
527
+ return None
528
+
529
+ try:
530
+ print(f"[LLM] Generating answer with provider: {self.provider}", flush=True)
531
+ logger.info(f"[LLM] Generating answer with provider: {self.provider}")
532
+
533
+ if self.provider == LLM_PROVIDER_OPENAI:
534
+ result = self._generate_openai(prompt)
535
+ elif self.provider == LLM_PROVIDER_ANTHROPIC:
536
+ result = self._generate_anthropic(prompt)
537
+ elif self.provider == LLM_PROVIDER_OLLAMA:
538
+ result = self._generate_ollama(prompt)
539
+ elif self.provider == LLM_PROVIDER_HUGGINGFACE:
540
+ result = self._generate_huggingface(prompt)
541
+ elif self.provider == LLM_PROVIDER_LOCAL:
542
+ result = self._generate_local(prompt)
543
+ elif self.provider == LLM_PROVIDER_API:
544
+ result = self._generate_api(prompt, context)
545
+ else:
546
+ result = None
547
+
548
+ if result:
549
+ print(
550
+ f"[LLM] ✅ Answer generated successfully (length: {len(result)})",
551
+ flush=True,
552
+ )
553
+ logger.info(
554
+ f"[LLM] ✅ Answer generated successfully (length: {len(result)})"
555
+ )
556
+ else:
557
+ print(f"[LLM] ⚠️ No answer generated", flush=True)
558
+ logger.warning("[LLM] ⚠️ No answer generated")
559
+
560
+ return result
561
+ except Exception as exc:
562
+ error_trace = traceback.format_exc()
563
+ print(f"[LLM] ❌ Error generating answer: {exc}", flush=True)
564
+ print(f"[LLM] ❌ Full trace: {error_trace}", flush=True)
565
+ logger.error(f"[LLM] ❌ Error generating answer: {exc}\n{error_trace}")
566
+ print(
567
+ f"[LLM] ❌ ERROR: {type(exc).__name__}: {str(exc)}",
568
+ file=sys.stderr,
569
+ flush=True,
570
+ )
571
+ traceback.print_exc(file=sys.stderr)
572
+ return None
573
+
574
+ def generate_structured_legal_answer(
575
+ self,
576
+ query: str,
577
+ documents: List[Any],
578
+ prefill_summary: Optional[str] = None,
579
+ ) -> Optional[LegalAnswer]:
580
+ """
581
+ Ask the LLM for a structured legal answer (summary + details + citations).
582
+ """
583
+ if not self.is_available() or not documents:
584
+ return None
585
+
586
+ parser = get_legal_output_parser()
587
+ guard = get_legal_guard()
588
+ retry_hint: Optional[str] = None
589
+ failure_reason: Optional[str] = None
590
+
591
+ for attempt in range(LEGAL_STRUCTURED_MAX_ATTEMPTS):
592
+ prompt = build_structured_legal_prompt(
593
+ query,
594
+ documents,
595
+ parser,
596
+ prefill_summary=prefill_summary,
597
+ retry_hint=retry_hint,
598
+ )
599
+ logger.debug(
600
+ "[LLM] Structured prompt preview (attempt %s): %s",
601
+ attempt + 1,
602
+ prompt[:600].replace("\n", " "),
603
+ )
604
+ raw_output = self._generate_from_prompt(prompt)
605
+
606
+ if not raw_output:
607
+ failure_reason = "LLM không trả lời"
608
+ retry_hint = (
609
+ "Lần trước bạn không trả về JSON nào. "
610
+ "Hãy in duy nhất một JSON với SUMMARY, DETAILS và CITATIONS."
611
+ )
612
+ continue
613
+
614
+ _write_guardrails_debug(
615
+ f"raw_output_attempt_{attempt + 1}",
616
+ raw_output,
617
+ )
618
+ structured: Optional[LegalAnswer] = None
619
+
620
+ try:
621
+ guard_result = guard.parse(llm_output=raw_output)
622
+ guarded_output = getattr(guard_result, "validated_output", None)
623
+ if guarded_output:
624
+ structured = LegalAnswer.parse_obj(guarded_output)
625
+ _write_guardrails_debug(
626
+ f"guard_validated_attempt_{attempt + 1}",
627
+ json.dumps(guarded_output, ensure_ascii=False),
628
+ )
629
+ except Exception as exc:
630
+ failure_reason = f"Guardrails: {exc}"
631
+ logger.warning("[LLM] Guardrails validation failed: %s", exc)
632
+ _write_guardrails_debug(
633
+ f"guard_error_attempt_{attempt + 1}",
634
+ f"{type(exc).__name__}: {exc}",
635
+ )
636
+
637
+ if not structured:
638
+ structured = parse_structured_output(parser, raw_output or "")
639
+ if structured:
640
+ _write_guardrails_debug(
641
+ f"parser_recovery_attempt_{attempt + 1}",
642
+ structured.json(ensure_ascii=False),
643
+ )
644
+ else:
645
+ retry_hint = (
646
+ "JSON chưa hợp lệ. Hãy dùng cấu trúc SUMMARY/DETAILS/CITATIONS như ví dụ."
647
+ )
648
+ continue
649
+
650
+ is_valid, validation_reason = _validate_structured_answer(structured, documents)
651
+ if is_valid:
652
+ return structured
653
+
654
+ failure_reason = validation_reason or "Không đạt yêu cầu kiểm tra nội dung"
655
+ logger.warning(
656
+ "[LLM] ❌ Structured answer failed validation: %s", failure_reason
657
+ )
658
+ retry_hint = (
659
+ f"Lần trước vi phạm: {failure_reason}. "
660
+ "Hãy dùng đúng tên văn bản và mã điều trong bảng tham chiếu, không bịa thông tin mới."
661
+ )
662
+
663
+ logger.warning(
664
+ "[LLM] ❌ Structured legal parsing failed sau %s lần. Lý do cuối: %s",
665
+ LEGAL_STRUCTURED_MAX_ATTEMPTS,
666
+ failure_reason,
667
+ )
668
+ return None
669
+
670
+ def _format_document(self, doc: Any) -> str:
671
+ """Format document for prompt."""
672
+ doc_type = type(doc).__name__.lower()
673
+
674
+ if "fine" in doc_type:
675
+ parts = [f"Mức phạt: {getattr(doc, 'name', '')}"]
676
+ if hasattr(doc, 'code') and doc.code:
677
+ parts.append(f"Mã: {doc.code}")
678
+ if hasattr(doc, 'min_fine') and hasattr(doc, 'max_fine'):
679
+ if doc.min_fine and doc.max_fine:
680
+ parts.append(f"Số tiền: {doc.min_fine:,.0f} - {doc.max_fine:,.0f} VNĐ")
681
+ return " | ".join(parts)
682
+
683
+ elif "procedure" in doc_type:
684
+ parts = [f"Thủ tục: {getattr(doc, 'title', '')}"]
685
+ if hasattr(doc, 'dossier') and doc.dossier:
686
+ parts.append(f"Hồ sơ: {doc.dossier}")
687
+ if hasattr(doc, 'fee') and doc.fee:
688
+ parts.append(f"Lệ phí: {doc.fee}")
689
+ return " | ".join(parts)
690
+
691
+ elif "office" in doc_type:
692
+ parts = [f"Đơn vị: {getattr(doc, 'unit_name', '')}"]
693
+ if hasattr(doc, 'address') and doc.address:
694
+ parts.append(f"Địa chỉ: {doc.address}")
695
+ if hasattr(doc, 'phone') and doc.phone:
696
+ parts.append(f"Điện thoại: {doc.phone}")
697
+ return " | ".join(parts)
698
+
699
+ elif "advisory" in doc_type:
700
+ parts = [f"Cảnh báo: {getattr(doc, 'title', '')}"]
701
+ if hasattr(doc, 'summary') and doc.summary:
702
+ parts.append(f"Nội dung: {doc.summary[:200]}")
703
+ return " | ".join(parts)
704
+
705
+ elif "legalsection" in doc_type or "legal" in doc_type:
706
+ parts = []
707
+ if hasattr(doc, 'section_code') and doc.section_code:
708
+ parts.append(f"Điều khoản: {doc.section_code}")
709
+ if hasattr(doc, 'section_title') and doc.section_title:
710
+ parts.append(f"Tiêu đề: {doc.section_title}")
711
+ if hasattr(doc, 'document') and doc.document:
712
+ doc_obj = doc.document
713
+ if hasattr(doc_obj, 'title'):
714
+ parts.append(f"Văn bản: {doc_obj.title}")
715
+ if hasattr(doc_obj, 'code'):
716
+ parts.append(f"Mã văn bản: {doc_obj.code}")
717
+ if hasattr(doc, 'content') and doc.content:
718
+ # Provide longer snippet so LLM has enough context (up to ~1500 chars)
719
+ max_len = 1500
720
+ snippet = doc.content[:max_len].strip()
721
+ if len(doc.content) > max_len:
722
+ snippet += "..."
723
+ parts.append(f"Nội dung: {snippet}")
724
+ return " | ".join(parts) if parts else str(doc)
725
+
726
+ return str(doc)
727
+
728
+ def _generate_openai(self, prompt: str) -> Optional[str]:
729
+ """Generate answer using OpenAI."""
730
+ if not self.client:
731
+ return None
732
+
733
+ try:
734
+ response = self.client.chat.completions.create(
735
+ model=os.environ.get("OPENAI_MODEL", "gpt-3.5-turbo"),
736
+ messages=[
737
+ {"role": "system", "content": "Bạn là chatbot tư vấn chuyên nghiệp."},
738
+ {"role": "user", "content": prompt}
739
+ ],
740
+ temperature=0.7,
741
+ max_tokens=500
742
+ )
743
+ return response.choices[0].message.content
744
+ except Exception as e:
745
+ print(f"OpenAI API error: {e}")
746
+ return None
747
+
748
+ def _generate_anthropic(self, prompt: str) -> Optional[str]:
749
+ """Generate answer using Anthropic Claude."""
750
+ if not self.client:
751
+ return None
752
+
753
+ try:
754
+ message = self.client.messages.create(
755
+ model=os.environ.get("ANTHROPIC_MODEL", "claude-3-5-sonnet-20241022"),
756
+ max_tokens=500,
757
+ messages=[
758
+ {"role": "user", "content": prompt}
759
+ ]
760
+ )
761
+ return message.content[0].text
762
+ except Exception as e:
763
+ print(f"Anthropic API error: {e}")
764
+ return None
765
+
766
+ def _generate_ollama(self, prompt: str) -> Optional[str]:
767
+ """Generate answer using Ollama (local LLM)."""
768
+ try:
769
+ import requests
770
+ model = getattr(self, 'ollama_model', os.environ.get("OLLAMA_MODEL", "qwen2.5:7b"))
771
+
772
+ response = requests.post(
773
+ f"{self.ollama_base_url}/api/generate",
774
+ json={
775
+ "model": model,
776
+ "prompt": prompt,
777
+ "stream": False,
778
+ "options": {
779
+ "temperature": 0.7,
780
+ "top_p": 0.9,
781
+ "num_predict": 500
782
+ }
783
+ },
784
+ timeout=60
785
+ )
786
+
787
+ if response.status_code == 200:
788
+ return response.json().get("response")
789
+ return None
790
+ except Exception as e:
791
+ print(f"Ollama API error: {e}")
792
+ return None
793
+
794
+ def _generate_huggingface(self, prompt: str) -> Optional[str]:
795
+ """Generate answer using Hugging Face Inference API."""
796
+ try:
797
+ import requests
798
+
799
+ api_url = f"https://api-inference.huggingface.co/models/{self.hf_model}"
800
+ headers = {}
801
+ if hasattr(self, 'hf_api_key') and self.hf_api_key:
802
+ headers["Authorization"] = f"Bearer {self.hf_api_key}"
803
+
804
+ response = requests.post(
805
+ api_url,
806
+ headers=headers,
807
+ json={
808
+ "inputs": prompt,
809
+ "parameters": {
810
+ "temperature": 0.7,
811
+ "max_new_tokens": 500,
812
+ "return_full_text": False
813
+ }
814
+ },
815
+ timeout=60
816
+ )
817
+
818
+ if response.status_code == 200:
819
+ result = response.json()
820
+ if isinstance(result, list) and len(result) > 0:
821
+ return result[0].get("generated_text", "")
822
+ elif isinstance(result, dict):
823
+ return result.get("generated_text", "")
824
+ elif response.status_code == 503:
825
+ # Model is loading, wait and retry
826
+ print("⚠️ Model is loading, please wait...")
827
+ return None
828
+ else:
829
+ print(f"Hugging Face API error: {response.status_code} - {response.text}")
830
+ return None
831
+ except Exception as e:
832
+ print(f"Hugging Face API error: {e}")
833
+ return None
834
+
835
+ def _generate_local(self, prompt: str) -> Optional[str]:
836
+ """Generate answer using local Hugging Face Transformers model."""
837
+ if self.local_model is None or self.local_tokenizer is None:
838
+ return None
839
+
840
+ try:
841
+ import torch
842
+
843
+ # Format prompt for Qwen models
844
+ messages = [
845
+ {"role": "system", "content": "Bạn là chatbot tư vấn chuyên nghiệp."},
846
+ {"role": "user", "content": prompt}
847
+ ]
848
+
849
+ # Apply chat template if available
850
+ if hasattr(self.local_tokenizer, "apply_chat_template"):
851
+ text = self.local_tokenizer.apply_chat_template(
852
+ messages,
853
+ tokenize=False,
854
+ add_generation_prompt=True
855
+ )
856
+ else:
857
+ text = prompt
858
+
859
+ # Tokenize
860
+ inputs = self.local_tokenizer(text, return_tensors="pt")
861
+
862
+ # Move to device
863
+ device = next(self.local_model.parameters()).device
864
+ inputs = {k: v.to(device) for k, v in inputs.items()}
865
+
866
+ # Generate with optimized parameters for faster inference
867
+ with torch.no_grad():
868
+ # Use greedy decoding for faster generation (can switch to sampling if needed)
869
+ outputs = self.local_model.generate(
870
+ **inputs,
871
+ max_new_tokens=150, # Reduced from 500 for faster generation
872
+ temperature=0.6, # Lower temperature for faster, more deterministic output
873
+ top_p=0.85, # Slightly lower top_p
874
+ do_sample=True,
875
+ use_cache=True, # Enable KV cache for faster generation
876
+ pad_token_id=self.local_tokenizer.eos_token_id,
877
+ repetition_penalty=1.1 # Prevent repetition
878
+ # Removed early_stopping (only works with num_beams > 1)
879
+ )
880
+
881
+ # Decode
882
+ generated_text = self.local_tokenizer.decode(
883
+ outputs[0][inputs["input_ids"].shape[1]:],
884
+ skip_special_tokens=True
885
+ )
886
+
887
+ return generated_text.strip()
888
+
889
+ except TypeError as e:
890
+ # Check for Int8Params compatibility error
891
+ if "_is_hf_initialized" in str(e) or "Int8Params" in str(e):
892
+ error_msg = (
893
+ f"[LLM] ❌ Int8Params compatibility error: {e}\n"
894
+ f"[LLM] 💡 This error occurs when using 8-bit quantization with incompatible library versions.\n"
895
+ f"[LLM] 💡 Solutions:\n"
896
+ f"[LLM] 1. Set LOCAL_MODEL_QUANTIZATION=4bit to use 4-bit quantization instead\n"
897
+ f"[LLM] 2. Set LOCAL_MODEL_QUANTIZATION=none to disable quantization\n"
898
+ f"[LLM] 3. Use API mode (LLM_PROVIDER=api) to avoid local model issues\n"
899
+ f"[LLM] 4. Use a smaller model like Qwen/Qwen2.5-1.5B-Instruct"
900
+ )
901
+ print(error_msg, flush=True)
902
+ logger.error(f"[LLM] ❌ Int8Params compatibility error: {e}")
903
+ print(f"[LLM] ❌ ERROR: {type(e).__name__}: {str(e)}", file=sys.stderr, flush=True)
904
+ return None
905
+ else:
906
+ # Other TypeError, re-raise to be caught by general handler
907
+ raise
908
+ except Exception as e:
909
+ error_trace = traceback.format_exc()
910
+ print(f"[LLM] ❌ Local model generation error: {e}", flush=True)
911
+ print(f"[LLM] ❌ Full trace: {error_trace}", flush=True)
912
+ logger.error(f"[LLM] ❌ Local model generation error: {e}\n{error_trace}")
913
+ print(f"[LLM] ❌ ERROR: {type(e).__name__}: {str(e)}", file=sys.stderr, flush=True)
914
+ traceback.print_exc(file=sys.stderr)
915
+ return None
916
+
917
+ def _generate_api(self, prompt: str, context: Optional[List[Dict[str, Any]]] = None) -> Optional[str]:
918
+ """Generate answer by calling HF Spaces API.
919
+
920
+ Args:
921
+ prompt: Full prompt including query and documents context.
922
+ context: Optional conversation context (not used in API mode, handled by HF Spaces).
923
+ """
924
+ if not self.api_base_url:
925
+ return None
926
+
927
+ try:
928
+ import requests
929
+
930
+ # Prepare request payload
931
+ # Send the full prompt (with documents) as the message to HF Spaces
932
+ # This ensures HF Spaces receives all context from retrieved documents
933
+ payload = {
934
+ "message": prompt,
935
+ "reset_session": False
936
+ }
937
+
938
+ # Only add session_id if we have a valid session context
939
+ # For now, we'll omit it and let the API generate a new one
940
+
941
+ # Add context if available (API may support this in future)
942
+ # For now, context is handled by the API internally
943
+
944
+ # Call API endpoint
945
+ api_url = f"{self.api_base_url}/chatbot/chat/"
946
+ print(f"[LLM] 🔗 Calling API: {api_url}", flush=True)
947
+ print(f"[LLM] 📤 Payload: {payload}", flush=True)
948
+
949
+ response = requests.post(
950
+ api_url,
951
+ json=payload,
952
+ headers={"Content-Type": "application/json"},
953
+ timeout=60
954
+ )
955
+
956
+ print(f"[LLM] 📥 Response status: {response.status_code}", flush=True)
957
+ print(f"[LLM] 📥 Response headers: {dict(response.headers)}", flush=True)
958
+
959
+ if response.status_code == 200:
960
+ try:
961
+ result = response.json()
962
+ print(f"[LLM] 📥 Response JSON: {result}", flush=True)
963
+ # Extract message from response
964
+ if isinstance(result, dict):
965
+ message = result.get("message", None)
966
+ if message:
967
+ print(f"[LLM] ✅ Got message from API (length: {len(message)})", flush=True)
968
+ return message
969
+ else:
970
+ print(f"[LLM] ⚠️ Response is not a dict: {type(result)}", flush=True)
971
+ return None
972
+ except ValueError as e:
973
+ print(f"[LLM] ❌ JSON decode error: {e}", flush=True)
974
+ print(f"[LLM] ❌ Response text: {response.text[:500]}", flush=True)
975
+ return None
976
+ elif response.status_code == 503:
977
+ # Service unavailable - model might be loading
978
+ print("[LLM] ⚠️ API service is loading, please wait...", flush=True)
979
+ return None
980
+ else:
981
+ print(f"[LLM] ❌ API error: {response.status_code} - {response.text[:500]}", flush=True)
982
+ return None
983
+ except requests.exceptions.Timeout:
984
+ print("[LLM] ❌ API request timeout")
985
+ return None
986
+ except requests.exceptions.ConnectionError as e:
987
+ print(f"[LLM] ❌ API connection error: {e}")
988
+ return None
989
+ except Exception as e:
990
+ error_trace = traceback.format_exc()
991
+ print(f"[LLM] ❌ API mode error: {e}", flush=True)
992
+ print(f"[LLM] ❌ Full trace: {error_trace}", flush=True)
993
+ logger.error(f"[LLM] ❌ API mode error: {e}\n{error_trace}")
994
+ return None
995
+
996
+ def summarize_context(self, messages: List[Dict[str, Any]], max_length: int = 200) -> str:
997
+ """
998
+ Summarize conversation context.
999
+
1000
+ Args:
1001
+ messages: List of conversation messages.
1002
+ max_length: Maximum summary length.
1003
+
1004
+ Returns:
1005
+ Summary string.
1006
+ """
1007
+ if not messages:
1008
+ return ""
1009
+
1010
+ # Simple summarization: extract key entities and intents
1011
+ intents = []
1012
+ entities = set()
1013
+
1014
+ for msg in messages:
1015
+ if msg.get("intent"):
1016
+ intents.append(msg["intent"])
1017
+ if msg.get("entities"):
1018
+ for key, value in msg["entities"].items():
1019
+ if isinstance(value, str):
1020
+ entities.add(value)
1021
+ elif isinstance(value, list):
1022
+ entities.update(value)
1023
+
1024
+ summary_parts = []
1025
+ if intents:
1026
+ unique_intents = list(set(intents))
1027
+ summary_parts.append(f"Chủ đề: {', '.join(unique_intents)}")
1028
+ if entities:
1029
+ summary_parts.append(f"Thông tin: {', '.join(list(entities)[:5])}")
1030
+
1031
+ summary = ". ".join(summary_parts)
1032
+ return summary[:max_length] if len(summary) > max_length else summary
1033
+
1034
+ def extract_entities_llm(self, query: str) -> Dict[str, Any]:
1035
+ """
1036
+ Extract entities using LLM.
1037
+
1038
+ Args:
1039
+ query: User query.
1040
+
1041
+ Returns:
1042
+ Dictionary of extracted entities.
1043
+ """
1044
+ if not self.is_available():
1045
+ return {}
1046
+
1047
+ prompt = f"""
1048
+ Trích xuất các thực thể từ câu hỏi sau:
1049
+ "{query}"
1050
+
1051
+ Các loại thực thể cần tìm:
1052
+ - fine_code: Mã vi phạm (V001, V002, ...)
1053
+ - fine_name: Tên vi phạm
1054
+ - procedure_name: Tên thủ tục
1055
+ - office_name: Tên đơn vị
1056
+
1057
+ Trả lời dưới dạng JSON: {{"fine_code": "...", "fine_name": "...", ...}}
1058
+ Nếu không có, trả về {{}}.
1059
+ """
1060
+
1061
+ try:
1062
+ if self.provider == LLM_PROVIDER_OPENAI:
1063
+ response = self._generate_openai(prompt)
1064
+ elif self.provider == LLM_PROVIDER_ANTHROPIC:
1065
+ response = self._generate_anthropic(prompt)
1066
+ elif self.provider == LLM_PROVIDER_OLLAMA:
1067
+ response = self._generate_ollama(prompt)
1068
+ elif self.provider == LLM_PROVIDER_HUGGINGFACE:
1069
+ response = self._generate_huggingface(prompt)
1070
+ elif self.provider == LLM_PROVIDER_LOCAL:
1071
+ response = self._generate_local(prompt)
1072
+ elif self.provider == LLM_PROVIDER_API:
1073
+ # For API mode, we can't extract entities directly
1074
+ # Return empty dict
1075
+ return {}
1076
+ else:
1077
+ return {}
1078
+
1079
+ if response:
1080
+ # Try to extract JSON from response
1081
+ json_match = re.search(r'\{[^}]+\}', response)
1082
+ if json_match:
1083
+ return json.loads(json_match.group())
1084
+ except Exception as e:
1085
+ print(f"Error extracting entities with LLM: {e}")
1086
+
1087
+ return {}
1088
+
1089
+
1090
+ # Global LLM generator instance
1091
+ _llm_generator: Optional[LLMGenerator] = None
1092
+ _last_provider: Optional[str] = None
1093
+
1094
+ def get_llm_generator() -> Optional[LLMGenerator]:
1095
+ """Get or create LLM generator instance.
1096
+
1097
+ Recreates instance if provider changed (e.g., from local to api).
1098
+ """
1099
+ global _llm_generator, _last_provider
1100
+
1101
+ # Get current provider from env
1102
+ current_provider = os.environ.get("LLM_PROVIDER", LLM_PROVIDER_NONE).lower()
1103
+
1104
+ # Recreate if provider changed or instance doesn't exist
1105
+ if _llm_generator is None or _last_provider != current_provider:
1106
+ _llm_generator = LLMGenerator()
1107
+ _last_provider = current_provider
1108
+ print(f"[LLM] 🔄 Recreated LLM generator with provider: {current_provider}", flush=True)
1109
+
1110
+ return _llm_generator if _llm_generator.is_available() else None
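
The file above only defines the module; the commit does not show how it is wired into the chatbot. A minimal, hypothetical usage sketch (the import path and the Ollama settings are assumptions, not part of this commit):

import os

# Choose a provider before importing, since LLM_PROVIDER is read at import time.
os.environ["LLM_PROVIDER"] = "ollama"                     # or: openai, anthropic, huggingface, local, api
os.environ["OLLAMA_BASE_URL"] = "http://localhost:11434"  # assumed local Ollama server
os.environ["OLLAMA_MODEL"] = "qwen2.5:7b"

from backend.chatbot.llm_integration import get_llm_generator  # assumed import path

generator = get_llm_generator()  # returns None when no provider is usable
if generator is not None:
    answer = generator.generate_answer(
        query="Mức phạt khi không đội mũ bảo hiểm là bao nhiêu?",
        context=None,     # optional prior conversation messages
        documents=None,   # retrieved LegalSection/Fine/Procedure objects, if available
    )
    print(answer)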