Fix incorrect LLM log message
Browse files
- app/llm.py +10 -6
- app/main.py +0 -2
app/llm.py
CHANGED
|
@@ -102,18 +102,21 @@ class LLMClient:
|
|
| 102 |
"""
|
| 103 |
logger.info(f"[LLM] generate_text - provider: {self.provider}, prompt: {prompt}")
|
| 104 |
try:
|
|
|
|
| 105 |
if self.provider == "openai":
|
| 106 |
-
|
| 107 |
elif self.provider == "huggingface":
|
| 108 |
-
|
| 109 |
elif self.provider == "local":
|
| 110 |
-
|
| 111 |
elif self.provider == "custom":
|
| 112 |
-
|
| 113 |
elif self.provider == "hfs":
|
| 114 |
-
|
| 115 |
else:
|
| 116 |
raise ValueError(f"Unsupported provider: {self.provider}")
|
|
|
|
|
|
|
| 117 |
except Exception as e:
|
| 118 |
logger.error(f"[LLM] Error generating text with {self.provider}: {e}")
|
| 119 |
raise
|
|
@@ -169,7 +172,8 @@ class LLMClient:
|
|
| 169 |
headers = {}
|
| 170 |
if self.api_key:
|
| 171 |
headers["Authorization"] = f"Bearer {self.api_key}"
|
| 172 |
-
response = await call_endpoint_with_retry(self._client, endpoint, payload, 3,
|
|
|
|
| 173 |
if response is not None:
|
| 174 |
data = response.json()
|
| 175 |
if 'response' in data:
|
|
|
|
| 102 |
"""
|
| 103 |
logger.info(f"[LLM] generate_text - provider: {self.provider}, prompt: {prompt}")
|
| 104 |
try:
|
| 105 |
+
result = None
|
| 106 |
if self.provider == "openai":
|
| 107 |
+
result = await self._generate_openai(prompt, system_prompt, **kwargs)
|
| 108 |
elif self.provider == "huggingface":
|
| 109 |
+
result = await self._generate_huggingface(prompt, **kwargs)
|
| 110 |
elif self.provider == "local":
|
| 111 |
+
result = await self._generate_local(prompt, **kwargs)
|
| 112 |
elif self.provider == "custom":
|
| 113 |
+
result = await self._generate_custom(prompt, **kwargs)
|
| 114 |
elif self.provider == "hfs":
|
| 115 |
+
result = await self._generate_hfs(prompt, **kwargs)
|
| 116 |
else:
|
| 117 |
raise ValueError(f"Unsupported provider: {self.provider}")
|
| 118 |
+
logger.info(f"[LLM] generate_text - provider: {self.provider}\n\t result: {result}")
|
| 119 |
+
return result
|
| 120 |
except Exception as e:
|
| 121 |
logger.error(f"[LLM] Error generating text with {self.provider}: {e}")
|
| 122 |
raise
|
|
|
|
| 172 |
headers = {}
|
| 173 |
if self.api_key:
|
| 174 |
headers["Authorization"] = f"Bearer {self.api_key}"
|
| 175 |
+
response = await call_endpoint_with_retry(self._client, endpoint, payload, 3, 500, headers=headers)
|
| 176 |
+
logger.info(f"[LLM] generate_text - provider: {self.provider}\n\t response: {response}")
|
| 177 |
if response is not None:
|
| 178 |
data = response.json()
|
| 179 |
if 'response' in data:
|
app/main.py
CHANGED
|
@@ -430,8 +430,6 @@ async def format_search_results(question: str, matches: List[Dict[str, Any]]) ->
|
|
| 430 |
f"\n\nCâu hỏi của người dùng: {question}\n"
|
| 431 |
)
|
| 432 |
|
| 433 |
-
logger.info(f"[DEBUG] prompt:\n {prompt}")
|
| 434 |
-
|
| 435 |
# Gọi LLM để sinh câu trả lời, fallback nếu lỗi
|
| 436 |
try:
|
| 437 |
answer = await llm_client.generate_text(prompt)
|
|
|
|
| 430 |
f"\n\nCâu hỏi của người dùng: {question}\n"
|
| 431 |
)
|
| 432 |
|
|
|
|
|
|
|
| 433 |
# Gọi LLM để sinh câu trả lời, fallback nếu lỗi
|
| 434 |
try:
|
| 435 |
answer = await llm_client.generate_text(prompt)
|