Spaces:
Running
Running
Implement enhanced translation functionality with reliability checks and fallback mechanism
Browse files
app.py
CHANGED
|
@@ -180,7 +180,62 @@ def get_context(message, conversation_id):
|
|
| 180 |
except Exception as e:
|
| 181 |
logger.error(f"Error getting context: {str(e)}")
|
| 182 |
return ""
|
| 183 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 184 |
def post_process_response(user_message, bot_response):
|
| 185 |
"""Enhanced post-processing of bot responses to ensure correct language"""
|
| 186 |
try:
|
|
@@ -239,40 +294,6 @@ def post_process_response(user_message, bot_response):
|
|
| 239 |
logger.error(f"Post-processing error: {e}")
|
| 240 |
return bot_response
|
| 241 |
|
| 242 |
-
def post_process_response(user_message, bot_response):
    """Ensure the reply is in the user's language, translating via the LLM when it is not."""
    try:
        user_lang = detect_language(user_message)
        bot_lang = detect_language(bot_response)

        # Unsupported user language: keep the English answer, but apologize upfront.
        if user_lang not in LanguageUtils.SUPPORTED_LANGUAGES:
            return ("I apologize, but I cannot respond in your language. "
                    "I will answer in English instead.\n\n") + bot_response

        # Languages already agree, or the reply is too short to translate reliably.
        if user_lang == bot_lang or len(bot_response.strip()) <= 20:
            return bot_response

        logger.warning(f"Language mismatch detected! User: {user_lang}, Bot: {bot_lang}")

        candidate = translate_with_llm(bot_response, user_lang)
        translated_lang = detect_language(candidate)

        if translated_lang == user_lang:
            logger.info(f"Response automatically translated from {bot_lang} to {user_lang}")
            return candidate

        logger.error(f"Translation failed: got {translated_lang} instead of {user_lang}")
        # Translation did not land in the target language; fall back to English.
        return ("I apologize, but I cannot translate my response to your language. "
                "Here is my answer in English:\n\n") + bot_response

    except Exception as e:
        logger.error(f"Post-processing error: {e}")
        return bot_response
|
| 275 |
-
|
| 276 |
def load_vector_store():
|
| 277 |
"""Load knowledge base from dataset"""
|
| 278 |
try:
|
|
|
|
| 180 |
except Exception as e:
|
| 181 |
logger.error(f"Error getting context: {str(e)}")
|
| 182 |
return ""
|
| 183 |
+
|
| 184 |
+
def translate_with_llm(text: str, target_lang: str) -> str:
    """Translate *text* into *target_lang* using the active LLM.

    Sends a translator-role prompt through the shared inference client.
    For non-English targets, if the result still contains English stock
    phrases the call is retried once with a simplified prompt. On any
    failure — exception, empty completion, blank input — the original
    text is returned unchanged so callers always get usable output.

    Args:
        text: Source text to translate.
        target_lang: Language code of the desired output (e.g. 'de').

    Returns:
        The translated text, or ``text`` itself when there is nothing to
        translate or the translation attempt fails.
    """
    # Nothing to translate — skip the API round-trip entirely.
    if not text or not text.strip():
        return text

    try:
        # Human-readable language name makes the prompt more natural.
        lang_name = LanguageUtils.get_language_name(target_lang)

        prompt = (
            f"You are a professional translator. Translate the following text to {lang_name} ({target_lang}). "
            f"Keep the same formatting, links, and technical terms. "
            f"Maintain the same tone and style. "
            f"Respond ONLY with the direct translation without any explanations or additional text:\n\n"
            f"{text}"
        )

        response = client.chat_completion(
            messages=[
                {"role": "system", "content": "You are a professional translator. Respond ONLY with the translation."},
                {"role": "user", "content": prompt},
            ],
            max_tokens=ACTIVE_MODEL['parameters']['max_length'],
            temperature=0.3,  # Lower temperature for more reliable output
            top_p=0.95,
            stream=False,
        )

        # Some backends return None content; coerce to "" before strip().
        translated_text = (response.choices[0].message.content or "").strip()
        if not translated_text:
            # An empty completion is useless; fall back to the source text.
            return text

        # Heuristic reliability check: for non-English targets, leftover
        # English stock phrases suggest the model answered (or refused)
        # instead of translating. NOTE(review): this can false-positive
        # when the source legitimately contains these phrases.
        if target_lang != 'en':
            english_indicators = ["I apologize", "Sorry", "I cannot", "the following", "is a translation"]
            if any(indicator in translated_text for indicator in english_indicators):
                logger.warning(f"Translation might have failed for {target_lang}, found English indicators")

                # One retry with a minimal prompt — long instruction blocks
                # sometimes make smaller models echo instructions back.
                retry_prompt = f"Translate this to {lang_name}:\n\n{text}"
                retry_response = client.chat_completion(
                    messages=[
                        {"role": "system", "content": "You are a translator."},
                        {"role": "user", "content": retry_prompt},
                    ],
                    max_tokens=ACTIVE_MODEL['parameters']['max_length'],
                    temperature=0.3,
                    top_p=0.95,
                    stream=False,
                )
                # Keep the first attempt if the retry comes back empty.
                retry_text = (retry_response.choices[0].message.content or "").strip()
                translated_text = retry_text or translated_text

        return translated_text

    except Exception as e:
        # Best-effort: a translation failure must never break the response path.
        logger.error(f"Translation failed: {e}")
        return text
|
| 238 |
+
|
| 239 |
def post_process_response(user_message, bot_response):
|
| 240 |
"""Enhanced post-processing of bot responses to ensure correct language"""
|
| 241 |
try:
|
|
|
|
| 294 |
logger.error(f"Post-processing error: {e}")
|
| 295 |
return bot_response
|
| 296 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 297 |
def load_vector_store():
|
| 298 |
"""Load knowledge base from dataset"""
|
| 299 |
try:
|