"""Internationalization support for the RAG demo"""
# Mapping of language code -> {translation key -> localized UI string}.
# Both languages define the same set of keys; lookups go through get_text().
TRANSLATIONS: dict[str, dict[str, str]] = {
    "en": {
        # Corpus tab
        "corpus_management": "Corpus Management",
        "corpus_description": "Upload a PDF document or use the default corpus. The document will be split into chunks for retrieval.",
        "upload_pdf": "Upload PDF",
        "chunk_size": "Chunk Size (characters)",
        "chunk_overlap": "Chunk Overlap (characters)",
        "process_corpus": "Process Corpus",
        "status": "Status",
        # Retrieval tab
        "retrieval_config": "Retrieval Configuration",
        "embedding_model": "Embedding Model",
        "top_k": "Top K (number of chunks to retrieve)",
        "similarity_threshold": "Similarity Threshold (minimum score)",
        # Generation tab
        "generation_config": "Generation Configuration",
        "llm_model": "Language Model",
        "temperature": "Temperature (creativity)",
        "max_tokens": "Max Tokens (response length)",
        # Query tab
        "ask_question": "Ask a Question",
        "your_question": "Your Question",
        "question_placeholder": "Enter your question here...",
        "example_questions": "Example Questions",
        "submit_query": "Submit Query",
        "answer": "Answer",
        "retrieved_chunks": "Retrieved Chunks",
        "prompt_sent": "Prompt Sent to LLM",
        "errors": "Errors",
        # Results
        "similarity_score": "Similarity Score",
        "no_corpus": "Please process a corpus first in the Corpus tab.",
        # Messages
        "error": "Error",
        "success": "Success",
        "processing": "Processing...",
    },
    "fr": {
        # Corpus tab
        "corpus_management": "Gestion du Corpus",
        "corpus_description": "Téléchargez un document PDF ou utilisez le corpus par défaut. Le document sera divisé en chunks pour la récupération.",
        "upload_pdf": "Télécharger un PDF",
        "chunk_size": "Taille des Chunks (caractères)",
        "chunk_overlap": "Chevauchement des Chunks (caractères)",
        "process_corpus": "Traiter le Corpus",
        "status": "Statut",
        # Retrieval tab
        "retrieval_config": "Configuration du Retrieval",
        "embedding_model": "Modèle d'Embedding",
        "top_k": "Top K (nombre de chunks à récupérer)",
        "similarity_threshold": "Seuil de Similarité (score minimum)",
        # Generation tab
        "generation_config": "Configuration de la Génération",
        "llm_model": "Modèle de Langage",
        "temperature": "Température (créativité)",
        "max_tokens": "Max Tokens (longueur de la réponse)",
        # Query tab
        "ask_question": "Poser une Question",
        "your_question": "Votre Question",
        "question_placeholder": "Entrez votre question ici...",
        "example_questions": "Questions d'Exemple",
        "submit_query": "Soumettre la Question",
        "answer": "Réponse",
        "retrieved_chunks": "Chunks Récupérés",
        "prompt_sent": "Prompt Envoyé au LLM",
        "errors": "Erreurs",
        # Results
        "similarity_score": "Score de Similarité",
        "no_corpus": "Veuillez d'abord traiter un corpus dans l'onglet Corpus.",
        # Messages
        "error": "Erreur",
        "success": "Succès",
        "processing": "Traitement en cours...",
    }
}
def get_text(key: str, language: str = "en") -> str:
    """Return the localized UI string for *key* in *language*.

    Fallback chain: requested language -> English -> the key itself,
    so callers always receive a displayable string. (The original
    returned the raw key when a known language merely lacked a key,
    skipping the English translation.)

    Args:
        key: Translation key, e.g. "submit_query".
        language: Language code present in TRANSLATIONS ("en", "fr");
            unknown codes fall back to English.

    Returns:
        The translated string, or the English string, or *key*.
    """
    table = TRANSLATIONS.get(language, TRANSLATIONS["en"])
    # Fall back to English for keys missing from the selected language,
    # then to the key itself if no translation exists at all.
    return table.get(key, TRANSLATIONS["en"].get(key, key))