File size: 3,360 Bytes
fc5e5ef
fd00048
fc5e5ef
 
 
fd00048
 
 
 
 
 
 
 
08968a5
fc5e5ef
08968a5
fd00048
 
fc5e5ef
fd00048
fc5e5ef
e8e1ae7
08968a5
fc5e5ef
08968a5
 
 
 
 
 
fc5e5ef
fd00048
fc5e5ef
 
 
 
08968a5
fd00048
fc5e5ef
fd00048
fc5e5ef
 
 
 
08968a5
fc5e5ef
08968a5
fc5e5ef
 
fd00048
fc5e5ef
 
 
fd00048
 
edc1a38
fc5e5ef
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
fd00048
fc5e5ef
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
import os
import logging
import gradio as gr
from huggingface_hub import InferenceClient

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s | %(levelname)s | %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
)
logger = logging.getLogger(__name__)

# Environment variables for configuration
HF_TOKEN = os.environ.get("HF_TOKEN", "")

# Lazy %-style args: the message is only formatted if the level is enabled.
logger.info("HF_TOKEN configured: %s", bool(HF_TOKEN))

# Only pass a token when one is actually set; an empty string would be
# forwarded to the API verbatim and rejected.
client = InferenceClient(token=HF_TOKEN) if HF_TOKEN else InferenceClient()
logger.info("InferenceClient initialized")


# Language pairs with their MarianMT models (configurable via env vars).
# NOTE: these display keys double as the Dropdown choices in the UI, so the
# exact bytes must match between this dict and the Dropdown's `value`.
LANGUAGE_PAIRS = {
    "English β†’ French": os.environ.get("MODEL_EN_FR", "Helsinki-NLP/opus-mt-en-fr"),
    "English β†’ Spanish": os.environ.get("MODEL_EN_ES", "Helsinki-NLP/opus-mt-en-es"),
    "English β†’ German": os.environ.get("MODEL_EN_DE", "Helsinki-NLP/opus-mt-en-de"),
    "French β†’ English": os.environ.get("MODEL_FR_EN", "Helsinki-NLP/opus-mt-fr-en"),
    "Spanish β†’ English": os.environ.get("MODEL_ES_EN", "Helsinki-NLP/opus-mt-es-en"),
    "German β†’ English": os.environ.get("MODEL_DE_EN", "Helsinki-NLP/opus-mt-de-en"),
}
logger.info("Loaded %d language pairs", len(LANGUAGE_PAIRS))


def translate(text: str, language_pair: str) -> str:
    """Translate *text* via the HF Inference API using the selected pair.

    Args:
        text: Source text; may be empty or None (cleared Gradio textbox).
        language_pair: Key into ``LANGUAGE_PAIRS`` selecting the MarianMT model.

    Returns:
        The translated text, or a user-facing message on empty input / API error.
    """
    # Lazy %-args avoid formatting when INFO is disabled; guard len() on None.
    logger.info(
        "translate() called | text_len=%d | pair=%s",
        len(text) if text else 0,
        language_pair,
    )

    # Gradio can deliver None for a cleared textbox — check before .strip()
    # to avoid an AttributeError.
    if not text or not text.strip():
        logger.warning("Empty text received")
        return "πŸ“ Enter text to translate!"

    try:
        # An unknown pair raises KeyError here, caught by the handler below.
        model = LANGUAGE_PAIRS[language_pair]
        logger.info("Calling translation | model=%s", model)
        result = client.translation(text, model=model)
        logger.info("Translation: %s...", result.translation_text[:50])
        return result.translation_text
    except Exception as e:
        # UI boundary: log the full traceback, return a friendly message
        # instead of crashing the request.
        logger.exception("API error")
        return f"❌ Error: {e}"


logger.info("Building Gradio interface...")

# Build the UI: two-column layout — source text + controls on the left,
# read-only translation output on the right.
with gr.Blocks(title="Instant Translator") as demo:
    gr.Markdown("# 🌍 Instant Translator\nTranslate text between languages instantly!")

    with gr.Row(equal_height=True):
        with gr.Column():
            input_text = gr.Textbox(
                label="Source text",
                placeholder="Hello, how are you today?",
                lines=4,
                autofocus=True,
            )
            # Choices are the display keys of LANGUAGE_PAIRS; translate()
            # uses the selected key to look up the model id.
            language_pair = gr.Dropdown(
                choices=list(LANGUAGE_PAIRS.keys()),
                value="English β†’ French",
                label="Language pair",
            )
            btn = gr.Button("Translate πŸš€", variant="primary")

        with gr.Column():
            # Read-only: populated only by the translate() callbacks below.
            output_text = gr.Textbox(
                label="Translation",
                lines=4,
                interactive=False,
            )

    # Run translation on button click or Enter in the input textbox.
    btn.click(translate, inputs=[input_text, language_pair], outputs=output_text)
    input_text.submit(translate, inputs=[input_text, language_pair], outputs=output_text)

    # Clickable examples that pre-fill both the text and the language pair.
    gr.Examples(
        examples=[
            ["Hello, how are you today?", "English β†’ French"],
            ["Machine learning is fascinating.", "English β†’ Spanish"],
            ["I love programming with Python.", "English β†’ German"],
        ],
        inputs=[input_text, language_pair],
    )

# Enable request queuing so concurrent users don't block one another.
demo.queue()
logger.info("Starting Gradio server...")
demo.launch()