"""TranslateGemma Space: Gradio UI + minimal MCP (JSON-RPC 2.0) endpoint.

Serves google/translategemma-4b-it for text translation between 55 language
codes, with a lazy-loaded model, a Gradio front end, a /health probe, and a
/mcp POST endpoint exposing a single ``translate`` tool.
"""
import os

# Must be set before gradio is imported so SSR is disabled at import time.
os.environ["GRADIO_SSR_MODE"] = "false"

import torch
import spaces
import gradio as gr
from transformers import AutoModelForImageTextToText, AutoProcessor
from huggingface_hub import login
from fastapi import Request
from fastapi.responses import JSONResponse

# Login with HF token if available (needed for gated model downloads).
hf_token = os.environ.get("HF_TOKEN")
if hf_token:
    login(token=hf_token)

# Language codes supported by the UI and the MCP tool schema.
LANGUAGES = {
    "en": "English",
    "de": "German",
    "fr": "French",
    "es": "Spanish",
    "it": "Italian",
    "pt": "Portuguese",
    "nl": "Dutch",
    "pl": "Polish",
    "cs": "Czech",
    "ru": "Russian",
    "uk": "Ukrainian",
    "zh": "Chinese",
    "ja": "Japanese",
    "ko": "Korean",
    "ar": "Arabic",
    "hi": "Hindi",
    "bn": "Bengali",
    "tr": "Turkish",
    "vi": "Vietnamese",
    "th": "Thai",
    "id": "Indonesian",
    "ms": "Malay",
    "sv": "Swedish",
    "no": "Norwegian",
    "da": "Danish",
    "fi": "Finnish",
    "el": "Greek",
    "he": "Hebrew",
    "ro": "Romanian",
    "hu": "Hungarian",
    "bg": "Bulgarian",
    "hr": "Croatian",
    "sk": "Slovak",
    "sl": "Slovenian",
    "sr": "Serbian",
    "lt": "Lithuanian",
    "lv": "Latvian",
    "et": "Estonian",
    "sw": "Swahili",
    "ta": "Tamil",
    "te": "Telugu",
    "mr": "Marathi",
    "gu": "Gujarati",
    "kn": "Kannada",
    "ml": "Malayalam",
    "pa": "Punjabi",
    "ur": "Urdu",
    "fa": "Persian",
    "fil": "Filipino",
    "ca": "Catalan",
    "gl": "Galician",
    "eu": "Basque",
    "cy": "Welsh",
    "ga": "Irish",
}

model_id = "google/translategemma-4b-it"

# The processor is small, so it is loaded eagerly at import time; the model
# itself is deferred to the first request (see load_model).
print("Loading processor...")
processor = AutoProcessor.from_pretrained(model_id)
print("Processor loaded!")

model = None


def load_model():
    """Lazily load the model on first use and cache it in the module global.

    Returns the cached ``AutoModelForImageTextToText`` instance, placed on
    CUDA when available (bfloat16), otherwise on CPU, in eval mode.
    """
    global model
    if model is None:
        print("Loading model...")
        device = "cuda" if torch.cuda.is_available() else "cpu"
        model = AutoModelForImageTextToText.from_pretrained(
            model_id,
            torch_dtype=torch.bfloat16,
        ).to(device).eval()
        print(f"Model loaded on {device.upper()}!")
    return model


@spaces.GPU(duration=120)
def translate(text: str, source_lang: str, target_lang: str) -> str:
    """Translate ``text`` from ``source_lang`` to ``target_lang``.

    Args:
        text: Source text; empty/whitespace-only input short-circuits to "".
        source_lang: Source language code (key of ``LANGUAGES``).
        target_lang: Target language code (key of ``LANGUAGES``).

    Returns:
        The stripped translation produced by greedy decoding
        (``do_sample=False``, up to 1024 new tokens).
    """
    if not text or not text.strip():
        return ""

    m = load_model()

    # TranslateGemma chat format: the language codes ride along with the
    # text content entry and are consumed by the processor's chat template.
    messages = [
        {
            "role": "user",
            "content": [
                {
                    "type": "text",
                    "source_lang_code": source_lang,
                    "target_lang_code": target_lang,
                    "text": text,
                }
            ],
        }
    ]

    device = "cuda" if torch.cuda.is_available() else "cpu"
    inputs = processor.apply_chat_template(
        messages,
        tokenize=True,
        add_generation_prompt=True,
        return_dict=True,
        return_tensors="pt",
    ).to(device)

    with torch.inference_mode():
        generation = m.generate(**inputs, max_new_tokens=1024, do_sample=False)

    # Slice off the prompt tokens so only the newly generated text is decoded.
    input_len = inputs["input_ids"].shape[1]
    output = processor.decode(generation[0][input_len:], skip_special_tokens=True)
    return output.strip()


LANG_CODES = list(LANGUAGES.keys())
LANG_CHOICES = [f"{code} ({name})" for code, name in LANGUAGES.items()]


def gradio_translate(text, source, target):
    """UI adapter: strip the display name from 'code (Name)' dropdown values."""
    src_code = source.split(" ")[0]
    tgt_code = target.split(" ")[0]
    return translate(text, src_code, tgt_code)


# Gradio UI definition
with gr.Blocks(title="TranslateGemma") as demo:
    gr.HTML(""" """)
    with gr.Column(elem_id="col-container"):
        gr.HTML("""
badge

🌍 TRANSLATE GEMMA

⚡ AI-Powered Translation for 55 Languages ⚡

🤖 MODEL: translategemma-4b-it
""")
        with gr.Row():
            source_lang = gr.Dropdown(
                choices=LANG_CHOICES,
                value="en (English)",
                label="📤 SOURCE LANGUAGE",
            )
            gr.HTML('⚡➡️⚡')
            target_lang = gr.Dropdown(
                choices=LANG_CHOICES,
                value="ko (Korean)",
                label="📥 TARGET LANGUAGE",
            )
        with gr.Row():
            with gr.Column():
                input_text = gr.Textbox(
                    label="💬 INPUT",
                    lines=8,
                    placeholder="Type or paste your text here...",
                )
            with gr.Column():
                output_text = gr.Textbox(
                    label="✨ OUTPUT",
                    lines=8,
                    interactive=False,
                    placeholder="Translation will appear here...",
                )
        translate_btn = gr.Button(
            "💥 POW! TRANSLATE 💥", variant="primary", elem_classes="pow-btn"
        )
        gr.HTML("""

🔗 MCP Endpoint: POST /mcp

⏱️ Note: First request loads model (~60s), then fast (~5s)

""")

    translate_btn.click(
        fn=gradio_translate,
        inputs=[input_text, source_lang, target_lang],
        outputs=output_text,
    )

# Attach extra FastAPI routes to Gradio's internal FastAPI app.
# NOTE(review): in some Gradio versions `Blocks.app` only exists after
# launch()/mount — confirm this attribute is populated at this point for the
# pinned gradio version, or switch to gr.mount_gradio_app.
app = demo.app


@app.get("/health")
async def health_check():
    """Liveness probe for the Space."""
    return {"status": "ok"}


@app.post("/mcp")
async def mcp_handler(request: Request):
    """Minimal MCP endpoint speaking JSON-RPC 2.0.

    Supports ``initialize``, ``tools/list``, and ``tools/call`` (tool name
    ``translate``). Any other method — and ``tools/call`` with an unknown
    tool name — falls through to a -32601 "Method not found" error.
    """
    body = await request.json()
    method = body.get("method", "")
    params = body.get("params", {})
    msg_id = body.get("id")

    if method == "initialize":
        return JSONResponse({
            "jsonrpc": "2.0",
            "id": msg_id,
            "result": {
                "protocolVersion": "2024-11-05",
                "capabilities": {"tools": {}},
                "serverInfo": {
                    "name": "translategemma-mcp",
                    "version": "1.0.0",
                },
            },
        })
    elif method == "tools/list":
        return JSONResponse({
            "jsonrpc": "2.0",
            "id": msg_id,
            "result": {
                "tools": [
                    {
                        "name": "translate",
                        "description": "Translate text between 55 languages using TranslateGemma-4B-IT",
                        "inputSchema": {
                            "type": "object",
                            "properties": {
                                "text": {"type": "string", "description": "The text to translate"},
                                "source_lang": {"type": "string", "description": f"Source language code: {', '.join(LANG_CODES)}"},
                                "target_lang": {"type": "string", "description": f"Target language code: {', '.join(LANG_CODES)}"},
                            },
                            "required": ["text", "source_lang", "target_lang"],
                        },
                    }
                ]
            },
        })
    elif method == "tools/call":
        tool_name = params.get("name")
        arguments = params.get("arguments", {})
        if tool_name == "translate":
            try:
                result = translate(
                    arguments.get("text", ""),
                    arguments.get("source_lang", "en"),
                    arguments.get("target_lang", "en"),
                )
                return JSONResponse({
                    "jsonrpc": "2.0",
                    "id": msg_id,
                    "result": {"content": [{"type": "text", "text": result}]},
                })
            except Exception as e:
                # Surface model/runtime failures as a JSON-RPC server error.
                return JSONResponse({
                    "jsonrpc": "2.0",
                    "id": msg_id,
                    "error": {"code": -32000, "message": str(e)},
                })

    # Unknown method (or unknown tool name) ends up here.
    return JSONResponse({
        "jsonrpc": "2.0",
        "id": msg_id,
        "error": {"code": -32601, "message": f"Method not found: {method}"},
    })


# Run via Gradio launch (instead of mounting under an external FastAPI app).
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)