# Gradio translation demo: English to French and Kinyarwanda to English
# (runs as a Hugging Face Space)
| import gradio as gr | |
| from transformers import MarianMTModel, MarianTokenizer | |
# Assumes the environment is set up for GPU use if available;
# this is about environment setup rather than code modification.
# Map each UI language-pair label to its Hugging Face model checkpoint.
_MODEL_NAMES = {
    "English to French": 'Helsinki-NLP/opus-mt-en-fr',
    "Kinyarwanda to English": 'Helsinki-NLP/opus-mt-rw-en',
}

# Cache of already-loaded (model, tokenizer) pairs keyed by language pair,
# so repeated requests do not reload the weights from disk/network each time.
_loaded_models = {}


def load_model(lang_pair):
    """Return the ``(model, tokenizer)`` pair for *lang_pair*, caching on first load.

    Parameters
    ----------
    lang_pair : str
        One of the keys of ``_MODEL_NAMES`` ("English to French" or
        "Kinyarwanda to English").

    Returns
    -------
    tuple
        ``(MarianMTModel, MarianTokenizer)`` for the requested pair.

    Raises
    ------
    KeyError
        If *lang_pair* is not a supported language pair.
    """
    if lang_pair not in _loaded_models:
        model_name = _MODEL_NAMES[lang_pair]
        tokenizer = MarianTokenizer.from_pretrained(model_name)
        model = MarianMTModel.from_pretrained(model_name)
        _loaded_models[lang_pair] = (model, tokenizer)
    return _loaded_models[lang_pair]
# Translation cache. Keys are (tokenizer name, source text) tuples: keying on
# the text alone would return a stale translation from a *different* language
# pair when the same input string is submitted twice.
cache = {}


def get_translation_from_cache_or_model(model, tokenizer, text):
    """Translate *text* with *model*/*tokenizer*, memoizing results.

    Parameters
    ----------
    model : MarianMTModel
        Seq2seq model whose ``generate`` produces translated token ids.
    tokenizer : MarianTokenizer
        Tokenizer matching *model*; also used to identify the model in the
        cache key via its ``name_or_path``.
    text : str
        Source text to translate.

    Returns
    -------
    str
        The translated text (cached after the first call per model/text pair).
    """
    # Include the tokenizer identity in the key so identical input text under
    # two different language pairs does not collide in the cache.
    key = (getattr(tokenizer, "name_or_path", ""), text)
    if key in cache:
        return cache[key]
    model_inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    gen = model.generate(**model_inputs)
    translation = tokenizer.batch_decode(gen, skip_special_tokens=True)[0]
    cache[key] = translation
    return translation
def translate(lang_pair, text):
    """Translate *text* according to the selected language pair.

    Loads (or reuses) the model for *lang_pair*, then delegates to the
    caching translation helper.
    """
    model, tokenizer = load_model(lang_pair)
    return get_translation_from_cache_or_model(model, tokenizer, text)
# Assemble the Gradio UI: a language-pair dropdown and a free-text input,
# wired to the translate() function, with a text box for the result.
language_selector = gr.Dropdown(
    choices=["English to French", "Kinyarwanda to English"],
    label="Select Language Pair",
)
source_text = gr.Textbox(lines=2, placeholder="Enter Text...")

iface = gr.Interface(
    fn=translate,
    inputs=[language_selector, source_text],
    outputs=gr.Textbox(label="Translation"),
)

# Start the web app (debug mode on, not rendered inline).
iface.launch(debug=True, inline=False)