Spaces:
Sleeping
Sleeping
import os

# Point the Hub cache at the Space's writable filesystem BEFORE importing
# transformers: huggingface_hub reads cache-location env vars at import
# time, so setting this afterwards can be silently ignored.
os.environ['HUGGINGFACE_HUB_CACHE'] = '/app/.cache/huggingface/hub'

import gradio as gr
import torch
from transformers import NllbTokenizer, pipeline

# Fine-tuned NLLB-200 checkpoint hosted on the Hugging Face Hub.
model_name = "16pramodh/NMT_YAP"

# The fine-tuned model reuses the base NLLB tokenizer, which carries the
# language-code vocabulary (e.g. "eng_Latn", "hin_Deva").
tokenizer = NllbTokenizer.from_pretrained("facebook/nllb-200-distilled-600M")

# Build the translation pipeline once at startup; the model weights are
# downloaded from the Hub on first run. Use the GPU when one is present,
# otherwise fall back to CPU (-1) so the app does not crash on the
# CPU-only hardware that free Spaces run on.
pipe = pipeline(
    "translation",
    model=model_name,
    tokenizer=tokenizer,
    src_lang="eng_Latn",
    tgt_lang="hin_Deva",
    device=0 if torch.cuda.is_available() else -1,
)
| # Define the translation function that Gradio will expose as an API | |
def translate_text(text, source_lang="eng_Latn", target_lang="hin_Deva"):
    """Translate *text* with the globally loaded NLLB pipeline.

    Args:
        text: Source text to translate; falsy input short-circuits.
        source_lang: NLLB code of the input language (default English).
        target_lang: NLLB code of the output language (default Hindi).

    Returns:
        The translated string, or a placeholder message for empty input.
    """
    if not text:
        return "No text provided."
    # Forward the language codes so callers can override the defaults the
    # pipeline was constructed with (previously these arguments were
    # accepted but silently ignored).
    result = pipe(text, src_lang=source_lang, tgt_lang=target_lang)
    # The translation pipeline returns a list of dicts, one per input.
    return result[0]['translation_text']
# Create the Gradio Interface. The UI exposes a single textbox, so wrap
# translate_text in a one-argument adapter that supplies the fixed
# English -> Hindi language codes; wiring the 3-parameter function
# directly to a 1-input interface would raise TypeError on every request.
iface = gr.Interface(
    fn=lambda text: translate_text(text, "eng_Latn", "hin_Deva"),
    inputs=[
        gr.Textbox(label="Input Text")
    ],
    outputs="text",
    title="NLLB-200 Distilled finetuned Translation API",
    description="A public API for the NLLB-200 translation model, for english to hindi translation."
)

# Launch the Gradio app only when executed as a script (not on import).
if __name__ == "__main__":
    iface.launch()