16pramodh commited on
Commit
d706a9f
·
verified ·
1 Parent(s): 385a3d3

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +46 -0
app.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os

import torch
import gradio as gr
from transformers import pipeline

# Cache Hugging Face downloads inside the Space's writable directory so the
# model is stored (and reused) within the Space instead of the default home.
os.environ['HUGGINGFACE_HUB_CACHE'] = '/app/.cache/huggingface/hub'

# Distilled NLLB-200 checkpoint (600M params) used for translation.
model_name = "facebook/nllb-200-distilled-600M"

# Load the translation pipeline once at startup; the pipeline downloads the
# tokenizer and model from the Hub automatically.
# Use GPU 0 only when CUDA is actually available; hard-coding device=0 would
# raise on a CPU-only host, so fall back to -1 (CPU) otherwise.
pipe = pipeline(
    "translation",
    model=model_name,
    src_lang="eng_Latn",
    tgt_lang="hin_Deva",
    device=0 if torch.cuda.is_available() else -1,
)
# Translation function exposed both to the Gradio UI and as an API endpoint.
def translate_text(text, source_lang="eng_Latn", target_lang="hin_Deva"):
    """Translate *text* between two FLORES-200 language codes.

    Defaults match the pipeline's startup configuration (English -> Hindi),
    which keeps single-argument calls working: the original version required
    three positional arguments while the Gradio interface supplied only one,
    so every request raised a TypeError. It also ignored the language
    parameters entirely; they are now forwarded to the pipeline per call.

    Returns the translated string, or a short message for empty input.
    """
    if not text:
        return "No text provided."

    # Pass the language pair on each call so callers can override the
    # defaults the pipeline was constructed with.
    result = pipe(text, src_lang=source_lang, tgt_lang=target_lang)

    # The pipeline returns a list of dicts; take the first hypothesis.
    return result[0]['translation_text']
# Build the Gradio interface. The three inputs mirror translate_text's
# (text, source_lang, target_lang) signature -- the original interface
# supplied only the text box, so Gradio called a 3-argument function with
# one value and every submission failed.
iface = gr.Interface(
    fn=translate_text,
    inputs=[
        gr.Textbox(label="Input Text"),
        gr.Textbox(label="Source Language (FLORES-200 code)", value="eng_Latn"),
        gr.Textbox(label="Target Language (FLORES-200 code)", value="hin_Deva"),
    ],
    outputs="text",
    title="NLLB-200 Distilled Translation API",
    description="A public API for the NLLB-200 translation model, with support for multiple languages."
)

# Launch the app only when executed as a script (not when imported).
if __name__ == "__main__":
    iface.launch()