aliabd commited on
Commit
b016259
·
1 Parent(s): d6e5c13

Upload folder using huggingface_hub

Browse files
Files changed (5) hide show
  1. DESCRIPTION.md +1 -0
  2. README.md +6 -6
  3. requirements.txt +3 -0
  4. run.ipynb +1 -0
  5. run.py +33 -0
DESCRIPTION.md ADDED
@@ -0,0 +1 @@
 
 
1
+ This translation demo takes in the text, source and target languages, and returns the translation. It uses the Transformers library to set up the model and has a title, description, and example.
README.md CHANGED
@@ -1,12 +1,12 @@
 
1
  ---
2
- title: Translation 3-x
3
  emoji: 🔥
4
- colorFrom: green
5
  colorTo: indigo
6
  sdk: gradio
7
- sdk_version: 4.3.0
8
- app_file: app.py
9
  pinned: false
 
10
  ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
+
2
  ---
3
+ title: translation_3-x
4
  emoji: 🔥
5
+ colorFrom: indigo
6
  colorTo: indigo
7
  sdk: gradio
8
+ sdk_version: 3.50.1
9
+ app_file: run.py
10
  pinned: false
11
+ hf_oauth: true
12
  ---
 
 
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ git+https://github.com/huggingface/transformers
2
+ gradio
3
+ torch
run.ipynb ADDED
@@ -0,0 +1 @@
 
 
1
+ {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: translation\n", "### This translation demo takes in the text, source and target languages, and returns the translation. It uses the Transformers library to set up the model and has a title, description, and example.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio git+https://github.com/huggingface/transformers gradio torch"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline\n", "import torch\n", "\n", "# this model was loaded from https://hf.co/models\n", "model = AutoModelForSeq2SeqLM.from_pretrained(\"facebook/nllb-200-distilled-600M\")\n", "tokenizer = AutoTokenizer.from_pretrained(\"facebook/nllb-200-distilled-600M\")\n", "device = 0 if torch.cuda.is_available() else -1\n", "LANGS = [\"ace_Arab\", \"eng_Latn\", \"fra_Latn\", \"spa_Latn\"]\n", "\n", "def translate(text, src_lang, tgt_lang):\n", " \"\"\"\n", " Translate the text from source lang to target lang\n", " \"\"\"\n", " translation_pipeline = pipeline(\"translation\", model=model, tokenizer=tokenizer, src_lang=src_lang, tgt_lang=tgt_lang, max_length=400, device=device)\n", " result = translation_pipeline(text)\n", " return result[0]['translation_text']\n", "\n", "demo = gr.Interface(\n", " fn=translate,\n", " inputs=[\n", " gr.components.Textbox(label=\"Text\"),\n", " gr.components.Dropdown(label=\"Source Language\", choices=LANGS),\n", " gr.components.Dropdown(label=\"Target Language\", choices=LANGS),\n", " ],\n", " outputs=[\"text\"],\n", " examples=[[\"Building a translation demo with Gradio is so easy!\", \"eng_Latn\", \"spa_Latn\"]],\n", " 
cache_examples=False,\n", " title=\"Translation Demo\",\n", " description=\"This demo is a simplified version of the original [NLLB-Translator](https://huggingface.co/spaces/Narrativaai/NLLB-Translator) space\"\n", ")\n", "\n", "demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
run.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
import torch

# this model was loaded from https://hf.co/models
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")
tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M")
# Hugging Face convention: device 0 = first GPU, -1 = CPU.
device = 0 if torch.cuda.is_available() else -1

# FLORES-200 language codes offered in the UI dropdowns (script suffix included).
LANGS = ["ace_Arab", "eng_Latn", "fra_Latn", "spa_Latn"]

# Build the translation pipeline ONCE at import time. The original code created
# a fresh pipeline object on every call to translate(), needlessly re-wrapping
# the model and tokenizer per request. TranslationPipeline accepts src_lang /
# tgt_lang as call-time arguments, so the languages can still vary per call.
translation_pipeline = pipeline(
    "translation",
    model=model,
    tokenizer=tokenizer,
    max_length=400,
    device=device,
)


def translate(text, src_lang, tgt_lang):
    """
    Translate `text` from `src_lang` to `tgt_lang`.

    Args:
        text: The input text to translate.
        src_lang: Source language as a FLORES-200 code (e.g. "eng_Latn").
        tgt_lang: Target language as a FLORES-200 code (e.g. "spa_Latn").

    Returns:
        The translated text as a string.
    """
    result = translation_pipeline(text, src_lang=src_lang, tgt_lang=tgt_lang)
    return result[0]['translation_text']


demo = gr.Interface(
    fn=translate,
    inputs=[
        gr.components.Textbox(label="Text"),
        gr.components.Dropdown(label="Source Language", choices=LANGS),
        gr.components.Dropdown(label="Target Language", choices=LANGS),
    ],
    outputs=["text"],
    examples=[["Building a translation demo with Gradio is so easy!", "eng_Latn", "spa_Latn"]],
    cache_examples=False,
    title="Translation Demo",
    description="This demo is a simplified version of the original [NLLB-Translator](https://huggingface.co/spaces/Narrativaai/NLLB-Translator) space"
)

demo.launch()