Files changed (3)
  1. README.md +1 -1
  2. app.py +8 -9
  3. requirements.txt +1 -2
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 🦙
4
  colorFrom: yellow
5
  colorTo: blue
6
  sdk: gradio
7
- sdk_version: 2.9.1
8
  app_file: app.py
9
  pinned: false
10
  license: apache-2.0
 
4
  colorFrom: yellow
5
  colorTo: blue
6
  sdk: gradio
7
+ sdk_version: 5.35.0
8
  app_file: app.py
9
  pinned: false
10
  license: apache-2.0
app.py CHANGED
@@ -1,21 +1,20 @@
1
  import gradio as gr
2
  from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
3
 
 
 
 
 
4
  def translate(text):
5
- model_name = 'hackathon-pln-es/t5-small-finetuned-spanish-to-quechua'
6
- model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
7
- tokenizer = AutoTokenizer.from_pretrained(model_name)
8
-
9
  input = tokenizer(text, return_tensors="pt")
10
  output = model.generate(input["input_ids"], max_length=40, num_beams=4, early_stopping=True)
11
-
12
  return tokenizer.decode(output[0], skip_special_tokens=True)
13
 
14
  title = "Spanish to Quechua translation 🦙"
15
- inputs = gr.inputs.Textbox(lines=1, label="Text in Spanish")
16
- outputs = [gr.outputs.Textbox(label="Translated text in Quechua")]
17
 
18
- description = "Here use the [t5-small-finetuned-spanish-to-quechua-model](https://huggingface.co/hackathon-pln-es/t5-small-finetuned-spanish-to-quechua) that was trained with [spanish-to-quechua dataset](https://huggingface.co/datasets/hackathon-pln-es/spanish-to-quechua)."
19
 
20
  article = '''
21
  ## Challenges
@@ -35,4 +34,4 @@ examples=[
35
  ]
36
 
37
  iface = gr.Interface(fn=translate, inputs=inputs, outputs=outputs, theme="grass", css="styles.css", examples=examples, title=title, description=description, article=article)
38
- iface.launch(enable_queue=True)
 
1
  import gradio as gr
2
  from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
3
 
4
+ model_name = 'hackathon-pln-es/t5-small-finetuned-spanish-to-quechua'
5
+ model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
6
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
7
+
8
  def translate(text):
 
 
 
 
9
  input = tokenizer(text, return_tensors="pt")
10
  output = model.generate(input["input_ids"], max_length=40, num_beams=4, early_stopping=True)
 
11
  return tokenizer.decode(output[0], skip_special_tokens=True)
12
 
13
  title = "Spanish to Quechua translation 🦙"
14
+ inputs = gr.Textbox(lines=1, label="Text in Spanish")
15
+ outputs = [gr.Textbox(label="Translated text in Quechua")]
16
 
17
+ description = "Here we use the [t5-small-finetuned-spanish-to-quechua-model](https://huggingface.co/hackathon-pln-es/t5-small-finetuned-spanish-to-quechua) that was trained with [spanish-to-quechua dataset](https://huggingface.co/datasets/hackathon-pln-es/spanish-to-quechua)."
18
 
19
  article = '''
20
  ## Challenges
 
34
  ]
35
 
36
  iface = gr.Interface(fn=translate, inputs=inputs, outputs=outputs, theme="grass", css="styles.css", examples=examples, title=title, description=description, article=article)
37
+ iface.queue().launch()
requirements.txt CHANGED
@@ -1,3 +1,2 @@
1
- gradio
2
  transformers
3
- torch
 
 
1
  transformers
2
+ torch