piccora committed on
Commit
10a67b8
·
verified ·
1 Parent(s): f49e12b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -2
app.py CHANGED
@@ -3,11 +3,12 @@ from transformers import TFAutoModelForSeq2SeqLM, AutoTokenizer
3
 
4
  model_name = "piccora/transformers"
5
  model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name)
 
6
 
7
  def predict(question):
8
- inputs = tokenizer_q(question, return_tensors="tf", padding=True)
9
  outputs = model.generate(inputs["input_ids"], max_length=50)
10
- answer = tokenizer_a.decode(outputs[0], skip_special_tokens=True)
11
  return answer
12
 
13
  interface = gr.Interface(fn=predict, inputs="text", outputs="text")
 
3
 
4
  model_name = "piccora/transformers"
5
  model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name)
6
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
7
 
8
  def predict(question):
9
+ inputs = tokenizer(question, return_tensors="tf", padding=True, truncation=True)
10
  outputs = model.generate(inputs["input_ids"], max_length=50)
11
+ answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
12
  return answer
13
 
14
  interface = gr.Interface(fn=predict, inputs="text", outputs="text")