AdityaQB committed on
Commit
f591b3e
·
verified ·
1 Parent(s): 2ca519c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -3
app.py CHANGED
@@ -1,7 +1,7 @@
1
  import gradio as gr
2
  from transformers import AutoTokenizer, TFAutoModelForCausalLM
3
 
4
- def get_response(name,text):
5
  tokenizer = AutoTokenizer.from_pretrained("gpt2")
6
  model = TFAutoModelForCausalLM.from_pretrained("gpt2")
7
  model.config.pad_token_id = model.config.eos_token_id
@@ -9,7 +9,6 @@ def get_response(name,text):
9
 
10
  generated = model.generate(**inputs, do_sample=True, seed=(42, 0), max_new_tokens=20, temperature=.1)
11
  return tokenizer.decode(generated[0])
12
- # print("Sampling output: ", tokenizer.decode(generated[0]))
13
 
14
- iface = gr.Interface(fn=get_response, inputs="text", outputs="text")
15
  iface.launch()
 
1
  import gradio as gr
2
  from transformers import AutoTokenizer, TFAutoModelForCausalLM
3
 
4
+ def get_response(text):
5
  tokenizer = AutoTokenizer.from_pretrained("gpt2")
6
  model = TFAutoModelForCausalLM.from_pretrained("gpt2")
7
  model.config.pad_token_id = model.config.eos_token_id
 
9
 
10
  generated = model.generate(**inputs, do_sample=True, seed=(42, 0), max_new_tokens=20, temperature=.1)
11
  return tokenizer.decode(generated[0])
 
12
 
13
# Build the Gradio UI: one text input, one text output, backed by get_response.
# NOTE(review): `share` is not a parameter of gr.Interface(); it belongs to
# launch(), which creates a public share link when True.
iface = gr.Interface(fn=get_response, inputs="text", outputs="text")
iface.launch(share=True)