lruizap committed on
Commit
a9fe253
·
1 Parent(s): b7b4d72

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -4
app.py CHANGED
@@ -31,9 +31,9 @@ def useZephyr(prompt):
31
  return outputs[0]["generated_text"]
32
 
33
 
34
- def generatePrompt(prompt, max_new_tokens):
35
  batch = tokenizer(prompt, return_tensors="pt")
36
- generated_ids = model.generate(batch["input_ids"], max_new_tokens=max_new_tokens)
37
  output = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
38
  prompt = output[0]
39
 
@@ -55,13 +55,12 @@ def generate_prompt(prompt, max_new_tokens):
55
 
56
 
57
  input_prompt = gr.Textbox(label="Prompt", value="photographer")
58
- input_maxtokens = gr.Textbox(label="Max tokens", value="150")
59
  output_component = gr.Textbox(label="Output")
60
  examples = [["photographer"], ["developer"], ["teacher"], [
61
  "human resources staff"], ["recipe for ham croquettes"]]
62
  description = ""
63
 
64
- PerfectGPT = gr.Interface(generatePrompt, inputs=[input_prompt, input_maxtokens], outputs=output_component,
65
  examples=examples, title="🗿 PerfectGPT v1 🗿", description=description)
66
 
67
  PerfectGPT.launch()
 
31
  return outputs[0]["generated_text"]
32
 
33
 
34
+ def generatePrompt(prompt):
35
  batch = tokenizer(prompt, return_tensors="pt")
36
+ generated_ids = model.generate(batch["input_ids"])
37
  output = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
38
  prompt = output[0]
39
 
 
55
 
56
 
57
  input_prompt = gr.Textbox(label="Prompt", value="photographer")
 
58
  output_component = gr.Textbox(label="Output")
59
  examples = [["photographer"], ["developer"], ["teacher"], [
60
  "human resources staff"], ["recipe for ham croquettes"]]
61
  description = ""
62
 
63
+ PerfectGPT = gr.Interface(generatePrompt, inputs=input_prompt, outputs=output_component,
64
  examples=examples, title="🗿 PerfectGPT v1 🗿", description=description)
65
 
66
  PerfectGPT.launch()