breadlicker45 committed on
Commit
4cced22
·
1 Parent(s): 9df55a0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -2
app.py CHANGED
@@ -1,16 +1,18 @@
1
  import gradio as gr
2
- from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 
3
 
4
  tokenizer = AutoTokenizer.from_pretrained("breadlicker45/dough-instruct-base-001")
5
  tokenizer.padding_side = 'left'
6
  model = AutoModelForCausalLM.from_pretrained("breadlicker45/dough-instruct-base-001")
7
 
8
  def generate_text(prompt):
 
9
  generator = pipeline('text-generation', model=model, tokenizer=tokenizer, do_sample=True)
10
  text = """Q:
11
  """ + prompt + """
12
  A:"""
13
- answer = generator(text, max_length=50, min_length=10, temperature=0.8, top_p=0.9)
14
  lst = answer[0]['generated_text']
15
  out = lst.replace(text, '''''')
16
  return out
 
1
  import gradio as gr
2
+ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline, set_seed
3
+ import random
4
 
5
  tokenizer = AutoTokenizer.from_pretrained("breadlicker45/dough-instruct-base-001")
6
  tokenizer.padding_side = 'left'
7
  model = AutoModelForCausalLM.from_pretrained("breadlicker45/dough-instruct-base-001")
8
 
9
  def generate_text(prompt):
10
+ set_seed((random.randint(1, 10000)))
11
  generator = pipeline('text-generation', model=model, tokenizer=tokenizer, do_sample=True)
12
  text = """Q:
13
  """ + prompt + """
14
  A:"""
15
+ answer = generator(text, max_length=50, min_length=10, temperature=0.8, top_p=0.9, do_sample=True)
16
  lst = answer[0]['generated_text']
17
  out = lst.replace(text, '''''')
18
  return out