breadlicker45's picture
Update app.py
4cced22
raw
history blame contribute delete
895 Bytes
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline, set_seed
import random
# Load the tokenizer and causal-LM weights for the instruct model this app serves.
tokenizer = AutoTokenizer.from_pretrained("breadlicker45/dough-instruct-base-001")
# Left-padding: decoder-only models generate from the right edge of the input,
# so padding must go on the left to keep prompt and continuation adjacent.
tokenizer.padding_side = 'left'
model = AutoModelForCausalLM.from_pretrained("breadlicker45/dough-instruct-base-001")
def generate_text(prompt):
    """Generate an answer to *prompt* via a "Q: ... A:" template.

    Parameters
    ----------
    prompt : str
        The user's question; it is wrapped in the Q/A template before generation.

    Returns
    -------
    str
        The model's continuation with the leading prompt template removed.
    """
    # Build the generation pipeline once and cache it on the function object;
    # the original rebuilt the pipeline on every request, which re-wraps the
    # model/tokenizer needlessly. (do_sample is set here once, not repeated
    # per-call as before.)
    if not hasattr(generate_text, "_generator"):
        generate_text._generator = pipeline(
            'text-generation', model=model, tokenizer=tokenizer, do_sample=True
        )
    generator = generate_text._generator
    # Randomize the seed so identical prompts can produce different samples.
    set_seed(random.randint(1, 10000))
    # Same template text as the original triple-quoted concatenation.
    text = "Q:\n" + prompt + "\nA:"
    answer = generator(text, max_length=50, min_length=10,
                       temperature=0.8, top_p=0.9)
    generated = answer[0]['generated_text']
    # removeprefix strips only the leading template; the original used
    # str.replace(text, ''), which would also delete any later occurrence
    # of the template inside the generated answer.
    return generated.removeprefix(text)
# Gradio UI: one multi-line text box in, generated text out.
# gr.inputs.Textbox / gr.outputs.Textbox were deprecated in Gradio 3.x and
# removed in Gradio 4; the equivalent components live at the top level of
# the gradio namespace and behave identically here.
input_text = gr.Textbox(lines=5, label="Input Text")
output_text = gr.Textbox(label="Generated Text")
gr.Interface(fn=generate_text, inputs=input_text, outputs=output_text).launch()