File size: 895 Bytes
5d201e7
4cced22
 
5d201e7
6d16bdb
 
 
 
 
4cced22
6d16bdb
 
 
 
4cced22
6d16bdb
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline, set_seed
import random

# Download (or load from cache) the instruction-tuned checkpoint and its
# tokenizer from the Hugging Face Hub. This runs at import time, so the
# module takes a while to load on first run.
tokenizer = AutoTokenizer.from_pretrained("breadlicker45/dough-instruct-base-001")
# Pad on the left — presumably because this is a decoder-only model, where
# right-padding would corrupt generation. TODO(review): confirm intent.
tokenizer.padding_side = 'left'
model = AutoModelForCausalLM.from_pretrained("breadlicker45/dough-instruct-base-001")

def generate_text(prompt):
    """Answer *prompt* with the dough-instruct model.

    The prompt is wrapped in a ``Q:\\n<prompt>\\nA:`` template, fed to a
    text-generation pipeline, and the template prefix is stripped from the
    model output so only the answer is returned.

    Args:
        prompt: The user's question as plain text.

    Returns:
        The generated answer string.
    """
    # Build the pipeline once and cache it on the function; the original
    # rebuilt it on every call, which is needlessly expensive.
    if not hasattr(generate_text, "_generator"):
        generate_text._generator = pipeline(
            'text-generation', model=model, tokenizer=tokenizer,
            do_sample=True)
    # Re-seed so repeated calls with the same prompt can produce
    # different samples.
    set_seed(random.randint(1, 10000))
    text = "Q:\n" + prompt + "\nA:"
    # do_sample is already set on the pipeline; no need to repeat it here.
    answer = generate_text._generator(
        text, max_length=50, min_length=10, temperature=0.8, top_p=0.9)
    generated = answer[0]['generated_text']
    # Strip only the leading prompt template. The original used
    # replace(text, ''), which would also delete any later occurrence of
    # the template inside the generated answer itself.
    if generated.startswith(text):
        generated = generated[len(text):]
    return generated

# Build and launch the web UI.
# Gradio 3.x removed the gr.inputs / gr.outputs namespaces; components are
# now constructed directly from the top-level module, so the original
# gr.inputs.Textbox / gr.outputs.Textbox calls raise AttributeError on any
# current Gradio release.
input_text = gr.Textbox(lines=5, label="Input Text")
output_text = gr.Textbox(label="Generated Text")

gr.Interface(fn=generate_text, inputs=input_text, outputs=output_text).launch()