import gradio as gr
from transformers import GPT2Tokenizer, GPT2LMHeadModel, TextDataset, DataCollatorForLanguageModeling
from transformers import Trainer, TrainingArguments
from os.path import dirname
import torch
# Path/identifier of the fine-tuned GPT-2 checkpoint to load.
# NOTE(review): "change" looks like a placeholder — confirm the real
# directory of the saved model before deploying.
model_output_path="change"
# Load the language model and its matching tokenizer from the same
# checkpoint so vocabulary and weights stay consistent.
my_model = GPT2LMHeadModel.from_pretrained(model_output_path)
my_tokenizer = GPT2Tokenizer.from_pretrained(model_output_path)
def generate_response(model, tokenizer, prompt, max_length=200):
    """Generate a text continuation for *prompt* with a causal LM.

    Parameters
    ----------
    model : a model exposing ``generate`` (e.g. GPT2LMHeadModel)
    tokenizer : a tokenizer exposing ``encode``/``decode`` and
        ``eos_token_id`` (e.g. GPT2Tokenizer)
    prompt : str
        The text to continue.
    max_length : int, optional
        Maximum total sequence length (prompt tokens included).

    Returns
    -------
    str
        The decoded generation, special tokens stripped.
    """
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    # All prompt positions are real tokens (no padding), so the mask is all ones.
    attention_mask = torch.ones_like(input_ids)
    # Inference only: disable autograd so generate() does not build a
    # computation graph (saves memory and time).
    with torch.no_grad():
        output = model.generate(
            input_ids,
            max_length=max_length,
            num_return_sequences=1,
            attention_mask=attention_mask,
            # GPT-2 has no pad token; reuse EOS to silence the generate() warning.
            pad_token_id=tokenizer.eos_token_id,
        )
    return tokenizer.decode(output[0], skip_special_tokens=True)
def predict(prompt):
    """Gradio callback: return the model's generated reply for *prompt*."""
    return generate_response(my_model, my_tokenizer, prompt)
# Minimal Gradio UI: one text box in, generated text out, backed by predict().
iface = gr.Interface(fn=predict, inputs="text", outputs="text")
# Start the local web server (blocks until interrupted).
iface.launch()