| import gradio as gr | |
| from transformers import GPT2Tokenizer, GPT2LMHeadModel, TextDataset, DataCollatorForLanguageModeling | |
| from transformers import Trainer, TrainingArguments | |
| from os.path import dirname | |
# Directory containing the fine-tuned GPT-2 checkpoint and tokenizer files.
# NOTE(review): assumed to be a local path produced by a prior training run
# (config.json, pytorch_model.bin/safetensors, vocab/merges) — confirm it
# exists before deploying; from_pretrained raises if it is missing.
model_output_path = "model-dir"
# Load model and tokenizer once at import time so every request reuses them.
my_model = GPT2LMHeadModel.from_pretrained(model_output_path)
my_tokenizer = GPT2Tokenizer.from_pretrained(model_output_path)
def predict(prompt):
    """Generate a text continuation of *prompt* with the fine-tuned GPT-2 model.

    Args:
        prompt: Free-form input text from the Gradio textbox.

    Returns:
        The decoded model output (the prompt plus its generated continuation).
    """
    # BUG FIX: the original called generate_response(), which is not defined
    # or imported anywhere in this file — every request raised NameError.
    # Inline the standard encode -> generate -> decode pipeline instead.
    input_ids = my_tokenizer.encode(prompt, return_tensors="pt")
    output_ids = my_model.generate(
        input_ids,
        max_length=100,              # cap total length; tune as needed
        num_return_sequences=1,
        do_sample=True,              # sampled (non-greedy) continuations
        # GPT-2 has no pad token; using EOS silences the generate() warning.
        pad_token_id=my_tokenizer.eos_token_id,
    )
    return my_tokenizer.decode(output_ids[0], skip_special_tokens=True)
# Simple text-in / text-out web UI around predict().
iface = gr.Interface(fn=predict, inputs="text", outputs="text")

if __name__ == "__main__":
    # Guard the launch so importing this module (e.g. from tests or another
    # app) does not start a web server as a side effect.
    iface.launch()