# BEWBOT2 / app.py — Hugging Face Space entry point
# (Header lines below are Hub file-viewer chrome captured in the export,
#  kept as comments so the file remains valid Python.)
# PatSeal — "Update app.py" — commit 0a79fe1 (verified) — 925 Bytes
import gradio as gr
import tensorflow as tf
from transformers import GPT2Tokenizer, TFGPT2LMHeadModel
# Load the pre-trained GPT-2 model and tokenizer
# NOTE(review): from_pretrained downloads weights from the Hugging Face Hub
# on first run (network I/O) and caches them locally thereafter.
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
# GPT-2 has no dedicated pad token, so EOS is reused as the pad id.
model = TFGPT2LMHeadModel.from_pretrained("gpt2", pad_token_id=tokenizer.eos_token_id)
# Define a function for generating responses
def chatbot(input_text):
    """Generate a GPT-2 reply for *input_text*.

    Args:
        input_text: The user's prompt as a plain string.

    Returns:
        The newly generated text only — the echoed prompt is stripped.
    """
    # Tokenize the prompt into a TensorFlow tensor of token ids.
    input_ids = tokenizer.encode(input_text, return_tensors="tf")
    # Greedy decoding; max_length counts prompt tokens plus new tokens.
    output_ids = model.generate(
        input_ids,
        max_length=100,
        pad_token_id=tokenizer.eos_token_id,
    )
    # FIX: generate() returns prompt + continuation. Slice off the prompt
    # tokens so the chatbot does not parrot the user's input back at them.
    generated_ids = output_ids[0][input_ids.shape[-1]:]
    response_text = tokenizer.decode(generated_ids, skip_special_tokens=True)
    return response_text
# Create a Gradio interface for the chatbot
# Wire the chat function into a simple text-in / text-out Gradio UI
# and start the web server.
demo = gr.Interface(
    fn=chatbot,
    inputs="text",
    outputs="text",
    title="GPT-2 Chatbot",
    description="Ask me anything!",
)
demo.launch()