# Najeb_Bot / app.py
import gradio as gr
from transformers import GPT2LMHeadModel, GPT2Tokenizer
import torch
# Load the fine-tuned GPT-2 model and tokenizer from the Hugging Face Hub
model_path = 'sohiebwedyan/najeb_chat'  # Hub repo ID of the saved model and tokenizer
tokenizer = GPT2Tokenizer.from_pretrained(model_path)
model = GPT2LMHeadModel.from_pretrained(model_path)
# Generate a response from the model for a given prompt
def generate_response(prompt, max_length, temperature):
    inputs = tokenizer.encode(prompt, return_tensors='pt')
    # do_sample=True lets the temperature setting take effect; GPT-2 has no pad
    # token, so the EOS token id is used as pad_token_id during generation
    outputs = model.generate(inputs, max_length=max_length, temperature=temperature,
                             do_sample=True, pad_token_id=tokenizer.eos_token_id)
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response
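
# Optional local sanity check (a sketch, kept commented out so the Space only
# launches the Gradio UI below; the prompt here is illustrative, not from the app):
# print(generate_response("What is a subnet mask?", max_length=100, temperature=0.7))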
# Create the Gradio interface
iface = gr.Interface(
    fn=generate_response,
    inputs=[
        gr.Textbox(lines=2, placeholder="Enter your message", label="Input Prompt"),
        gr.Slider(minimum=10, maximum=512, step=10, value=100, label="Max Length"),
        gr.Slider(minimum=0.1, maximum=1.0, step=0.1, value=0.7, label="Temperature")
    ],
    outputs="text",
    title="Najeb GPT-2 Chatbot",
    description="This is a chatbot trained on networking questions and answers. Adjust the max length and temperature for different responses."
)
# Launch the app
if __name__ == "__main__":
    iface.launch()