# chatbot/app.py
# Hugging Face Space by Bharatmali999 — "Update app.py", commit 908347d (verified)
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
# Load the satvikag/chatbot model and tokenizer from Hugging Face.
# Both are fetched once at import time (downloads on first run).
model_name = "satvikag/chatbot"
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
# In-memory cache mapping an exact user input string to the reply previously
# generated for it, so repeated inputs skip a model.generate call.
# NOTE(review): unbounded — grows for the lifetime of the process.
cache: dict[str, str] = {}
# Handle one user turn and generate the chatbot's reply.
def chatbot_response(user_input: str) -> str:
    """Return the model's reply to ``user_input``.

    Replies are memoized in the module-level ``cache`` dict, so an exact
    repeat of a previous input is answered without calling the model.

    Args:
        user_input: Raw text typed by the user.

    Returns:
        The decoded model reply (special tokens stripped).
    """
    # Single dict lookup instead of `in` + index; a cached reply is never
    # None, so this test is safe.
    cached = cache.get(user_input)
    if cached is not None:
        return cached
    # Append EOS so the model sees a completed user turn.
    input_ids = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors='pt')
    output_ids = model.generate(
        input_ids,
        max_length=1000,
        pad_token_id=tokenizer.eos_token_id,
        no_repeat_ngram_size=2,
    )
    # generate() echoes the prompt first; decode only the newly generated
    # tokens that follow it.
    reply = tokenizer.decode(output_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True)
    cache[user_input] = reply
    return reply
# Create a simple text-in / text-out interface using Gradio.
# live=True re-runs chatbot_response as the user types instead of waiting
# for an explicit submit.
iface = gr.Interface(fn=chatbot_response, inputs="text", outputs="text", live=True)
# Launch the chatbot's web server.
iface.launch()