# studychatbot / app.py
# Hugging Face Space app (author: MissieMcCown) — a simple study-assistance
# chatbot: GPT-2 text generation behind a Gradio interface.
import gradio as gr
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
# Initialize pre-trained model and tokenizer (loaded once at import time and
# shared by every request the app serves).
model_name = "gpt2"  # You can change this to another causal-LM checkpoint if needed
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
# Create a pipeline for text generation; `generator` is the single entry point
# the chatbot handler below uses.
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
def chatbot_response(user_input):
    """Generate a chatbot reply for *user_input* with the GPT-2 pipeline.

    Parameters
    ----------
    user_input : str
        The question or prompt typed by the user.

    Returns
    -------
    str
        The generated text. Note the text-generation pipeline echoes the
        prompt, so the reply includes *user_input* as a prefix.
    """
    # Robustness: avoid sending an empty/whitespace prompt to the model.
    if not user_input or not user_input.strip():
        return "Please enter a question."
    # BUG FIX: temperature/top_k only take effect when sampling is enabled;
    # without do_sample=True the pipeline decodes greedily and silently
    # ignores both parameters.
    response = generator(
        user_input,
        max_length=100,           # total length cap (prompt + generated tokens)
        num_return_sequences=1,
        do_sample=True,
        temperature=0.7,
        top_k=50,
    )
    # The pipeline returns a list of dicts, one per returned sequence.
    return response[0]['generated_text']
# --- Gradio interface -----------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("# Study Assistance Chatbot")
    gr.Markdown("Welcome! Ask me anything related to your academic studies.")

    with gr.Row():
        # Left column: the user's question and the submit control.
        with gr.Column():
            question_box = gr.Textbox(label="Enter your question here:")
            ask_button = gr.Button("Submit")
        # Right column: read-only box that displays the model's reply.
        with gr.Column():
            answer_box = gr.Textbox(label="Chatbot Response", interactive=False)

    # Wire the button to the handler: question in, generated reply out.
    ask_button.click(chatbot_response, inputs=question_box, outputs=answer_box)

demo.launch()