|
|
import gradio as gr |
|
|
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline |
|
|
|
|
|
|
|
|
# Hugging Face model id for the generative fallback model.
# NOTE(review): GPT-J-6B is ~24 GB in fp32 — assumes the host has enough
# RAM/VRAM to load it; confirm before deploying.
model_name = "EleutherAI/gpt-j-6B"


# Tokenizer matching the model; downloaded (and cached) on first run.
tokenizer = AutoTokenizer.from_pretrained(model_name)


# The causal language model used to answer questions not covered by the FAQ dict.
model = AutoModelForCausalLM.from_pretrained(model_name)


# Text-generation pipeline used by generate_response(); max_length=200 caps
# the combined prompt + generated tokens.
chatbot_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer, max_length=200)
|
|
|
|
|
|
|
|
# Canned answers for common student questions. generate_response() returns
# these verbatim on an exact-match lookup of the user's input, skipping the
# (much slower) language-model call.
faqs = {


    "How do I enroll in a course?": "To enroll, go to our website, select the course you're interested in, and click 'Enroll Now'. Follow the on-screen instructions.",


    "What is the refund policy?": "We offer a full refund within the first 14 days if you are not satisfied with the course.",


    "How can I access my course materials?": "Once you enroll, you can access course materials in the 'My Courses' section of your account.",


    "Are there any live sessions available?": "Yes, many courses include live sessions. You can view the schedule in the course overview.",


    "How can I get a certificate?": "Complete all modules and pass the final assessment with a minimum score of 70% to receive your certificate.",




}
|
|
|
|
|
|
|
|
def generate_response(user_input, history=None):
    """Answer a student question, preferring canned FAQ answers.

    Exact matches against the ``faqs`` dict are returned verbatim; anything
    else is sent to the text-generation pipeline and the text after the
    final "Assistant:" marker is used as the reply.

    Args:
        user_input: The student's question as typed.
        history: List of (question, answer) tuples carried in gradio state.
            Defaults to None (gradio passes None on the first call).

    Returns:
        A (response, history) tuple: the answer text for the output box and
        the updated conversation history for the state component.
    """
    # Fix: the original used a mutable default (history=[]) which is shared
    # across calls; gradio also passes None for state initially, which would
    # have crashed .append() anyway.
    if history is None:
        history = []

    if user_input in faqs:
        # Exact-match FAQ hit — skip the expensive model call.
        response = faqs[user_input]
    else:
        prompt = f"Student: {user_input}\nAssistant:"
        model_response = chatbot_pipeline(prompt)[0]['generated_text']
        # The pipeline echoes the prompt; keep only the text after the
        # last "Assistant:" marker.
        response = model_response.split("Assistant:")[-1].strip()

    history.append((user_input, response))
    # Fix: the original returned "" as the first output, so the visible
    # answer box was always blank; return the response instead.
    return response, history
|
|
|
|
|
|
|
|
# Styling applied to the whole gradio page container.
_CUSTOM_CSS = ".gradio-container { background-color: #f5f5f5; font-family: Arial, sans-serif; }"

# Wire generate_response into a live gradio UI: a text box for the question
# plus a hidden state component that carries the conversation history between
# calls; the outputs mirror that pair.
iface = gr.Interface(
    fn=generate_response,
    inputs=["text", "state"],
    outputs=["text", "state"],
    live=True,
    title="Edutech Student Chatbot",
    description="Ask me anything about courses, enrollment, certification, and more!",
    theme="compact",
    css=_CUSTOM_CSS,
)

# Start the local web server (blocks until interrupted).
iface.launch()
|
|
|