File size: 2,212 Bytes
978081f
5941c73
3cd4511
5941c73
 
 
 
 
3cd4511
5941c73
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3cd4511
 
 
5941c73
3cd4511
5941c73
3cd4511
 
 
5941c73
 
 
 
978081f
 
3cd4511
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Load the conversational model used for free-form (non-FAQ) replies.
# NOTE(review): GPT-J-6B is very large (~24 GB in fp32) and from_pretrained
# downloads the full weights on first run — confirm the deployment host can
# actually hold this model before shipping.
model_name = "EleutherAI/gpt-j-6B"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
# max_length=200 caps the TOTAL token count (prompt + completion) per call.
chatbot_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer, max_length=200)

# Canned answers for common questions, matched verbatim against the user's
# input before falling back to the language model.
faqs = {
    "How do I enroll in a course?": "To enroll, go to our website, select the course you're interested in, and click 'Enroll Now'. Follow the on-screen instructions.",
    "What is the refund policy?": "We offer a full refund within the first 14 days if you are not satisfied with the course.",
    "How can I access my course materials?": "Once you enroll, you can access course materials in the 'My Courses' section of your account.",
    "Are there any live sessions available?": "Yes, many courses include live sessions. You can view the schedule in the course overview.",
    "How can I get a certificate?": "Complete all modules and pass the final assessment with a minimum score of 70% to receive your certificate.",
    # Add more FAQs as needed
}

# Function to generate a response with FAQs and model for other queries
def generate_response(user_input, history=None):
    """Answer a student query, preferring an instant FAQ answer.

    Args:
        user_input: Raw text the student typed.
        history: Chat history as a list of (user, bot) tuples. Gradio's
            "state" input passes None on the first call.

    Returns:
        A ("", updated_history) pair: the empty string is the first output
        component's value, and the history (with the new exchange appended)
        is written back into Gradio state.
    """
    # Fix: the original used `history=[]`, a mutable default shared across
    # every call that omits the argument, leaking one conversation's
    # history into the next. Use the None sentinel instead.
    if history is None:
        history = []

    # Exact-match FAQ lookup gives a deterministic answer with no model cost.
    if user_input in faqs:
        response = faqs[user_input]
    else:
        # Fall back to the language model for non-FAQ queries.
        prompt = f"Student: {user_input}\nAssistant:"
        model_response = chatbot_pipeline(prompt)[0]['generated_text']
        # text-generation pipelines echo the prompt; keep only the part
        # after the final "Assistant:" marker.
        response = model_response.split("Assistant:")[-1].strip()

    history.append((user_input, response))
    return "", history

# Real-time chatbot UI with Gradio
iface = gr.Interface(
    fn=generate_response,
    # "state" carries the chat-history list between calls; Gradio passes
    # None for it on the first invocation.
    inputs=["text", "state"],
    # NOTE(review): generate_response returns "" as its first value, so the
    # text output component will always render empty — confirm whether the
    # response was meant to be shown here (or via a gr.Chatbot component).
    outputs=["text", "state"],
    # live=True re-runs fn on every input change (each keystroke), which
    # triggers a full model generation per change for non-FAQ text — verify
    # this is intended given the model's cost.
    live=True,
    title="Edutech Student Chatbot",
    description="Ask me anything about courses, enrollment, certification, and more!",
    # NOTE(review): the `theme` string shorthand and the `css` kwarg on
    # gr.Interface were changed/removed in newer Gradio releases — confirm
    # against the pinned Gradio version.
    theme="compact",
    css=".gradio-container { background-color: #f5f5f5; font-family: Arial, sans-serif; }"
)

# Launch the chatbot interface (blocks and serves the web UI).
iface.launch()