Update app.py
Browse files
app.py
CHANGED
|
@@ -1,25 +1,46 @@
|
|
| 1 |
import gradio as gr
|
| 2 |
-
from transformers import pipeline
|
| 3 |
|
| 4 |
-
# Load
|
| 5 |
-
|
|
|
|
|
|
|
|
|
|
| 6 |
|
| 7 |
-
#
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 12 |
history.append((user_input, response))
|
| 13 |
return "", history
|
| 14 |
|
| 15 |
-
#
|
| 16 |
iface = gr.Interface(
|
| 17 |
-
fn=
|
| 18 |
inputs=["text", "state"],
|
| 19 |
outputs=["text", "state"],
|
| 20 |
live=True,
|
| 21 |
-
title="
|
| 22 |
-
description="
|
|
|
|
|
|
|
| 23 |
)
|
| 24 |
|
| 25 |
# Launch the chatbot interface
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
+
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
|
| 3 |
|
| 4 |
+
# Load the conversational model.
# NOTE(review): the original comment said "e.g., GPT-NeoX-20B or GPT-J-6B",
# but the checkpoint actually loaded below is GPT-J-6B only.
model_name = "EleutherAI/gpt-j-6B"
# First run downloads tokenizer + weights from the Hugging Face Hub.
# Presumably a ~6B-parameter model per the name — expect a long cold start
# and substantial RAM/VRAM use; confirm the hosting environment can hold it.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
# Text-generation pipeline used for non-FAQ queries.
# NOTE(review): max_length caps prompt + generated tokens combined, so long
# user inputs leave little room for the reply — consider max_new_tokens.
chatbot_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer, max_length=200)
|
| 9 |
|
| 10 |
+
# Define FAQs for instant responses.
# Lookup in generate_response is an exact string match (case- and
# punctuation-sensitive): only a verbatim question hits a canned answer;
# anything else falls through to the language model.
faqs = {
    "How do I enroll in a course?": "To enroll, go to our website, select the course you're interested in, and click 'Enroll Now'. Follow the on-screen instructions.",
    "What is the refund policy?": "We offer a full refund within the first 14 days if you are not satisfied with the course.",
    "How can I access my course materials?": "Once you enroll, you can access course materials in the 'My Courses' section of your account.",
    "Are there any live sessions available?": "Yes, many courses include live sessions. You can view the schedule in the course overview.",
    "How can I get a certificate?": "Complete all modules and pass the final assessment with a minimum score of 70% to receive your certificate.",
    # Add more FAQs as needed
}
|
| 19 |
+
|
| 20 |
+
# Function to generate a response: FAQs for known questions, model otherwise.
def generate_response(user_input, history=None):
    """Answer one student message and extend the chat history.

    Parameters
    ----------
    user_input : str
        The student's message, matched verbatim against the FAQ keys.
    history : list[tuple[str, str]] or None
        Prior (user, bot) exchanges. Defaults to None instead of the
        original mutable ``history=[]``: a ``[]`` default is shared
        across calls, and Gradio passes ``None`` as the initial "state"
        value, which would have crashed the original on ``.append``.

    Returns
    -------
    tuple[str, list]
        ``("", history)`` — the empty string clears the input textbox,
        the list feeds the "state" output back to Gradio.
    """
    if history is None:
        history = []
    # Exact-match FAQ lookup gives an instant canned answer.
    if user_input in faqs:
        response = faqs[user_input]
    else:
        # Generate a response with the model for non-FAQ queries.
        prompt = f"Student: {user_input}\nAssistant:"
        model_response = chatbot_pipeline(prompt)[0]['generated_text']
        # The pipeline echoes the prompt; keep only the text after the
        # last "Assistant:" marker.
        response = model_response.split("Assistant:")[-1].strip()
    history.append((user_input, response))
    return "", history
|
| 33 |
|
| 34 |
+
# Real-time chatbot UI with Gradio.
iface = gr.Interface(
    fn=generate_response,
    # "state" carries the conversation history between calls; Gradio
    # supplies None for it on the first call.
    inputs=["text", "state"],
    outputs=["text", "state"],
    live=True,  # re-run fn as the user types, not only on submit
    title="Edutech Student Chatbot",
    description="Ask me anything about courses, enrollment, certification, and more!",
    # NOTE(review): a string theme like "compact" is legacy Gradio styling;
    # confirm it is accepted by the installed Gradio version.
    theme="compact",
    css=".gradio-container { background-color: #f5f5f5; font-family: Arial, sans-serif; }"
)
# NOTE(review): no iface.launch() is visible in this view — presumably it
# follows just below; verify the app actually starts.
|
| 45 |
|
| 46 |
# Launch the chatbot interface
|