import gradio as gr
from huggingface_hub import InferenceClient
import os
import json

# Initialize the Hugging Face Inference Client.
# The token is read from the HF_TOKEN environment variable (set it as a Space
# secret or export it locally) instead of hard-coding a key in the source.
client = InferenceClient(token=os.environ.get("HF_TOKEN"))
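
# Optional sanity check: without a token, serverless Inference API calls may be
# rejected or heavily rate-limited, so warn early rather than at request time.
if not os.environ.get("HF_TOKEN"):
    print("⚠️ HF_TOKEN is not set; Inference API requests may fail.")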

# 🎯 Study-only question filter: a simple case-insensitive substring match, so
# questions that avoid these keywords are rejected even if they are educational.
def is_study_related(question):
    educational_keywords = [
        "math", "science", "ict", "english", "chemistry", "physics", "biology",
        "grammar", "essay", "study", "lesson", "equation", "formula", "computer",
        "programming", "AI", "machine learning", "technology", "education",
        "subject", "exam", "revision", "teacher", "learning", "school", "topic"
    ]

    for word in educational_keywords:
        if word.lower() in question.lower():
            return True
    return False
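
# Example behaviour of the filter (illustrative calls):
#   is_study_related("Explain Newton's second law in physics")  -> True   ("physics" matches)
#   is_study_related("What's the weather today?")               -> False  (no keyword present)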

# Memory save/load
def save_memory(history):
    with open("chat_memory.json", "w") as f:
        json.dump(history, f)

def load_memory():
    if os.path.exists("chat_memory.json"):
        with open("chat_memory.json", "r") as f:
            return json.load(f)
    return []
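
# Note: json.dump() stores the (user, bot) tuples as two-element lists, so a
# reloaded history contains lists rather than tuples; gr.Chatbot accepts either form.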

# Chat logic
def chat_with_model(message, history):
    # Ignore empty submissions: return the history unchanged and keep the textbox clear.
    if not message:
        return history, ""

    # 🚫 Block unnecessary/off-topic questions
    if not is_study_related(message):
        reply = "🚫 I'm sorry, but I can only answer study-related questions. Let's focus on learning!"
        history.append((message, reply))
        save_memory(history)
        return history, ""

    # Append user message to history
    history.append((message, ""))
    save_memory(history)

    # Generate the AI response. With streaming and details disabled,
    # text_generation() returns the generated text as a plain string.
    reply = client.text_generation(
        model="mistralai/Mixtral-8x7B-Instruct-v0.1",
        prompt=message,
        max_new_tokens=300,
        temperature=0.7
    )

    history[-1] = (message, reply)
    save_memory(history)

    return history, ""

# Load existing memory
memory = load_memory()

# Interface
with gr.Blocks(theme="soft") as demo:
    gr.Markdown("## 🤖 EduAI — Where Curiosity Meets Knowledge")
    chatbot = gr.Chatbot(label="EduAI Learning Assistant", value=memory)
    msg = gr.Textbox(label="Ask EduAI a study question...")
    clear = gr.Button("Clear Chat")

    # Submitting a message updates the chat history and clears the input textbox.
    msg.submit(chat_with_model, [msg, chatbot], [chatbot, msg])
    # The Clear button resets only the on-screen chat; chat_memory.json is left untouched.
    clear.click(lambda: [], None, chatbot)

# Launch app
demo.launch()
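
# Usage sketch (assumes this file is saved as app.py; the filename is illustrative):
#   export HF_TOKEN=hf_xxxxxxxx   # or add HF_TOKEN as a secret on a Hugging Face Space
#   python app.py                 # Gradio serves the UI on http://127.0.0.1:7860 by default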