# NOTE(review): removed 190 lines of corrupted non-code residue (git object
# hashes, a "File size" banner, and a bare 1-95 number column) that made this
# file a Python syntax error. No code or prose content was lost.
import os
import openai
import gradio as gr
from PIL import Image
import pytesseract

# Read the OpenAI API key from the environment (on Hugging Face Spaces this
# is populated from the Space's Secrets). os.getenv returns None when the
# variable is missing, in which case the API calls below fail with an
# authentication error rather than at this line.
api_key = os.getenv("OPENAI_API_KEY")

# Configure the module-level credential used by all openai.* calls below.
openai.api_key = api_key

# --- AI Features ---
def generate_lesson_plan(subject, grade, image=None):
    """Generate a detailed lesson plan for *subject* at *grade* level.

    If *image* (a PIL image) is provided, text OCR'd from it is appended to
    the prompt so the model can incorporate the uploaded material.

    Returns the model's reply as a stripped string.
    """
    image_text = extract_text(image) if image else ""
    prompt = f"Create a detailed lesson plan for {subject} at grade {grade} level.\n\n{image_text}"
    # The legacy Completions endpoint and the text-davinci-003 model were
    # retired by OpenAI (January 2024), so the original call always failed.
    # Use the chat-completions API instead; OpenAI() reads OPENAI_API_KEY
    # from the environment, matching the module-level setup.
    client = openai.OpenAI()
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": prompt}],
        max_tokens=500,
    )
    return response.choices[0].message.content.strip()

def grade_student_answer(question, student_answer, image=None):
    """Grade *student_answer* against *question* and return model feedback.

    If *image* (a PIL image) is provided, OCR'd text from it (e.g. a photo
    of handwritten work) is included in the prompt.

    Returns the model's reply as a stripped string.
    """
    image_text = extract_text(image) if image else ""
    prompt = f"Question: {question}\nStudent's Answer: {student_answer}\n\n{image_text}\n\nGrade this answer and provide feedback."
    # The legacy Completions endpoint and the text-davinci-003 model were
    # retired by OpenAI (January 2024), so the original call always failed.
    # Use the chat-completions API instead; OpenAI() reads OPENAI_API_KEY
    # from the environment, matching the module-level setup.
    client = openai.OpenAI()
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": prompt}],
        max_tokens=500,
    )
    return response.choices[0].message.content.strip()

def track_progress(notes, image=None):
    """Summarize and analyze free-form student progress *notes*.

    If *image* (a PIL image) is provided, OCR'd text from it is appended to
    the prompt alongside the typed notes.

    Returns the model's reply as a stripped string.
    """
    image_text = extract_text(image) if image else ""
    prompt = f"Summarize and analyze the following student progress notes:\n{notes}\n\n{image_text}"
    # The legacy Completions endpoint and the text-davinci-003 model were
    # retired by OpenAI (January 2024), so the original call always failed.
    # Use the chat-completions API instead; OpenAI() reads OPENAI_API_KEY
    # from the environment, matching the module-level setup.
    client = openai.OpenAI()
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": prompt}],
        max_tokens=500,
    )
    return response.choices[0].message.content.strip()

def extract_text_from_image(image):
    """OCR *image* and ask the model for educational insight on the text.

    Returns the model's reply as a stripped string, or a short prompt to
    upload an image when called with None.
    """
    # The Gradio image input can be None when the user clicks the button
    # without uploading anything; the original crashed inside pytesseract.
    if image is None:
        return "Please upload an image first."
    text = pytesseract.image_to_string(image)
    prompt = f"Extracted text from image:\n{text}\n\nProvide educational insight or summary."
    # The legacy Completions endpoint and the text-davinci-003 model were
    # retired by OpenAI (January 2024), so the original call always failed.
    # Use the chat-completions API instead; OpenAI() reads OPENAI_API_KEY
    # from the environment, matching the module-level setup.
    client = openai.OpenAI()
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": prompt}],
        max_tokens=500,
    )
    return response.choices[0].message.content.strip()

def extract_text(image):
    """Return OCR'd text from *image*, or the empty string when it is None."""
    # Guard clause: the image inputs in the UI are optional.
    if image is None:
        return ""
    return pytesseract.image_to_string(image)

# --- Gradio Interface ---
with gr.Blocks() as demo:
    gr.Markdown("## πŸ‘©β€πŸ« Teacher's AI Assistant")

    with gr.Tabs():
        with gr.TabItem("πŸ“˜ Lesson Plan Generator"):
            subject = gr.Dropdown(choices=["Math", "Science", "English", "History"], label="Subject")
            grade = gr.Dropdown(choices=[str(i) for i in range(1, 13)], label="Grade Level")
            image1 = gr.Image(type="pil", label="Optional: Upload related material")
            lesson_btn = gr.Button("Generate Lesson Plan")
            lesson_output = gr.Textbox(label="AI Lesson Plan", lines=15)
            lesson_btn.click(generate_lesson_plan, [subject, grade, image1], lesson_output)

        with gr.TabItem("πŸ“ Grade Answer"):
            question = gr.Textbox(label="Question")
            student_answer = gr.Textbox(label="Student's Answer")
            image2 = gr.Image(type="pil", label="Optional: Upload student work")
            grade_btn = gr.Button("Grade")
            grade_output = gr.Textbox(label="Feedback", lines=8)
            grade_btn.click(grade_student_answer, [question, student_answer, image2], grade_output)

        with gr.TabItem("πŸ“ˆ Progress Tracker"):
            notes = gr.Textbox(label="Student Progress Notes", lines=8)
            image3 = gr.Image(type="pil", label="Optional: Upload progress notes")
            progress_btn = gr.Button("Analyze")
            progress_output = gr.Textbox(label="Analysis", lines=10)
            progress_btn.click(track_progress, [notes, image3], progress_output)

        with gr.TabItem("πŸ–ΌοΈ Image Upload"):
            image_input = gr.Image(type="pil", label="Upload an image of handwritten or printed work")
            img_btn = gr.Button("Analyze Image")
            image_output = gr.Textbox(label="AI Response from Image", lines=12)
            img_btn.click(extract_text_from_image, image_input, image_output)

demo.launch(debug=True, share=True)