File size: 6,555 Bytes
634b5dc
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
# feedback_generator.py
from llm_handler import get_gemini_response
from rag_manager import query_rag
from db_manager import get_student_characteristics
import prompts
import datetime
import streamlit as st

def get_events_summary_for_day(date_str: str, processed_chat_data: list = None) -> str:
    """Return a text summary of the day's events.

    Prefers the chat data already processed today (``processed_chat_data``);
    falls back to a RAG query filtered by date when no usable data is given.

    Args:
        date_str: Date string used for display and as a RAG metadata filter.
        processed_chat_data: Optional list of dicts, each expected to carry
            ``student_name`` and ``observation`` keys.

    Returns:
        A newline-joined bullet summary, or a "no records" message when
        neither the processed data nor RAG yields anything.
    """
    if processed_chat_data:
        summary_parts = []
        for item in processed_chat_data:
            # Skip malformed (non-dict) entries instead of crashing on .get;
            # this also makes the empty-summary fallback below reachable.
            if not isinstance(item, dict):
                continue
            student_name = item.get("student_name", "未知学生")
            observation = item.get("observation", "无具体描述")
            summary_parts.append(f"- {student_name}: {observation}")
        if summary_parts:
            return "\n".join(summary_parts)
        # Every entry was malformed: report it and fall through to RAG.
        st.info(f"当日处理的聊天数据为空或格式不正确 ({date_str})。")

    # Fallback to RAG if no direct processed_chat_data.
    # The query is intentionally general so it pulls daily highlights;
    # it relies on documents being tagged with a "date" metadata field.
    st.info(f"尝试从RAG中检索日期 {date_str} 的整体活动信息...")
    rag_results = query_rag(
        query_text=f"{date_str} 发生的关键事件和整体情况",
        n_results=10,  # a few diverse entries
        filter_metadata={"date": date_str}  # filter by date if metadata is set
    )
    if not rag_results:
        return f"关于日期 {date_str}:今日无特别记录或未能从RAG中检索到信息。"
    return f"关于日期 {date_str} 的记录:\n" + "\n".join([f"- {r}" for r in rag_results])


def generate_boss_feedback(today_events_summary: str):
    """Produce the boss-facing daily report from today's event summary.

    Returns a fixed "not enough information" message when the summary is
    empty or is itself a no-records placeholder.
    """
    no_data = (not today_events_summary) or ("无特别记录" in today_events_summary)
    if no_data:
        return "今日无足够信息生成老板反馈。"
    filled_prompt = prompts.BOSS_FEEDBACK_USER_PROMPT_TEMPLATE.format(
        today_events_summary=today_events_summary
    )
    return get_gemini_response(filled_prompt, system_instruction=prompts.BOSS_FEEDBACK_SYSTEM_PROMPT)

def generate_public_feedback(today_events_summary: str):
    """Produce the public-facing daily feedback from today's event summary.

    Returns a fixed "not enough information" message when the summary is
    empty or is itself a no-records placeholder.
    """
    no_data = (not today_events_summary) or ("无特别记录" in today_events_summary)
    if no_data:
        return "今日无足够信息生成公共反馈。"
    filled_prompt = prompts.PUBLIC_FEEDBACK_USER_PROMPT_TEMPLATE.format(
        today_events_summary=today_events_summary
    )
    return get_gemini_response(filled_prompt, system_instruction=prompts.PUBLIC_FEEDBACK_SYSTEM_PROMPT)

def generate_parent_feedback(student_name: str, mode: str, date_str: str, processed_student_data_today: list = None):
    """Generate parent-facing feedback for one student.

    Args:
        student_name: The student the feedback is about.
        mode: One of:
            - "normal": use today's specific events (processed data, then RAG);
            - "lazy": recombine the student's past positive events from RAG;
            - "llm_direct": generate purely from the student's characteristics.
        date_str: Date used for the normal-mode RAG lookup.
        processed_student_data_today: Optional list of dicts extracted from
            today's chat (keys: ``student_name``, ``observation``).

    Returns:
        The LLM-generated feedback text, or an explanatory error string when
        the mode is invalid or required data is missing.
    """
    characteristics = get_student_characteristics(student_name) or "暂无该生详细特点记录。"

    if mode == "normal":
        today_student_specific_events = "今天没有关于该生的特别记录。"
        # BUG FIX: the old code compared the sentinel against a string ending
        # in an ASCII '.' while the sentinel ends in '。', so the RAG fallback
        # below was never reached. Track the state with a flag instead.
        found_today = False
        if processed_student_data_today:  # prefer data extracted today for this student
            student_obs = [
                item.get("observation")
                for item in processed_student_data_today
                # .get instead of [] so a malformed item cannot KeyError
                if item.get("student_name") == student_name and item.get("observation")
            ]
            if student_obs:
                today_student_specific_events = "\n".join(f"- {obs}" for obs in student_obs)
                found_today = True

        if not found_today:  # fall back to RAG if not found in today's extract
            rag_student_events = query_rag(
                query_text=f"{student_name}{date_str} 的具体表现",
                n_results=5,
                filter_metadata={"student_name": student_name, "date": date_str}
            )
            if rag_student_events:
                today_student_specific_events = "\n".join([f"- {r}" for r in rag_student_events])

        prompt_vars = {
            "student_name": student_name,
            "student_characteristics": characteristics,
            "today_student_specific_events": today_student_specific_events
        }
        user_prompt = prompts.PARENT_NORMAL_USER_PROMPT_TEMPLATE.format(**prompt_vars)
        system_instruction = prompts.PARENT_NORMAL_SYSTEM_PROMPT

    elif mode == "lazy":
        past_events_list = query_rag(
            query_text=f"{student_name} 过往的各种积极表现和活动片段",
            n_results=10,  # get more for variety
            filter_metadata={"student_name": student_name}  # no date filter for past events
        )
        # Drop very short entries (<= 5 whitespace-separated tokens) to keep
        # only substantive past events.
        past_events_for_student = "\n".join([f"- {r}" for r in past_events_list if len(r.split()) > 5]) if past_events_list else "暂无该生足够的多样化历史表现记录用于此模式。"

        if "暂无该生足够的多样化历史表现记录" in past_events_for_student and characteristics != "暂无该生详细特点记录。":
            st.info("偷懒模式:历史具体事件不足,将尝试结合学生特点进行创意生成。")
            # Fallback: no usable history, so switch to characteristics-driven
            # direct generation (same prompts as llm_direct mode).
            user_prompt = prompts.PARENT_LLM_DIRECT_USER_PROMPT_TEMPLATE.format(
                student_name=student_name,
                student_characteristics=characteristics
            )
            system_instruction = prompts.PARENT_LLM_DIRECT_SYSTEM_PROMPT
        else:
            prompt_vars = {
                "student_name": student_name,
                "student_characteristics": characteristics,  # still useful context for the LLM
                "past_events_for_student": past_events_for_student
            }
            user_prompt = prompts.PARENT_LAZY_USER_PROMPT_TEMPLATE.format(**prompt_vars)
            system_instruction = prompts.PARENT_LAZY_SYSTEM_PROMPT

    elif mode == "llm_direct":
        if characteristics == "暂无该生详细特点记录。":
            return f"无法使用LLM直接生成模式,学生 {student_name} 的特点数据不足。请先更新其特点。"
        prompt_vars = {
            "student_name": student_name,
            "student_characteristics": characteristics
        }
        user_prompt = prompts.PARENT_LLM_DIRECT_USER_PROMPT_TEMPLATE.format(**prompt_vars)
        system_instruction = prompts.PARENT_LLM_DIRECT_SYSTEM_PROMPT
    else:
        st.error("无效的家长反馈模式。")
        return "无效的反馈模式。"

    return get_gemini_response(user_prompt, system_instruction=system_instruction)