File size: 4,113 Bytes
74ebe5c
 
 
 
439ab17
74ebe5c
 
 
 
439ab17
74ebe5c
 
 
 
 
 
 
 
 
 
 
 
 
439ab17
 
 
74ebe5c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
439ab17
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
import gradio as gr
import pandas as pd
from model_handler import ModelHandler
from config import LING_1T
from .agent_common_utils import format_df_to_string

def fetch_inspiration_agent(prompt: str, editor_content: str, style: str, kb_df: pd.DataFrame, short_outline_df: pd.DataFrame, long_outline_df: pd.DataFrame):
    """
    Agent for fetching inspiration options using a real LLM.

    Builds a system/user prompt from the UI context (style charter, knowledge
    base, story outline, current-chapter outline), asks the model for three
    plot-continuation options separated by ``[END_OF_CHOICE]``, and returns
    Gradio updates for the choice panel.

    Args:
        prompt: Optional user instruction; a default continuation request is
            substituted when empty.
        editor_content: Current editor text; only the last 2000 characters
            are sent to the model.
        style: Overall style charter text.
        kb_df: Knowledge-base table.
        short_outline_df: Current-chapter outline table.
        long_outline_df: Full-story outline table.

    Returns:
        A 4-tuple ``(gr.update(visible=True), choice1, choice2, choice3)``.
        On any failure the three choice slots carry an error message instead.
    """
    print("\n[Agent][fetch_inspiration_agent] === 推理类型:灵感扩写 ===")
    print("【发出的完整上下文】")
    print("prompt:", repr(prompt))
    print("editor_content:", repr(editor_content))
    print("style:", repr(style))

    try:
        # BUGFIX: these DataFrame diagnostics previously ran *before* the try
        # block, so a None/invalid DataFrame raised past the function's own
        # error handling and the UI never received the error updates below.
        print("kb_df:", repr(kb_df.to_dict("records")))
        print("short_outline_df:", repr(short_outline_df.to_dict("records")))
        print("long_outline_df:", repr(long_outline_df.to_dict("records")))

        # 1. Format context from UI inputs
        style_context = f"### 整体章程\n{style}\n\n"
        kb_context = format_df_to_string(kb_df, "知识库")
        short_outline_context = format_df_to_string(short_outline_df, "当前章节大纲")
        long_outline_context = format_df_to_string(long_outline_df, "故事总纲")

        # 2. Build System Prompt (instructs the model to emit exactly three
        #    options delimited by the [END_OF_CHOICE] sentinel).
        system_prompt = (
            "你是一个富有创意的长篇小说家,你的任务是根据提供的背景设定和当前文本,创作三个不同的、有创意的剧情发展方向。\n"
            "请严格遵守以下格式:直接开始写第一个选项,然后用 `[END_OF_CHOICE]` 作为分隔符,接着写第二个选项,再用 `[END_OF_CHOICE]` 分隔,最后写第三个选项。不要有任何额外的解释或编号。\n"
            "例如:\n"
            "剧情发展一的内容...[END_OF_CHOICE]剧情发展二的内容...[END_OF_CHOICE]剧情发展三的内容..."
        )

        # 3. Build User Prompt (only the tail of the editor content is sent
        #    to keep the request within a reasonable context size).
        full_context = style_context + kb_context + long_outline_context + short_outline_context
        user_prompt = (
            f"### 背景设定与大纲\n{full_context}\n"
            f"### 当前已写内容 (末尾部分)\n{editor_content[-2000:]}\n\n"
            f"### 用户指令\n{prompt if prompt else '请基于当前内容,自然地延续剧情。'}"
        )

        # 4. Call LLM and drain the streaming generator into one string.
        model_handler = ModelHandler()
        response_generator = model_handler.generate_code(
            system_prompt=system_prompt,
            user_prompt=user_prompt,
            model_choice=LING_1T
        )

        full_response = "".join(chunk for chunk in response_generator)

        print("【收到的完整上下文】")
        print("full_response:", repr(full_response))

        # 5. Parse response and update UI.
        choices = full_response.split("[END_OF_CHOICE]")
        # Ensure we have at least 3 choices, padding with placeholders if
        # necessary (a negative multiplier is a no-op when there are extras).
        choices += ["(模型未生成足够选项)"] * (3 - len(choices))

        print(f"[Agent] LLM Choices Received: {len(choices)}")

        return gr.update(visible=True), choices[0].strip(), choices[1].strip(), choices[2].strip()

    except Exception as e:
        # Surface the failure in the UI rather than crashing the event handler.
        print(f"[Agent] Error fetching inspiration: {e}")
        error_message = f"获取灵感时出错: {e}"
        return gr.update(visible=True), error_message, "请检查日志", "请检查日志"

def apply_inspiration_agent(current_text: str, inspiration_text: str):
    """
    Agent for applying selected inspiration to the editor.

    Appends the chosen inspiration to the current editor text, separated by a
    blank line (or uses it verbatim when the editor is empty), hides the
    inspiration panel, and clears the pending selection.
    """
    print("\n[Agent][apply_inspiration_agent] === 推理类型:应用灵感 ===")
    print("【发出的完整上下文】")
    print("current_text:", repr(current_text))
    print("inspiration_text:", repr(inspiration_text))
    # Empty editor -> take the inspiration as-is; otherwise join with a blank line.
    new_text = f"{current_text}\n\n{inspiration_text}" if current_text else inspiration_text
    print("【收到的完整上下文】")
    print("new_text:", repr(new_text))
    # Tuple unpacks into (editor content, panel visibility update, cleared selection).
    return new_text, gr.update(visible=False), ""