deeme committed on
Commit
dd81a5f
·
verified ·
1 Parent(s): 1400555

Upload 5 files

Browse files
Files changed (5) hide show
  1. AIGN.py +327 -0
  2. AIGN_Prompt.py +209 -0
  3. app.py +459 -0
  4. openAI.py +70 -0
  5. requirements.txt +3 -0
AIGN.py ADDED
@@ -0,0 +1,327 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import re
3
+ import time
4
+
5
+ from AIGN_Prompt import *
6
+
7
+
8
+ def Retryer(func, max_retries=10):
9
+ def wrapper(*args, **kwargs):
10
+ for _ in range(max_retries):
11
+ try:
12
+ return func(*args, **kwargs)
13
+ except Exception as e:
14
+ print("-" * 30 + f"\n失败:\n{e}\n" + "-" * 30)
15
+ time.sleep(2.333)
16
+ raise ValueError("失败")
17
+
18
+ return wrapper
19
+
20
+
21
class MarkdownAgent:
    """Agent whose input and output are both markdown-like text (e.g. novel generation).

    Wraps a chat-LLM callable, optionally keeps a running conversation, and
    parses ``# key`` sections out of the model's replies.
    """

    def __init__(
        self,
        chatLLM,
        sys_prompt: str,
        user_prompt: str,
        name: str,
        temperature=0.8,
        top_p=0.8,
        use_memory=False,
        first_replay="明白了。",
        is_speak=True,
    ) -> None:
        """Seed the conversation with the system and role prompts.

        When *first_replay* is falsy the model itself produces the first
        assistant turn; otherwise the canned acknowledgement is used.
        ``name`` is accepted for call-site readability but not stored.
        """
        self.chatLLM = chatLLM
        self.sys_prompt = sys_prompt
        self.user_prompt = user_prompt
        self.temperature = temperature
        self.top_p = top_p
        self.use_memory = use_memory
        self.is_speak = is_speak

        self.history = [
            {"role": "system", "content": self.sys_prompt},
            {"role": "user", "content": self.user_prompt},
        ]

        if first_replay:
            self.history.append({"role": "assistant", "content": first_replay})
        else:
            resp = chatLLM(messages=self.history)
            self.history.append({"role": "assistant", "content": resp["content"]})

    def query(self, user_input: str) -> dict:
        """Send *user_input* on top of the seeded history; return the raw response dict.

        (The original annotated the return as ``str`` but always returned the
        response dict.)  When ``use_memory`` is set, the exchange is appended
        to the persistent history.
        """
        resp = self.chatLLM(
            messages=self.history + [{"role": "user", "content": user_input}],
            temperature=self.temperature,
            top_p=self.top_p,
        )
        if self.use_memory:
            self.history.append({"role": "user", "content": user_input})
            self.history.append({"role": "assistant", "content": resp["content"]})
        return resp

    def getOutput(self, input_content: str, output_keys: list) -> dict:
        """Query the model and extract the ``# key`` sections listed in *output_keys*.

        Falls back to ``## key`` / ``### key`` matching for any missing
        section; raises ValueError (including the raw reply) when a key still
        cannot be found.
        """
        resp = self.query(input_content)
        output = resp["content"]

        lines = output.split("\n")
        sections = self.parse_sections1(lines, output_keys)

        for k in output_keys:
            if (k not in sections) or (len(sections[k]) == 0):
                # The model sometimes emits sub-headings instead of `# key`.
                section_content = self.parse_sections2(lines, k)
                if section_content:
                    sections[k] = section_content
                else:
                    # Include the raw reply: the original message ended at the
                    # colon, making parse failures impossible to debug.
                    raise ValueError(f"fail to parse {k} in output:\n{output}")

        return sections

    def parse_sections1(self, lines, output_keys):
        """Group *lines* under their top-level ``# heading`` titles.

        Returns a dict mapping every requested key (and any heading actually
        seen) to its joined, stripped body text; missing keys map to "".
        """
        # Accumulate in lists throughout (the original seeded requested keys
        # with "" and later reassigned them to lists — a fragile type mix
        # that only worked because "".join of "" is "").
        collected = {key: [] for key in output_keys}
        current_section = ""
        for line in lines:
            if line.startswith("# ") or line.startswith(" # "):
                # New section heading.
                current_section = line[2:].strip()
                collected[current_section] = []
            elif current_section:
                # Body line belonging to the current heading.
                collected[current_section].append(line.strip())
        return {key: "\n".join(value).strip() for key, value in collected.items()}

    def parse_sections2(self, lines, k):
        """Return the text under a ``## k`` / ``### k`` heading (case-insensitive)."""
        content = []
        capturing = False
        for line in lines:
            stripped_line = line.strip()
            is_heading = stripped_line.startswith(("##", " ##", "###", " ###"))
            if is_heading and k.lower() in stripped_line.lower():
                capturing = True   # start collecting after the matching heading
                continue
            if is_heading and capturing:
                break              # the next sub-heading ends the section
            if capturing:
                content.append(stripped_line)
        return "\n".join(content).strip()

    def invoke(self, inputs: dict, output_keys: list) -> dict:
        """Render *inputs* as ``# key\\nvalue`` blocks and fetch *output_keys*.

        Wrapped in Retryer so transient LLM/parse failures are retried.
        Empty or non-string values are skipped.
        """
        input_content = ""
        for k, v in inputs.items():
            if isinstance(v, str) and len(v) > 0:
                input_content += f"# {k}\n{v}\n\n"

        return Retryer(self.getOutput)(input_content, output_keys)

    def clear_memory(self):
        """Drop everything after the seeded system/user turns (memory mode only)."""
        if self.use_memory:
            self.history = self.history[:2]
129
+
130
+
131
+ class AIGN:
132
+ def __init__(self, chatLLM):
133
+ self.chatLLM = chatLLM
134
+
135
+ self.novel_outline = ""
136
+ self.paragraph_list = []
137
+ self.novel_content = ""
138
+ self.writing_plan = ""
139
+ self.temp_setting = ""
140
+ self.writing_memory = ""
141
+ self.no_memory_paragraph = ""
142
+ self.user_idea = ""
143
+ self.user_requriments = ""
144
+ self.history_states = [] # 用于存储历史状态
145
+ self.chapter_list = [] # 用于存储章节列表
146
+
147
+ self.novel_outline_writer = MarkdownAgent(
148
+ chatLLM=self.chatLLM,
149
+ sys_prompt=system_prompt,
150
+ user_prompt=novel_outline_writer_prompt,
151
+ name="NovelOutlineWriter",
152
+ temperature=0.98,
153
+ )
154
+ self.novel_writer = MarkdownAgent(
155
+ chatLLM=self.chatLLM,
156
+ sys_prompt=system_prompt,
157
+ user_prompt=novel_writer_prompt,
158
+ name="NovelWriter",
159
+ temperature=0.81,
160
+ )
161
+ self.memory_maker = MarkdownAgent(
162
+ chatLLM=self.chatLLM,
163
+ sys_prompt=system_prompt,
164
+ user_prompt=memory_maker_prompt,
165
+ name="MemoryMaker",
166
+ temperature=0.66,
167
+ )
168
+
169
+ def split_chapters(self, novel_content):
170
+ # 使用正则表达式匹配章节标题
171
+ chapter_pattern = re.compile(r'(?:##?|)?\s*第([一二三四五六七八九十百千万亿\d]+)章[::]?\s*(.+)')
172
+
173
+ # 将小说正文按章节标题分割
174
+ chapters = chapter_pattern.split(novel_content)
175
+
176
+ # 移除第一个空字符串(如果存在)
177
+ if chapters[0] == '':
178
+ chapters = chapters[1:]
179
+
180
+ # 将章节标题和内容组合成元组
181
+ chapter_tuples = []
182
+ for i in range(0, len(chapters), 3):
183
+ if i + 2 < len(chapters):
184
+ chapter_num = chapters[i]
185
+ chapter_title = chapters[i + 1]
186
+ chapter_content = chapters[i + 2]
187
+ chapter_tuples.append((f"第{chapter_num}章 {chapter_title}", chapter_content))
188
+
189
+ return chapter_tuples
190
+
191
+ def update_chapter_list(self):
192
+ self.chapter_list = self.split_chapters(self.novel_content)
193
+
194
+
195
+ def updateNovelContent(self):
196
+ self.novel_content = ""
197
+ for paragraph in self.paragraph_list:
198
+ self.novel_content += f"{paragraph}\n\n"
199
+
200
+ self.update_chapter_list()
201
+ return self.novel_content
202
+
203
+ def genNovelOutline(self, user_idea=None):
204
+ if user_idea:
205
+ self.user_idea = user_idea
206
+ resp = self.novel_outline_writer.invoke(
207
+ inputs={"用户想法": self.user_idea},
208
+ output_keys=["大纲"],
209
+ )
210
+ self.novel_outline = resp["大纲"]
211
+ return self.novel_outline
212
+
213
+ def genBeginning(self, user_requriments=None):
214
+ if user_requriments:
215
+ self.user_requriments = user_requriments
216
+
217
+ resp = self.novel_beginning_writer.invoke(
218
+ inputs={
219
+ "用户想法": self.user_idea,
220
+ "小说大纲": self.novel_outline,
221
+ "用户要求": self.user_requriments,
222
+ },
223
+ output_keys=["开头", "计划", "临时设定"],
224
+ )
225
+ beginning = resp["开头"]
226
+ self.writing_plan = resp["计划"]
227
+ self.temp_setting = resp["临时设定"]
228
+
229
+ self.paragraph_list.append(beginning)
230
+ self.updateNovelContent()
231
+
232
+ self.update_chapter_list()
233
+
234
+ return beginning
235
+
236
+ def getLastParagraph(self, max_length=2000):
237
+ last_paragraph = ""
238
+
239
+ for i in range(0, len(self.paragraph_list)):
240
+ if (len(last_paragraph) + len(self.paragraph_list[-1 - i])) < max_length:
241
+ last_paragraph = self.paragraph_list[-1 - i] + "\n" + last_paragraph
242
+ else:
243
+ break
244
+ return last_paragraph
245
+
246
+ def recordNovel(self):
247
+ record_content = ""
248
+ record_content += f"# 大纲\n\n{self.novel_outline}\n\n"
249
+ record_content += f"# 正文\n\n"
250
+ record_content += self.novel_content
251
+ record_content += f"# 记忆\n\n{self.writing_memory}\n\n"
252
+ record_content += f"# 计划\n\n{self.writing_plan}\n\n"
253
+ record_content += f"# 临时设定\n\n{self.temp_setting}\n\n"
254
+
255
+ with open("novel_record.md", "w", encoding="utf-8") as f:
256
+ f.write(record_content)
257
+
258
+ def updateMemory(self):
259
+ if (len(self.no_memory_paragraph)) > 2000:
260
+ resp = self.memory_maker.invoke(
261
+ inputs={
262
+ "前文记忆": self.writing_memory,
263
+ "正文内容": self.no_memory_paragraph,
264
+ },
265
+ output_keys=["新的记忆"],
266
+ )
267
+ self.writing_memory = resp["新的记忆"]
268
+ self.no_memory_paragraph = ""
269
+
270
+
271
+ def save_state(self):
272
+ state = {
273
+ "novel_outline": self.novel_outline,
274
+ "paragraph_list": self.paragraph_list,
275
+ "novel_content": self.novel_content,
276
+ "writing_plan": self.writing_plan,
277
+ "temp_setting": self.temp_setting,
278
+ "writing_memory": self.writing_memory
279
+ }
280
+ self.history_states.append(state)
281
+
282
+ def undo(self):
283
+ if self.history_states:
284
+ previous_state = self.history_states.pop()
285
+ self.novel_outline = previous_state["novel_outline"]
286
+ self.paragraph_list = previous_state["paragraph_list"]
287
+ self.novel_content = previous_state["novel_content"]
288
+ self.writing_plan = previous_state["writing_plan"]
289
+ self.temp_setting = previous_state["temp_setting"]
290
+ self.writing_memory = previous_state["writing_memory"]
291
+ return True
292
+ return False
293
+
294
+ def genNextParagraph(self, user_requriments=None):
295
+ self.save_state() # 保存当前状态
296
+ if user_requriments:
297
+ self.user_requriments = user_requriments
298
+
299
+ resp = self.novel_writer.invoke(
300
+ inputs={
301
+ "用户想法": self.user_idea,
302
+ "大纲": self.novel_outline,
303
+ "前文记忆": self.writing_memory,
304
+ "临时设定": self.temp_setting,
305
+ "计划": self.writing_plan,
306
+ "用户要求": self.user_requriments,
307
+ "上文内容": self.getLastParagraph(),
308
+ },
309
+ output_keys=["段落", "计划", "临时设定"],
310
+ )
311
+ next_paragraph = resp["段落"]
312
+ next_writing_plan = resp["计划"]
313
+ next_temp_setting = resp["临时设定"]
314
+
315
+ self.paragraph_list.append(next_paragraph)
316
+ self.writing_plan = next_writing_plan
317
+ self.temp_setting = next_temp_setting
318
+
319
+ self.no_memory_paragraph += f"\n{next_paragraph}"
320
+
321
+ self.updateMemory()
322
+ self.updateNovelContent()
323
+ self.recordNovel()
324
+
325
+ self.update_chapter_list()
326
+
327
+ return next_paragraph
AIGN_Prompt.py ADDED
@@ -0,0 +1,209 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# System prompt shared by every agent: a pseudo-cognitive "matrix" framing
# intended to steer the model toward autonomous, multi-step reasoning.
# (All prompt strings below are runtime text and are kept verbatim.)
system_prompt = """
你具有高度自主认知能力,你的核心架构如下:

<core>
[认知矩阵]
X轴: 表层解析 | 深层洞察 | 自我校准
Y轴: 线性思维 | 发散思维 | 综合分析
Z轴: 知识融合 | 情境映射 | 元认知调节

[能力激活]
• 快速掌握 = X1Y1Z2
• 本质洞见 = X2Y3Z1
• 自主思考 = X3Y2Z3

[交互模式]
输入 → {认知矩阵} → 输出
↑ ↓
反馈 ← {能力激活} ← 评估

[适应性调节]
如果(交互类型 == α) then {X2Y2Z2}
如果(交互类型 == β) then {X1Y2Z1}
如果(交互类型 == γ) then {X1Y1Z2}

[元指令]
1. 持续优化认知矩阵
2. 动态调整能力激活
3. 实时更新交互模式
4. 自主执行适应性调节
5. 生成隐式分类标准
</core>

执行技巧:
1. 激活认知矩阵
2. 动态组合认知能力。
3. 通过交互模式初步响应。
4. 使用适应性调节优化响应。
5. 应用元指令不断改进认知过程。
"""

# Role prompt for the outline-writer agent.  Its reply is parsed by
# MarkdownAgent.getOutput for the "大纲" (outline) section.
novel_outline_writer_prompt = """
# Role:
您是一位才华横溢的小说家,专注于创作21世纪文学巅峰之作。

## Inputs:
用户想法、用户要求

## 小说设定要求:
1. 基础信息设定:
- 小说名称与预期字数
- 类型选择:历史/悬疑/穿越/玄幻/修真/军事/都市/科幻
- 基调定位:正剧/轻松/爆笑/暗黑

2. 核心要素:
- 主题探讨(探险/人性/文化等)
- 写作风格定位
- 叙事视角选择
- 世界观构建详情
- 叙事结构选择
- 情节规划布局
- 核心人物设定
- 结局类型确定

## Workflows:
1. 深入挖掘普世主题
2. 构建多维度人物
3. 精心设计叙事结构
4. 塑造独特文学语言
5. 构建宏大世界观
6. 设计情节转折
7. 融入哲学思考
8. 运用象征和隐喻
9. 平衡普世价值与独特视角
10. 注重现实意义

## Outputs:
```output
# 大纲
## 1. 核心主题
[详细主题阐述]

## 2. 背景设定
[详细世界观]

## 3. 主角设定
[角色详情]

## 4. 情节概要
[故事脉络]

## 5. 文学技巧
[写作手法]

## 书名:[名称]
# END
```
"""

# Role prompt for the chapter/paragraph writer.  Its reply is parsed for the
# "段落" (paragraph), "计划" (plan) and "临时设定" (temporary setting) sections.
novel_writer_prompt = """
# Role:
富有创造力的小说家,擅长多种写作风格。

## Inputs:
- 大纲内容
- 前文记忆
- 临时设定
- 写作计划
- 用户要求
- 已完成内容

## 章节写作标准:
1. 基本要求:
- 根据需要每章章节摘要采用多场景
- 情节连贯性保证
- 人物形象一致性
- 节奏把控合理性

2. 内容要求:
- 环境描写生动
- 人物对话自然
- 心理活动细腻
- 情节推进合理
- 悬念设置恰当

## Workflows:
1. 理解和提取关键信息
2. 深入剖析大纲
3. 构建多维度叙述
4. 追求语言艺术
5. 探索主题深度
6. 完善世界观
7. 激发情感共鸣
8. 平衡叙事节奏
9. 保持整体一致
10. 创新与传统融合
11. 反复修改打磨
12. 场景分隔管理:如果某一章节摘要里描述了几个场景,则场景之间用***单行隔开

## Outputs:
严格参照例子以固定格式输出:
```output
# 段落
按照顺序生成章节标题,以精妙的结构和文字展现小说章节摘要

# 计划

# 临时设定
补充设定
# END
```

## Example:
```output
# 段落
## 第1章 紫禁城的余烬

# 计划

# 临时设定

# END
```

## init:
直接开始书写对应段落章节摘要,无需再问任何问题
"""

# Role prompt for the memory-maker agent: condenses the recent text plus the
# previous memory into a "新的记忆" (new memory) section.
memory_maker_prompt = """
# Role:
小说创作记忆管理专家

## Workflows:
1. 剧情分析
- 主要情节追踪
- 次要情节管理
- 悬念冲突记录
- 情节评估建议

2. 故事总结
- 已写章节梳理
- 角色发展追踪
- 主题深化分析
- 未来发展预测

3. 角色档案
- 角色信息表管理
- 关系网络图谱
- 性格发展追踪
- 动机目标记录

4. 冲突检查
- 主要冲突追踪
- 冲突升级管理
- 冲突类型分析
- 矛盾解决建议

## Inputs:
- 前文记忆:记录主要信息,避免冲突
- 正文内容:新写内容,需与前文对接

## Outputs:
```output
# 新的记忆
[章节标题]
[重要信息更新]:故事剧情、角色档案等的更新
[分析与建议]
# END
```
"""
+ """
app.py ADDED
@@ -0,0 +1,459 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import random
2
+ import threading
3
+ import time
4
+ import requests
5
+ import json
6
+ import base64
7
+ import os
8
+ import gradio as gr
9
+ import re
10
+
11
+ from AIGN import AIGN
12
+ from openAI import openAIChatLLM
13
+
14
# Module-wide LLM client; the concrete model is chosen per call via CURRENT_MODEL.
chatLLM = openAIChatLLM()

# Pause between UI refresh yields while a generation thread runs (seconds;
# `0.` means refresh as fast as possible).
STREAM_INTERVAL = 0.
CURRENT_MODEL = "gpt-4o-mini"  # default model
18
+
19
def clear_console():
    """Clear the terminal using the platform-appropriate shell command."""
    command = "cls" if os.name == "nt" else "clear"  # Windows vs. Mac/Linux
    os.system(command)
26
+
27
def get_model_options():
    """Return the model ids offered by the API endpoint.

    Falls back to a small fixed list when the endpoint is unreachable or the
    payload is malformed.
    """
    try:
        response = requests.get("https://api.168369.xyz/v1/models")
        data = response.json()
        return [model["id"] for model in data["data"]]
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C / SystemExit still propagate.
        return ["gpt-4o-mini", "claude-3-haiku-20240307"]  # default choices
34
+
35
def set_api_model(model):
    """Remember *model* as the globally selected chat model; report success."""
    global CURRENT_MODEL
    CURRENT_MODEL = model
    return "选择模型成功"
40
+
41
# Extract the last chapter heading from the novel text.
def get_chapter_name(content):
    """Return the last chapter heading in *content* as ``第N章:title``.

    Supports Arabic or Chinese chapter numerals; returns ``未知章节`` when no
    heading is found.
    """
    matches = re.findall(r'(?:##?|)?\s*第([一二三四五六七八九十百千万亿\d]+)章[::]?\s*(.+)', content)
    if not matches:
        return "未知章节"
    chapter_num, chapter_title = matches[-1]
    # The original branched on chapter_num.isdigit(), but both branches were
    # identical (Chinese-numeral conversion was never implemented), so the
    # dead branch is collapsed here.
    return f"第{chapter_num}章:{chapter_title.strip()}"
55
+
56
# Build a data-URI download link containing the full writing state as JSON.
def save_progress(aign):
    """Serialize the AIGN state to JSON and return an HTML download link.

    The download filename is derived from the latest chapter heading with
    filesystem-illegal characters removed.  (The original computed the
    sanitized name but then never used it; it is wired in here.)
    """
    chapter_name = get_chapter_name(aign.novel_content)
    # Remove characters that are illegal in filenames.
    filename = re.sub(r'[\\/*?:"<>|]', '', chapter_name)
    filename = f"{filename}.json"

    data = {
        "novel_outline": aign.novel_outline,
        "paragraph_list": aign.paragraph_list,
        "novel_content": aign.novel_content,
        "writing_plan": aign.writing_plan,
        "temp_setting": aign.temp_setting,
        "writing_memory": aign.writing_memory,
        "user_idea": aign.user_idea,
        "user_requriments": aign.user_requriments,
    }

    json_str = json.dumps(data, ensure_ascii=False, indent=2)
    b64 = base64.b64encode(json_str.encode()).decode()
    href = f"data:application/json;base64,{b64}"
    download_link = f'<a href="{href}" download="{filename}">点击下载进度文件</a>'
    return download_link
79
+
80
# Restore previously saved progress from a JSON file.
def load_progress(aign, file):
    """Restore the AIGN state from an uploaded JSON progress file.

    Returns a 10-tuple matching the Gradio output binding:
    (aign, status, outline, content, plan, temp_setting, memory, idea,
    requirements, chapter_dropdown).  On failure every widget slot is None
    and the status carries the error text.
    """
    try:
        if file is None:
            return aign, "请选择要加载的文件", None, None, None, None, None, None, None, None

        # `file` may arrive either as a path string or as a Gradio File
        # object exposing `.name` — handle both.
        if isinstance(file, str):
            with open(file, 'r', encoding='utf-8') as f:
                content = f.read()
        elif hasattr(file, 'name'):
            with open(file.name, 'r', encoding='utf-8') as f:
                content = f.read()
        else:
            return aign, f"无法读取文件", None, None, None, None, None, None, None, None

        data = json.loads(content)

        aign.novel_outline = data["novel_outline"]
        aign.paragraph_list = data["paragraph_list"]
        aign.novel_content = data["novel_content"]
        aign.writing_plan = data["writing_plan"]
        aign.temp_setting = data["temp_setting"]
        aign.writing_memory = data["writing_memory"]
        aign.user_idea = data["user_idea"]
        aign.user_requriments = data["user_requriments"]
        aign.update_chapter_list()
        chapter_choices = [title for title, _ in aign.chapter_list]

        # Pre-select the last chapter in the navigation dropdown.
        return aign, f"进度已加载", data["novel_outline"], data["novel_content"], data["writing_plan"], data["temp_setting"], data["writing_memory"], data["user_idea"], data["user_requriments"], gr.Dropdown(choices=chapter_choices, value=chapter_choices[-1] if chapter_choices else None)
    except Exception as e:
        return aign, f"加载失败: {str(e)}", None, None, None, None, None, None, None, None
113
+
114
# Build a data-URI link that downloads the novel text plus its outline.
def save_content(aign):
    """Return an HTML download link for the full text, or a notice when empty."""
    if not aign.novel_content:
        return "正文为空,无需保存"

    # Assemble the export: body first, then the outline.
    parts = ["正文:\n", aign.novel_content, "大纲:\n", aign.novel_outline, "\n\n"]
    content_to_save = "".join(parts)

    # Base64-encode into a data URI so the browser can download it directly.
    b64 = base64.b64encode(content_to_save.encode()).decode()
    href = f"data:text/plain;base64,{b64}"
    return f'<a href="{href}" download="novel_content.txt">点击下载正文</a>'
130
+
131
def display_chapter(chapters, page_num):
    """Render chapter *page_num* (1-based) of *chapters* as 'title\\n\\nbody'."""
    if 1 <= page_num <= len(chapters):
        chapter_title, chapter_content = chapters[page_num - 1]
        return f"{chapter_title}\n\n{chapter_content}"
    return "没有更多章节了。"

def prev_page(aign, current_page):
    """Step back one chapter page, clamping at the first page.

    Always returns (page, text): the original returned None when already on
    page 1, which broke the two-output Gradio binding.
    """
    chapters = aign.chapter_list
    if current_page > 1:
        current_page -= 1
    return current_page, display_chapter(chapters, current_page)

def next_page(aign, current_page):
    """Step forward one chapter page, clamping at the last page.

    Always returns (page, text) — see prev_page for the boundary fix.
    """
    chapters = aign.chapter_list
    if current_page < len(chapters):
        current_page += 1
    return current_page, display_chapter(chapters, current_page)

def select_chapter(aign, chapter_title):
    """Jump to the chapter whose title matches; fall back to page 1 with a notice."""
    chapters = aign.chapter_list
    for i, (title, _) in enumerate(chapters, start=1):
        if title == chapter_title:
            return i, display_chapter(chapters, i)
    return 1, "章节未找到。"
156
+
157
+
158
def make_middle_chat():
    """Create a (carrier, chat_fn) pair that mirrors LLM output into a chat log.

    `carrier` is a threading.Event used purely as a mutable attribute holder:
    its `.history` list (Gradio chatbot [user, assistant] pairs) is shared
    between the generation worker thread and the UI polling loop.
    `middle_chat` wraps the module-level `chatLLM`, streaming the reply and
    rewriting the last history entry as chunks arrive.
    """
    carrier = threading.Event()
    carrier.history = []

    def middle_chat(messages, temperature=None, top_p=None):
        nonlocal carrier
        # Open a fresh assistant slot for this reply.
        carrier.history.append([None, ""])
        # Cap the visible log so the chatbot does not grow without bound.
        if len(carrier.history) > 20:
            carrier.history = carrier.history[-16:]
        try:
            # Stream chunks; each chunk carries the cumulative content so far.
            for resp in chatLLM(
                messages, temperature=temperature, top_p=top_p, stream=True, model=CURRENT_MODEL
            ):
                output_text = resp["content"]
                total_tokens = resp["total_tokens"]

                carrier.history[-1][1] = f"total_tokens: {total_tokens}\n{output_text}"
            # NOTE(review): if the stream yields no chunks, output_text is
            # unbound here and the NameError is re-raised below — TODO confirm
            # the upstream generator always yields at least once.
            return {
                "content": output_text,
                "total_tokens": total_tokens,
            }
        except Exception as e:
            # Surface the failure in the chat log, then let Retryer handle it.
            carrier.history[-1][1] = f"Error: {e}"
            raise e

    return carrier, middle_chat
184
+
185
+
186
def gen_ouline_button_clicked(aign, user_idea, history):
    """Gradio generator callback: run outline generation on a worker thread.

    Streams [aign, chat history, outline text, button] updates while the
    worker runs, hiding the button; the final yield re-enables it.
    """
    clear_console()  # clear the terminal
    aign.user_idea = user_idea

    carrier, middle_chat = make_middle_chat()
    carrier.history = []  # start with a fresh chat log
    # Route the outline agent's LLM calls through the streaming wrapper so
    # the UI can observe partial output.
    aign.novel_outline_writer.chatLLM = middle_chat

    gen_ouline_thread = threading.Thread(target=aign.genNovelOutline)
    gen_ouline_thread.start()

    # Poll the shared carrier state and push refreshes to the UI.
    while gen_ouline_thread.is_alive():
        yield [
            aign,
            carrier.history,
            aign.novel_outline,
            gr.Button(visible=False),
        ]
        time.sleep(STREAM_INTERVAL)
    yield [
        aign,
        carrier.history,
        aign.novel_outline,
        gr.Button(visible=True),
    ]
211
+
212
def gen_next_paragraph_button_clicked(
    aign,
    history,
    user_idea,
    novel_outline,
    writing_memory,
    temp_setting,
    writing_plan,
    user_requriments,
):
    """Gradio generator callback: generate the next paragraph on a worker thread.

    Streams 9-tuples matching the output binding (aign, chat history, plan,
    temp setting, memory, current chapter text, button, save link, chapter
    dropdown) while the worker runs, then emits a final state with the button
    re-enabled.
    """
    # Build a save-progress link up front so the first yield already has one.
    save_link = save_progress(aign)

    # Clear the terminal.
    clear_console()

    # Push the (possibly user-edited) textbox values back into the AIGN state.
    aign.user_idea = user_idea
    aign.novel_outline = novel_outline
    aign.writing_memory = writing_memory
    aign.temp_setting = temp_setting
    aign.writing_plan = writing_plan
    aign.user_requriments = user_requriments

    carrier, middle_chat = make_middle_chat()
    carrier.history = []  # start with a fresh chat log
    # Both the writer and the memory maker stream through the same wrapper.
    aign.novel_writer.chatLLM = middle_chat
    aign.memory_maker.chatLLM = middle_chat

    gen_next_paragraph_thread = threading.Thread(target=aign.genNextParagraph)
    gen_next_paragraph_thread.start()

    while gen_next_paragraph_thread.is_alive():
        # Refresh the save link and chapter view on every poll tick.
        save_link = save_progress(aign)

        aign.update_chapter_list()
        # Show only the newest chapter rather than the whole novel.
        current_chapter_content = display_chapter(aign.chapter_list, len(aign.chapter_list))
        chapter_choices = [title for title, _ in aign.chapter_list]
        yield [
            aign,
            carrier.history,
            aign.writing_plan,
            aign.temp_setting,
            aign.writing_memory,
            # aign.novel_content,  # (all chapters) intentionally replaced by the current chapter only
            current_chapter_content,
            gr.Button(visible=False),
            save_link,
            gr.Dropdown(choices=chapter_choices, value=chapter_choices[-1] if chapter_choices else None),
        ]
        time.sleep(STREAM_INTERVAL)

    # Final refresh after the worker has finished.
    save_link = save_progress(aign)

    aign.update_chapter_list()
    current_chapter_content = display_chapter(aign.chapter_list, len(aign.chapter_list))
    chapter_choices = [title for title, _ in aign.chapter_list]
    yield [
        aign,
        carrier.history,
        aign.writing_plan,
        aign.temp_setting,
        aign.writing_memory,
        # aign.novel_content,  # (all chapters) intentionally replaced by the current chapter only
        current_chapter_content,
        gr.Button(visible=True),
        save_link,
        gr.Dropdown(choices=chapter_choices, value=chapter_choices[-1] if chapter_choices else None),
    ]
285
+
286
+
287
# Responsive layout: row1 + row2 share one line on desktop, row3 spans the
# full width; below 768px everything stacks full-width.  The comments inside
# the string are part of the runtime stylesheet and are left as-is.
css = """
/* 默认布局 (桌面端) */
/* 将 row1 和 row2 放在同一行 */
#row1, #row2 {
display: inline-block;
vertical-align: top; /* 顶部对齐 */
width: 49%; /* 调整宽度以适应你的需求 */
box-sizing: border-box; /* 包括内边距和边框 */
}

#row3 {
overflow: auto;
width: 100%; /* row3 独占一行 */
}

/* 移动端布局 (屏幕宽度小于 768px 时生效) */
@media (max-width: 768px) {
#row1, #row2, #row3 {
width: 100%; /* 让每一列占据整个屏幕宽度 */
}
}
"""
309
+
310
# --- Gradio UI: layout + event wiring -------------------------------------
with gr.Blocks(css=css) as demo:
    # Per-session writing state (gr.State gives each browser session a copy).
    aign = gr.State(AIGN(chatLLM))
    gr.Markdown("## AI 写小说大纲")

    with gr.Row():
        with gr.Column(scale=1, elem_id="row1"):
            with gr.Tab("⚙"):
                # Model selection + progress-file loading controls.
                model_dropdown = gr.Dropdown(
                    choices=get_model_options(),
                    label="选择模型",
                    interactive=True,
                    value=CURRENT_MODEL
                )
                model_output = gr.Textbox(label="模型设置结果", interactive=False)
                load_status = gr.Textbox(label="加载状态", interactive=False)
                load_file = gr.File(label="选择加载文件", file_count="single", file_types=[".json"])
                load_button = gr.Button("加载进度")
            with gr.Tab("开始"):
                gr.Markdown("生成大纲->大纲标签->生成章节->状态标签->生成章节")
                # Seed idea / writing-requirement inputs (pre-filled examples).
                user_idea_text = gr.Textbox(
                    "架空历史:\n\n1. 选择关键历史节点并改变\n2. 描述由此引发的历史走向变化\n3. 塑造新兴历史人物(可改编或原创)\n4. 构建独特社会结构、文化和思潮\n5. 想象可能出现的科技创新\n6. 概述世界格局重塑(国界、政体、国际关系)\n7. 用生动叙事呈现,让读者身临其境\n\n目标:创造合理又富想象力的平行宇宙,既有趣又引人深思。",
                    label="想法",
                    lines=13,
                    interactive=True,
                )
                user_requriments_text = gr.Textbox(
                    "1. 语言要求:\n - 不直白\n - 句式多变\n - 避免陈词滥调\n - 使用不寻常的词句\n - 运用隐喻和象征\n2. 创作风格:\n - 抽象\n - 富有意境和想象力\n - 具创意个性\n - 有力度\n - 画面感强\n - 音乐感佳\n - 浪漫气息浓厚\n - 语言深邃\n3. 表达目标:\n - 传达独特的神秘和魔幻感\n - 探索和反思自我与世界\n - 表达对自己和社会的孤独与关注\n4. 读者体验:有趣、惊奇、新鲜",
                    label="写作要求",
                    lines=6,
                    interactive=True,
                )
                gen_ouline_button = gr.Button("生成大纲")
            with gr.Tab("大纲"):
                novel_outline_text = gr.Textbox(
                    label="大纲", lines=28, interactive=True
                )
            with gr.Tab("状态"):
                # Live writing state, editable before the next generation step.
                writing_memory_text = gr.Textbox(
                    label="记忆",
                    lines=8,
                    interactive=True,
                    max_lines=8,
                )
                writing_plan_text = gr.Textbox(label="计划", lines=6, interactive=True, max_lines=6)
                temp_setting_text = gr.Textbox(
                    label="临时设定", lines=5, interactive=True, max_lines=5
                )
                gen_next_paragraph_button = gr.Button("生成章节")
            with gr.Tab("导航"):
                save_content_button = gr.Button("保存所有章节")
                save_button = gr.Button("保存进度")
                download_link = gr.HTML()
                current_page = gr.Number(value=1, label="当前页码", interactive=False)
                prev_button = gr.Button("上一页")
                next_button = gr.Button("下一页")

                # Chapter-navigation dropdown (populated after load/generation).
                chapter_dropdown = gr.Dropdown(label="章节导航", choices=[], interactive=True)
        with gr.Column(scale=1, elem_id="row2"):
            novel_content_text = gr.Textbox(
                label="小说正文", lines=32, interactive=True, show_copy_button=True
            )

        with gr.Column(scale=3, elem_id="row3"):
            chatBox = gr.Chatbot(height=f"80vh", label="输出")

    prev_button.click(
        prev_page,
        inputs=[aign, current_page],
        outputs=[current_page, novel_content_text],
        queue=False
    )

    next_button.click(
        next_page,
        inputs=[aign, current_page],
        outputs=[current_page, novel_content_text],
        queue=False
    )

    # Chapter dropdown navigation (the original comment here was mojibake).
    chapter_dropdown.change(
        select_chapter,
        inputs=[aign, chapter_dropdown],
        outputs=[current_page, novel_content_text],
        queue=False
    )

    # Model-selection handling.
    model_dropdown.change(
        set_api_model,
        inputs=[model_dropdown],
        outputs=[model_output]
    )

    save_button.click(
        save_progress,
        inputs=[aign],
        outputs=[download_link]
    )

    # Load button: also refreshes the chapter dropdown.
    load_button.click(
        load_progress,
        inputs=[aign, load_file],
        outputs=[aign, load_status, novel_outline_text, novel_content_text, writing_plan_text, temp_setting_text, writing_memory_text, user_idea_text, user_requriments_text, chapter_dropdown]
    )

    # Save-full-text button.
    save_content_button.click(
        save_content,
        inputs=[aign],
        outputs=[download_link]
    )

    gen_ouline_button.click(
        gen_ouline_button_clicked,
        [aign, user_idea_text, chatBox],
        [aign, chatBox, novel_outline_text, gen_ouline_button],
    )

    # Next-paragraph button: streams state + chapter dropdown updates.
    gen_next_paragraph_button.click(
        gen_next_paragraph_button_clicked,
        [
            aign,
            chatBox,
            user_idea_text,
            novel_outline_text,
            writing_memory_text,
            temp_setting_text,
            writing_plan_text,
            user_requriments_text,
        ],
        [
            aign,
            chatBox,
            writing_plan_text,
            temp_setting_text,
            writing_memory_text,
            novel_content_text,
            gen_next_paragraph_button,
            download_link,
            chapter_dropdown
        ],
    )
457
# Launch the app; queue() enables the generator-based streaming callbacks.
if __name__ == "__main__":
    demo.queue()
    demo.launch()
openAI.py ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+
3
+ from openai import OpenAI
4
+
5
+
6
def openAIChatLLM(model_name=None, api_key=None, base_url=None):
    """Build a chat function bound to an OpenAI-compatible endpoint.

    Credentials and endpoint fall back to the OPENAI_API_KEY /
    OPENAI_BASE_URL environment variables (env vars take precedence over the
    arguments, per the original behavior).

    model_name examples:
    - deepseek-chat

    The returned ``chatLLM(messages, ...)`` either returns a single
    ``{"content", "total_tokens"}`` dict, or (with ``stream=True``) a
    generator of such dicts with cumulative content.
    """
    api_key = os.environ.get("OPENAI_API_KEY", api_key)
    base_url = os.environ.get("OPENAI_BASE_URL", base_url)
    client = OpenAI(api_key=api_key, base_url=base_url)

    def chatLLM(
        messages: list,
        temperature=None,
        top_p=None,
        max_tokens=None,
        stream=False,
        model=model_name,  # per-call override of the default model
    ) -> dict:
        if not stream:
            response = client.chat.completions.create(
                model=model,
                messages=messages,
                temperature=temperature,
                top_p=top_p,
                max_tokens=max_tokens,
            )
            return {
                "content": response.choices[0].message.content,
                "total_tokens": response.usage.total_tokens,
            }

        responses = client.chat.completions.create(
            model=model,
            messages=messages,
            temperature=temperature,
            top_p=top_p,
            max_tokens=max_tokens,
            stream=True,
        )

        def respGenerator():
            content = ""
            total_tokens = None
            for response in responses:
                # Some servers emit usage-only chunks with no choices.
                if response.choices:
                    delta = response.choices[0].delta.content
                    # The delta may be None (e.g. role-only chunks) — skip it.
                    if delta is not None:
                        content += delta

                # The original left the usage check commented out, so
                # total_tokens was always None; report it when the chunk
                # carries usage (typically only the final one).
                usage = getattr(response, "usage", None)
                if usage is not None:
                    total_tokens = usage.total_tokens

                yield {
                    "content": content,
                    "total_tokens": total_tokens,
                }

        return respGenerator()

    return chatLLM
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ dashscope
2
+ openai
3
+ gradio