Spaces:
Sleeping
Sleeping
Commit ·
5ff92b4
1
Parent(s): b528c27
feat: add display diff feature
Browse files- app.py +17 -8
- diff_text.py +80 -0
app.py
CHANGED
|
@@ -23,6 +23,7 @@ from json_repair import repair_json
|
|
| 23 |
import json_repair # enable streaming
|
| 24 |
|
| 25 |
from chat_history_manager import ChatHistoryManager
|
|
|
|
| 26 |
|
| 27 |
# Initialize OpenAI client
|
| 28 |
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
|
|
@@ -402,14 +403,17 @@ def handle_response(message, history, textbox_content):
|
|
| 402 |
handle_response.current_chat_id = chat_id
|
| 403 |
|
| 404 |
integrated_message = message
|
|
|
|
|
|
|
|
|
|
| 405 |
|
| 406 |
-
if not (message == CONVERSATION_STARTER or
|
| 407 |
integrated_message = f"""
|
| 408 |
用戶當前的需求:
|
| 409 |
{message}
|
| 410 |
|
| 411 |
用戶對您生成的教案進行了以下修改:
|
| 412 |
-
{
|
| 413 |
|
| 414 |
請根據用戶的需求和修改內容,更新教案,並依照步驟生成下一部分內容。
|
| 415 |
確保您:
|
|
@@ -426,7 +430,7 @@ def handle_response(message, history, textbox_content):
|
|
| 426 |
full_response = ""
|
| 427 |
current_lesson_plan = ""
|
| 428 |
suggestion = ""
|
| 429 |
-
next_step_prompt = [[]]
|
| 430 |
|
| 431 |
try:
|
| 432 |
with client.beta.threads.runs.stream(
|
|
@@ -454,7 +458,7 @@ def handle_response(message, history, textbox_content):
|
|
| 454 |
# for text in stream.text_deltas:
|
| 455 |
# full_response += text
|
| 456 |
|
| 457 |
-
# Debug: Print accumulated response
|
| 458 |
print(f"Accumulated response length: {len(full_response)}", flush=True)
|
| 459 |
|
| 460 |
# Skip if response is too short to be valid JSON
|
|
@@ -500,8 +504,13 @@ def handle_response(message, history, textbox_content):
|
|
| 500 |
msg_records.append({'role': 'user', 'content': message})
|
| 501 |
msg_records.append({'role': 'assistant', 'content': suggestion})
|
| 502 |
chat_manager.save_chat(user_id, chat_id, msg_records, current_lesson_plan)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 503 |
|
| 504 |
-
yield suggestion,
|
| 505 |
|
| 506 |
|
| 507 |
def handle_quick_response_click(selected):
|
|
@@ -538,11 +547,11 @@ def clear_input():
|
|
| 538 |
return gr.update(value="")
|
| 539 |
|
| 540 |
# Handle chat submission
|
| 541 |
-
def chat_submit(message, history):
|
| 542 |
if message:
|
| 543 |
msg_records = [{'role': msg['role'], 'content': msg['content']} for msg in history]
|
| 544 |
msg_records.append({'role': 'user', 'content': message})
|
| 545 |
-
response_generator = handle_response(message, history, textbox
|
| 546 |
for suggestion, current_lesson_plan, next_step_prompt in response_generator:
|
| 547 |
yield msg_records + [{'role': 'assistant', 'content': suggestion}], current_lesson_plan, [[]]
|
| 548 |
yield msg_records + [{'role': 'assistant', 'content': suggestion}], current_lesson_plan, next_step_prompt
|
|
@@ -588,7 +597,7 @@ with gr.Blocks(css="""
|
|
| 588 |
|
| 589 |
prompt_input.submit(
|
| 590 |
chat_submit,
|
| 591 |
-
inputs=[prompt_input, chatbot],
|
| 592 |
outputs=[chatbot, textbox, hidden_list]
|
| 593 |
).then(
|
| 594 |
clear_input,
|
|
|
|
| 23 |
import json_repair # enable streaming
|
| 24 |
|
| 25 |
from chat_history_manager import ChatHistoryManager
|
| 26 |
+
from diff_text import compare_text, extract_modified_sections
|
| 27 |
|
| 28 |
# Initialize OpenAI client
|
| 29 |
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
|
|
|
|
| 403 |
handle_response.current_chat_id = chat_id
|
| 404 |
|
| 405 |
integrated_message = message
|
| 406 |
+
|
| 407 |
+
just_modifications = extract_modified_sections(textbox_content)
|
| 408 |
+
prev_lesson_plan = just_modifications if just_modifications else textbox_content
|
| 409 |
|
| 410 |
+
if not (message == CONVERSATION_STARTER or prev_lesson_plan == ""):
|
| 411 |
integrated_message = f"""
|
| 412 |
用戶當前的需求:
|
| 413 |
{message}
|
| 414 |
|
| 415 |
用戶對您生成的教案進行了以下修改:
|
| 416 |
+
{prev_lesson_plan}
|
| 417 |
|
| 418 |
請根據用戶的需求和修改內容,更新教案,並依照步驟生成下一部分內容。
|
| 419 |
確保您:
|
|
|
|
| 430 |
full_response = ""
|
| 431 |
current_lesson_plan = ""
|
| 432 |
suggestion = ""
|
| 433 |
+
next_step_prompt = [[]]
|
| 434 |
|
| 435 |
try:
|
| 436 |
with client.beta.threads.runs.stream(
|
|
|
|
| 458 |
# for text in stream.text_deltas:
|
| 459 |
# full_response += text
|
| 460 |
|
| 461 |
+
# Debug: Print accumulated response length
|
| 462 |
print(f"Accumulated response length: {len(full_response)}", flush=True)
|
| 463 |
|
| 464 |
# Skip if response is too short to be valid JSON
|
|
|
|
| 504 |
msg_records.append({'role': 'user', 'content': message})
|
| 505 |
msg_records.append({'role': 'assistant', 'content': suggestion})
|
| 506 |
chat_manager.save_chat(user_id, chat_id, msg_records, current_lesson_plan)
|
| 507 |
+
|
| 508 |
+
|
| 509 |
+
# Compare textbox_content with current_lesson_plan to show differences
|
| 510 |
+
compared_lesson_plan = compare_text(prev_lesson_plan, current_lesson_plan) if prev_lesson_plan else current_lesson_plan
|
| 511 |
+
|
| 512 |
|
| 513 |
+
yield suggestion, compared_lesson_plan, next_step_prompt
|
| 514 |
|
| 515 |
|
| 516 |
def handle_quick_response_click(selected):
|
|
|
|
| 547 |
return gr.update(value="")
|
| 548 |
|
| 549 |
# Handle chat submission
|
| 550 |
+
def chat_submit(message, history, textbox):
    """Stream the assistant's reply for a newly submitted chat message.

    Args:
        message (str): The user's new message; a falsy message yields nothing.
        history (list[dict]): Prior chat messages as ``{'role', 'content'}`` dicts.
        textbox: Current lesson-plan textbox content, forwarded to
            ``handle_response``.

    Yields:
        tuple: ``(messages, lesson_plan, next_step_prompt)`` — intermediate
        yields carry a placeholder ``[[]]`` prompt while streaming; the final
        yield repeats the last state with the real next-step prompt.
    """
    if message:
        msg_records = [{'role': msg['role'], 'content': msg['content']} for msg in history]
        msg_records.append({'role': 'user', 'content': message})
        # Defaults so the trailing yield is safe even if handle_response
        # yields nothing (the original raised NameError on an empty generator).
        suggestion, current_lesson_plan, next_step_prompt = "", "", [[]]
        for suggestion, current_lesson_plan, next_step_prompt in handle_response(message, history, textbox):
            yield msg_records + [{'role': 'assistant', 'content': suggestion}], current_lesson_plan, [[]]
        yield msg_records + [{'role': 'assistant', 'content': suggestion}], current_lesson_plan, next_step_prompt
|
|
|
|
| 597 |
|
| 598 |
prompt_input.submit(
|
| 599 |
chat_submit,
|
| 600 |
+
inputs=[prompt_input, chatbot, textbox],
|
| 601 |
outputs=[chatbot, textbox, hidden_list]
|
| 602 |
).then(
|
| 603 |
clear_input,
|
diff_text.py
ADDED
|
@@ -0,0 +1,80 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# from difflib import unified_diff
|
| 2 |
+
from difflib import SequenceMatcher
|
| 3 |
+
|
| 4 |
+
# Separator between the full updated text and the change report that follows it.
# NOTE(review): "SEPERATOR" is a misspelling of "SEPARATOR"; the name is public
# (imported by callers), so it is kept for backward compatibility.
SEPERATOR = "\n\n\n=====# 教案變更=====\n\n\n"


def compare_text(text1, text2):
    """Compare two text strings and return their differences in a human-readable format.

    The result is ``text2`` in full, then ``SEPERATOR``, then a line-based
    change report (replaced / removed / inserted sections) computed with
    ``difflib.SequenceMatcher``. Unchanged ('equal') sections are deliberately
    omitted from the report, since ``text2`` is already shown in full.

    Args:
        text1 (str): Original text string.
        text2 (str): New text string.

    Returns:
        str: ``text2`` unchanged when ``text1`` is empty or equal to
        ``text2``; otherwise ``text2`` plus the formatted differences.
    """
    # Nothing to diff against: just show the new text.
    if not text1:
        return text2

    if text1 == text2:
        return text2

    # Diff is computed line by line.
    text1_lines = text1.splitlines()
    text2_lines = text2.splitlines()

    # Start with the full new text, then append the change report after the
    # separator line.
    output = [text2, SEPERATOR]

    matcher = SequenceMatcher(None, text1_lines, text2_lines)
    for op, i1, i2, j1, j2 in matcher.get_opcodes():
        if op == 'replace':
            # Show both the removed and the added content.
            output.append("\n【修改前】")
            output.extend(f"- {line}" for line in text1_lines[i1:i2])
            output.append("\n【修改後】")
            output.extend(f"+ {line}" for line in text2_lines[j1:j2])
        elif op == 'delete':
            # Show removed content.
            output.append("\n【已移除】")
            output.extend(f"- {line}" for line in text1_lines[i1:i2])
        elif op == 'insert':
            # Show added content.
            output.append("\n【新增內容】")
            output.extend(f"+ {line}" for line in text2_lines[j1:j2])
        # 'equal' opcodes are intentionally skipped (see docstring).

    return "\n".join(output)
|
| 62 |
+
|
| 63 |
+
def extract_modified_sections(compared_text):
    """Extract the part of the compared text that appears before the SEPERATOR line.

    For a string produced by ``compare_text``, that leading part is the full
    updated text (the change report follows the separator).

    Args:
        compared_text (str): The text returned by the ``compare_text`` function.

    Returns:
        str: The content before the SEPERATOR line, or ``""`` when
        ``compared_text`` is empty. If no separator is present, the whole
        input is returned.
    """
    if not compared_text:
        return ""

    # str.split always returns at least one element, so no length check is
    # needed (the original `if len(parts) > 0` guard was dead code).
    return compared_text.split(SEPERATOR, 1)[0]
|