feature: check associated deliverable, and revise prompts for better consistency
Browse files
src/chatbot/app_interface.py
CHANGED
|
@@ -59,7 +59,7 @@ def update_total_payment_amount_input(value):
|
|
| 59 |
return form_information
|
| 60 |
|
| 61 |
def update_expense_justification_input(value):
|
| 62 |
-
set_form_info("Expense
|
| 63 |
form_information = get_form_info()
|
| 64 |
return form_information
|
| 65 |
|
|
@@ -86,7 +86,7 @@ def update_form_information_handler(form_information):
|
|
| 86 |
if total_payment_amount_input and total_payment_amount_input != "None":
|
| 87 |
total_payment_amount_input = float(total_payment_amount_input)
|
| 88 |
|
| 89 |
-
expense_justification_input = form_information.get("Expense
|
| 90 |
expense_justification_input = None if expense_justification_input == "None" else expense_justification_input
|
| 91 |
|
| 92 |
# form_amount_input = form_information.get("Total Payment Amount", None)
|
|
@@ -114,8 +114,7 @@ def chat_handler(message, history):
|
|
| 114 |
if len(message['files']) > 0:
|
| 115 |
logger.info(f"Received file: {message['files'][0]}")
|
| 116 |
response = user_input_handler(str(message['files'][0]), CURRENT_SESSION_ID)
|
| 117 |
-
|
| 118 |
-
if message['text']!="":
|
| 119 |
try:
|
| 120 |
logger.info(f"Received text: {message['text']}")
|
| 121 |
response = user_input_handler(message['text'], CURRENT_SESSION_ID)
|
|
@@ -186,7 +185,7 @@ with gr.Blocks() as app:
|
|
| 186 |
gr.Markdown("บาท")
|
| 187 |
|
| 188 |
gr.Markdown("**การพิสูจน์ค่าใช้จ่าย**")
|
| 189 |
-
expense_justification_input = gr.Textbox(value=ast.literal_eval(form_info["Expense
|
| 190 |
|
| 191 |
submit_btn = gr.Button("Submit", variant="primary")
|
| 192 |
|
|
|
|
| 59 |
return form_information
|
| 60 |
|
| 61 |
def update_expense_justification_input(value):
|
| 62 |
+
set_form_info("Expense Description", value)
|
| 63 |
form_information = get_form_info()
|
| 64 |
return form_information
|
| 65 |
|
|
|
|
| 86 |
if total_payment_amount_input and total_payment_amount_input != "None":
|
| 87 |
total_payment_amount_input = float(total_payment_amount_input)
|
| 88 |
|
| 89 |
+
expense_justification_input = form_information.get("Expense Description", None)
|
| 90 |
expense_justification_input = None if expense_justification_input == "None" else expense_justification_input
|
| 91 |
|
| 92 |
# form_amount_input = form_information.get("Total Payment Amount", None)
|
|
|
|
| 114 |
if len(message['files']) > 0:
|
| 115 |
logger.info(f"Received file: {message['files'][0]}")
|
| 116 |
response = user_input_handler(str(message['files'][0]), CURRENT_SESSION_ID)
|
| 117 |
+
elif message['text']!="":
|
|
|
|
| 118 |
try:
|
| 119 |
logger.info(f"Received text: {message['text']}")
|
| 120 |
response = user_input_handler(message['text'], CURRENT_SESSION_ID)
|
|
|
|
| 185 |
gr.Markdown("บาท")
|
| 186 |
|
| 187 |
gr.Markdown("**การพิสูจน์ค่าใช้จ่าย**")
|
| 188 |
+
expense_justification_input = gr.Textbox(value=ast.literal_eval(form_info["Expense Description"]), label="", container=False, lines=4)
|
| 189 |
|
| 190 |
submit_btn = gr.Button("Submit", variant="primary")
|
| 191 |
|
src/chatbot/form_management/acceptance_criteria.py
CHANGED
|
@@ -66,12 +66,21 @@ acceptance_criteria = {
|
|
| 66 |
"exampleInput": "2000"
|
| 67 |
},
|
| 68 |
{
|
| 69 |
-
"fieldName": "
|
| 70 |
"priority": 8,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 71 |
"description": "The expense description pertaining to the project's scope.",
|
| 72 |
"validationRules": [
|
| 73 |
"Cannot be none.",
|
| 74 |
-
"Must provide a description for how the expense contributes towards the
|
| 75 |
],
|
| 76 |
"exampleInput": "ค่าจ้างแรงงาน ทำแนวกันไฟในจุดพื้นที่เสี่ยงต่อครั้งที่ 2-8 ทุก 10 วัน"
|
| 77 |
}
|
|
|
|
| 66 |
"exampleInput": "2000"
|
| 67 |
},
|
| 68 |
{
|
| 69 |
+
"fieldName": "Associated Deliverable",
|
| 70 |
"priority": 8,
|
| 71 |
+
"description": "The deliverable associated with the payment.",
|
| 72 |
+
"validationRules": [
|
| 73 |
+
"Cannot be none.",
|
| 74 |
+
],
|
| 75 |
+
"exampleInput": "ค่าจ้างแรงงาน ทำแนวกันไฟในจุดพื้นที่เสี่ยงต่อครั้งที่ 2-8 ทุก 10 วัน"
|
| 76 |
+
},
|
| 77 |
+
{
|
| 78 |
+
"fieldName": "Expense Description",
|
| 79 |
+
"priority": 9,
|
| 80 |
"description": "The expense description pertaining to the project's scope.",
|
| 81 |
"validationRules": [
|
| 82 |
"Cannot be none.",
|
| 83 |
+
"Must provide a description for how the expense contributes towards the associated deliverable."
|
| 84 |
],
|
| 85 |
"exampleInput": "ค่าจ้างแรงงาน ทำแนวกันไฟในจุดพื้นที่เสี่ยงต่อครั้งที่ 2-8 ทุก 10 วัน"
|
| 86 |
}
|
src/chatbot/llm_engine.py
CHANGED
|
@@ -3,7 +3,11 @@
|
|
| 3 |
import os
|
| 4 |
from langchain_openai import ChatOpenAI
|
| 5 |
import openai
|
| 6 |
-
from prompts.chat_completion_prompts import
|
|
|
|
|
|
|
|
|
|
|
|
|
| 7 |
from dotenv import load_dotenv
|
| 8 |
from datetime import datetime
|
| 9 |
|
|
@@ -44,4 +48,16 @@ def generate_expense_description_feedback(project_tasks: dict, expense_descripti
|
|
| 44 |
input=expense_description_feedback_prompt.format(project_tasks, expense_description)
|
| 45 |
)
|
| 46 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 47 |
return response.output_text
|
|
|
|
| 3 |
import os
|
| 4 |
from langchain_openai import ChatOpenAI
|
| 5 |
import openai
|
| 6 |
+
from prompts.chat_completion_prompts import (
|
| 7 |
+
feedback_generation_prompt,
|
| 8 |
+
expense_description_feedback_prompt,
|
| 9 |
+
map_input_to_project_deliverables_prompt
|
| 10 |
+
)
|
| 11 |
from dotenv import load_dotenv
|
| 12 |
from datetime import datetime
|
| 13 |
|
|
|
|
| 48 |
input=expense_description_feedback_prompt.format(project_tasks, expense_description)
|
| 49 |
)
|
| 50 |
|
| 51 |
+
return response.output_text
|
| 52 |
+
|
| 53 |
+
def map_input_to_project_deliverable(user_input: str, project_deliverables: str) -> str:
|
| 54 |
+
"""
|
| 55 |
+
Map user input to project deliverables.
|
| 56 |
+
"""
|
| 57 |
+
response = client.responses.create(
|
| 58 |
+
model="gpt-4.1-mini",
|
| 59 |
+
temperature=0,
|
| 60 |
+
input=map_input_to_project_deliverables_prompt.format(user_input, project_deliverables)
|
| 61 |
+
)
|
| 62 |
+
|
| 63 |
return response.output_text
|
src/chatbot/nodes.py
CHANGED
|
@@ -8,7 +8,12 @@ from langgraph.prebuilt import create_react_agent, ToolNode
|
|
| 8 |
from langgraph.checkpoint.memory import MemorySaver
|
| 9 |
|
| 10 |
# from chatbot.llm_engine import llm_overall_agent, generate_expense_info_feedback
|
| 11 |
-
from llm_engine import
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 12 |
from form_management.acceptance_criteria import acceptance_criteria
|
| 13 |
from form_management.project_task_descriptions import project_tasks
|
| 14 |
from prompts.agent_prompts import speaker_system_message
|
|
@@ -16,6 +21,10 @@ from form_management.form_management import Form
|
|
| 16 |
from kie import receipt_kie
|
| 17 |
import ast
|
| 18 |
import json
|
|
|
|
|
|
|
|
|
|
|
|
|
| 19 |
|
| 20 |
# placeholder, init empty form info
|
| 21 |
form_info = {
|
|
@@ -31,9 +40,16 @@ form_info = {
|
|
| 31 |
# "Row Entries": [],
|
| 32 |
# },
|
| 33 |
# "Incompleteness Description": 'None',
|
| 34 |
-
"
|
|
|
|
| 35 |
}
|
| 36 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 37 |
def get_form_info():
|
| 38 |
return form_info
|
| 39 |
|
|
@@ -76,16 +92,16 @@ def apply_ocr(image_path: str) -> dict:
|
|
| 76 |
|
| 77 |
feedback = auditor_feedback(form_info)
|
| 78 |
|
| 79 |
-
print("feedback: {}".format(feedback))
|
| 80 |
|
| 81 |
return f"""
|
| 82 |
-
<
|
| 83 |
{form_info}
|
| 84 |
-
</
|
| 85 |
|
| 86 |
-
<
|
| 87 |
{feedback}
|
| 88 |
-
</
|
| 89 |
"""
|
| 90 |
|
| 91 |
@tool
|
|
@@ -103,19 +119,29 @@ def edit_form(key: str, value: str) -> dict:
|
|
| 103 |
form_info[key] = value
|
| 104 |
print(form_info)
|
| 105 |
print(f"Updated {key} to {value}")
|
| 106 |
-
if key == "Expense
|
| 107 |
-
feedback =
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 108 |
else:
|
| 109 |
feedback = auditor_feedback(form_info)
|
|
|
|
|
|
|
| 110 |
return f"""
|
| 111 |
-
|
| 112 |
-
|
| 113 |
-
|
| 114 |
|
| 115 |
-
|
| 116 |
-
|
| 117 |
-
|
| 118 |
-
|
| 119 |
else:
|
| 120 |
print(form_info)
|
| 121 |
print(f"Invalid key: {key}. No changes made.")
|
|
@@ -128,7 +154,11 @@ def inspect_form() -> dict:
|
|
| 128 |
Use this when you need to address any inquiries the user might have about the current state of the form.
|
| 129 |
"""
|
| 130 |
print("using inspect_form tool")
|
| 131 |
-
return
|
|
|
|
|
|
|
|
|
|
|
|
|
| 132 |
|
| 133 |
tools = [apply_ocr, edit_form, inspect_form]
|
| 134 |
|
|
@@ -163,11 +193,17 @@ def speaker_chatbot_node(state: State):
|
|
| 163 |
|
| 164 |
# provides feedback based on form info
|
| 165 |
def auditor_feedback(form_info):
|
|
|
|
| 166 |
response = generate_expense_info_feedback(acceptance_criteria, form_info)
|
| 167 |
return response
|
| 168 |
|
| 169 |
-
def
|
| 170 |
-
response = generate_expense_description_feedback(
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 171 |
return response
|
| 172 |
|
| 173 |
# # example usage
|
|
|
|
| 8 |
from langgraph.checkpoint.memory import MemorySaver
|
| 9 |
|
| 10 |
# from chatbot.llm_engine import llm_overall_agent, generate_expense_info_feedback
|
| 11 |
+
from llm_engine import (
|
| 12 |
+
llm_overall_agent,
|
| 13 |
+
generate_expense_info_feedback,
|
| 14 |
+
generate_expense_description_feedback,
|
| 15 |
+
map_input_to_project_deliverable
|
| 16 |
+
)
|
| 17 |
from form_management.acceptance_criteria import acceptance_criteria
|
| 18 |
from form_management.project_task_descriptions import project_tasks
|
| 19 |
from prompts.agent_prompts import speaker_system_message
|
|
|
|
| 21 |
from kie import receipt_kie
|
| 22 |
import ast
|
| 23 |
import json
|
| 24 |
+
import sqlite3
|
| 25 |
+
|
| 26 |
+
conn = sqlite3.connect(r'C:\Users\Gavin\Desktop\hello-earth\db\project_data.db')
|
| 27 |
+
cursor = conn.cursor()
|
| 28 |
|
| 29 |
# placeholder, init empty form info
|
| 30 |
form_info = {
|
|
|
|
| 40 |
# "Row Entries": [],
|
| 41 |
# },
|
| 42 |
# "Incompleteness Description": 'None',
|
| 43 |
+
"Associated Deliverable": 'None',
|
| 44 |
+
"Expense Description": 'None',
|
| 45 |
}
|
| 46 |
|
| 47 |
+
# define the project_deliverables from the db
|
| 48 |
+
cursor.execute("SELECT title FROM deliverables")
|
| 49 |
+
rows = cursor.fetchall()
|
| 50 |
+
deliverable_titles = [row[0] for row in rows]
|
| 51 |
+
print(f"deliverable_titles: {deliverable_titles}")
|
| 52 |
+
|
| 53 |
def get_form_info():
|
| 54 |
return form_info
|
| 55 |
|
|
|
|
| 92 |
|
| 93 |
feedback = auditor_feedback(form_info)
|
| 94 |
|
| 95 |
+
print("feedback: {} end of feedback".format(feedback))
|
| 96 |
|
| 97 |
return f"""
|
| 98 |
+
<form_status>
|
| 99 |
{form_info}
|
| 100 |
+
</form_status>
|
| 101 |
|
| 102 |
+
<feedback>
|
| 103 |
{feedback}
|
| 104 |
+
</feedback>
|
| 105 |
"""
|
| 106 |
|
| 107 |
@tool
|
|
|
|
| 119 |
form_info[key] = value
|
| 120 |
print(form_info)
|
| 121 |
print(f"Updated {key} to {value}")
|
| 122 |
+
if key == "Expense Description":
|
| 123 |
+
feedback = expense_description_feedback(value)
|
| 124 |
+
elif key == "Associated Deliverable":
|
| 125 |
+
res = map_associated_deliverable(value, deliverable_titles) # llm function that maps the user text to a project deliverable
|
| 126 |
+
if res == "No Matching Deliverable": # if no matching deliverable,
|
| 127 |
+
form_info[key] = "None" # set the associated deliverable to None
|
| 128 |
+
feedback = "A valid associated deliverable is required."
|
| 129 |
+
else:
|
| 130 |
+
form_info[key] = res
|
| 131 |
+
feedback = auditor_feedback(form_info)
|
| 132 |
else:
|
| 133 |
feedback = auditor_feedback(form_info)
|
| 134 |
+
|
| 135 |
+
print("feedback: {} end of feedback".format(feedback))
|
| 136 |
return f"""
|
| 137 |
+
<form_status>
|
| 138 |
+
{form_info}
|
| 139 |
+
</form_status>
|
| 140 |
|
| 141 |
+
<feedback>
|
| 142 |
+
{feedback}
|
| 143 |
+
</feedback>
|
| 144 |
+
"""
|
| 145 |
else:
|
| 146 |
print(form_info)
|
| 147 |
print(f"Invalid key: {key}. No changes made.")
|
|
|
|
| 154 |
Use this when you need to address any inquiries the user might have about the current state of the form.
|
| 155 |
"""
|
| 156 |
print("using inspect_form tool")
|
| 157 |
+
return """
|
| 158 |
+
<form_status>
|
| 159 |
+
{form_info}
|
| 160 |
+
</form_status>
|
| 161 |
+
"""
|
| 162 |
|
| 163 |
tools = [apply_ocr, edit_form, inspect_form]
|
| 164 |
|
|
|
|
| 193 |
|
| 194 |
# provides feedback based on form info
|
| 195 |
def auditor_feedback(form_info):
|
| 196 |
+
print("in auditor_feedback...")
|
| 197 |
response = generate_expense_info_feedback(acceptance_criteria, form_info)
|
| 198 |
return response
|
| 199 |
|
| 200 |
+
def expense_description_feedback(expense_description):
|
| 201 |
+
response = generate_expense_description_feedback(form_info["Associated Deliverable"], expense_description)
|
| 202 |
+
return response
|
| 203 |
+
|
| 204 |
+
# assume static project deliverables, would need project id to get the deliverables in actual case
|
| 205 |
+
def map_associated_deliverable(user_input, project_deliverables):
|
| 206 |
+
response = map_input_to_project_deliverable(user_input, str(project_deliverables))
|
| 207 |
return response
|
| 208 |
|
| 209 |
# # example usage
|
src/chatbot/prompts/agent_prompts.py
CHANGED
|
@@ -11,7 +11,8 @@ Buyer Name: The name of the person or entity making the payment.
|
|
| 11 |
Buyer Address: The address of the buyer.
|
| 12 |
Transaction Date: The date of the transaction.
|
| 13 |
Total Payment Amount: The total amount paid.
|
| 14 |
-
|
|
|
|
| 15 |
|
| 16 |
Users cannot add new fields to the form, but they can edit the existing fields.
|
| 17 |
Each time the form is revised, feedback will be provided, and your job to summarize current form information and convey the feedback to the user.
|
|
|
|
| 11 |
Buyer Address: The address of the buyer.
|
| 12 |
Transaction Date: The date of the transaction.
|
| 13 |
Total Payment Amount: The total amount paid.
|
| 14 |
+
Associated Deliverable: The deliverable associated with the payment.
|
| 15 |
+
Expense Description: The description of the expense as it pertains to the project.
|
| 16 |
|
| 17 |
Users cannot add new fields to the form, but they can edit the existing fields.
|
| 18 |
Each time the form is revised, feedback will be provided, and your job to summarize current form information and convey the feedback to the user.
|
src/chatbot/prompts/chat_completion_prompts.py
CHANGED
|
@@ -1,5 +1,5 @@
|
|
| 1 |
feedback_generation_prompt = """Provide feedback on the following form information based on the acceptance criteria.
|
| 2 |
-
The acceptance
|
| 3 |
|
| 4 |
"fieldName": "<the field name>",
|
| 5 |
"priority": <the priority number, where 1 is highest>,
|
|
@@ -10,9 +10,10 @@ The acceptance critiria has the following format:
|
|
| 10 |
],
|
| 11 |
"exampleInput": "<the example input for the field>"
|
| 12 |
|
| 13 |
-
|
|
|
|
| 14 |
Adhere strictly to the rules and provide feedback only for the fields that have violated their rules.
|
| 15 |
-
Only output the field name, the rule that was broken, and an explanation on why the value is invalid in the following format:
|
| 16 |
|
| 17 |
Field Name: <Field Name in question>
|
| 18 |
Broken Rule: <Rule that was broken>
|
|
@@ -22,23 +23,26 @@ If there are no violations, output "Form is complete and valid".
|
|
| 22 |
|
| 23 |
The current date is: {}.
|
| 24 |
|
| 25 |
-
|
| 26 |
-
{}
|
| 27 |
-
</Acceptance Criteria>
|
| 28 |
-
|
| 29 |
-
<Form Information>
|
| 30 |
-
{}
|
| 31 |
-
</Form Information>
|
| 32 |
-
|
| 33 |
|
|
|
|
| 34 |
"""
|
| 35 |
|
| 36 |
-
expense_description_feedback_prompt = """Determine whether the expense description is
|
| 37 |
-
Validity is determined by whether the expense description is relevant to the project
|
| 38 |
|
| 39 |
If the expense description is relevant, output "Form is complete and valid".
|
| 40 |
-
If the expense description is not relevant, provide your rationale on why it is not relevant.
|
|
|
|
| 41 |
|
| 42 |
-
|
| 43 |
Expense Description: {}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 44 |
"""
|
|
|
|
| 1 |
feedback_generation_prompt = """Provide feedback on the following form information based on the acceptance criteria.
|
| 2 |
+
The acceptance criteria has the following format:
|
| 3 |
|
| 4 |
"fieldName": "<the field name>",
|
| 5 |
"priority": <the priority number, where 1 is highest>,
|
|
|
|
| 10 |
],
|
| 11 |
"exampleInput": "<the example input for the field>"
|
| 12 |
|
| 13 |
+
Determine the highest priority field with a violated rule.
|
| 14 |
+
Provide feedback of that field only.
|
| 15 |
Adhere strictly to the rules and provide feedback only for the fields that have violated their rules.
|
| 16 |
+
Only output the one field name, the rule that was broken, and an explanation on why the value is invalid in the following format:
|
| 17 |
|
| 18 |
Field Name: <Field Name in question>
|
| 19 |
Broken Rule: <Rule that was broken>
|
|
|
|
| 23 |
|
| 24 |
The current date is: {}.
|
| 25 |
|
| 26 |
+
acceptance_criteria: {}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 27 |
|
| 28 |
+
form_information: {}
|
| 29 |
"""
|
| 30 |
|
| 31 |
+
expense_description_feedback_prompt = """Determine whether the expense description is relevant to the associated project deliverable.
|
| 32 |
+
Validity is determined by whether the expense description is relevant to the associated project deliverable.
|
| 33 |
|
| 34 |
If the expense description is relevant, output "Form is complete and valid".
|
| 35 |
+
If the expense description is not relevant to the associated deliverable, provide your rationale on why it is not relevant.
|
| 36 |
+
If the associated deliverable is empty, output "A valid associated deliverable is required".
|
| 37 |
|
| 38 |
+
Associated Deliverable: {}
|
| 39 |
Expense Description: {}
|
| 40 |
+
"""
|
| 41 |
+
|
| 42 |
+
map_input_to_project_deliverables_prompt = """Determine which, if any of the project deliverables the user input is related to.
|
| 43 |
+
If the user input is related to a project deliverable, output the deliverable title.
|
| 44 |
+
If the user input is not related to any project deliverable, output "No Matching Deliverable".
|
| 45 |
+
|
| 46 |
+
User Input: {}
|
| 47 |
+
Project Deliverables: {}
|
| 48 |
"""
|