Commit e83ea81
Parent(s): f54fc8b
sdlc 80 percent
- src/langgraphagenticai/graph/graph_builder.py +142 -10
- src/langgraphagenticai/main.py +4 -1
- src/langgraphagenticai/node/sdlc_node.py +399 -27
- src/langgraphagenticai/state/state.py +40 -5
- src/langgraphagenticai/ui/streamlitui/display_result.py +103 -14
- src/langgraphagenticai/ui/streamlitui/loadui.py +45 -6
- src/langgraphagenticai/ui/streamlitui/sdlcfeedback.py +234 -0
src/langgraphagenticai/graph/graph_builder.py
CHANGED
@@ -2,7 +2,11 @@ from langgraph.graph import StateGraph, END, MessagesState
 from langgraph.prebuilt import tools_condition, ToolNode
 from langchain_core.prompts import ChatPromptTemplate
 import datetime
+from langgraph.types import interrupt, Command
+from langgraph.checkpoint.memory import MemorySaver
+import streamlit as st
 #module import
+
 from src.langgraphagenticai.node.sdlc_node import SDLCNode
 from src.langgraphagenticai.node.ai_news_node import AINewsNode
 from src.langgraphagenticai.node import travel_planner_node
@@ -150,21 +154,147 @@ class GraphBuilder:
         self.graph_builder.add_edge("fetch_news", "summarize_news")
         self.graph_builder.add_edge("summarize_news", "save_result")
         self.graph_builder.add_edge("save_result", END)
+
+
     def sdlc_workflow_build_graph(self):
         sdlc_wf_node = SDLCNode(self.llm)
         self.graph_builder = self.sdlc_graph_builder
+        try:
+            # Add all primary workflow nodes.
+            nodes = [
+                ("generate_user_stories", sdlc_wf_node.generate_user_stories),
+                ("product_owner_review", sdlc_wf_node.product_owner_review),
+                ("create_design_docs", sdlc_wf_node.create_design_docs),
+                ("revise_user_stories", sdlc_wf_node.revise_user_stories),
+                ("design_review", sdlc_wf_node.design_review),
+                ("generate_code", sdlc_wf_node.generate_code),
+                ("code_review", sdlc_wf_node.code_review),
+                ("security_review", sdlc_wf_node.security_review),
+                ("fix_code_after_code_review", sdlc_wf_node.fix_code_after_code_review),
+                ("fix_code_after_security", sdlc_wf_node.fix_code_after_security),
+                ("write_test_cases", sdlc_wf_node.write_test_cases),
+                ("test_cases_review", sdlc_wf_node.test_cases_review),
+                ("fix_test_cases", sdlc_wf_node.fix_test_cases),
+            ]
+
+            # Helper functions to wrap review nodes.
+            def human_loop_node(review_field):
+                def node(state):
+                    # Trigger an interrupt to surface the LLM-generated review.
+                    if st.session_state.user_decision != "approve":
+                        value = interrupt({
+                            "__interrupt__": True,
+                            "review": state.get(review_field, ""),
+                            "instruction": f"Please review the '{review_field}'. Approve or provide feedback to reject."
+                        })
+                    else:
+                        value = st.session_state.user_decision
+                        st.session_state.user_decision = ''
+                    return {"human_decision": value}
+                return node
+
+            def decision_node(previous_node):
+                def node(state):
+                    if state.get("human_decision") == "approve":
+                        state["decision"] = "approve"
+                    else:
+                        state["decision"] = "reject"
+                        state["feedback"] = state.get("human_decision")
+                    return state
+                return node
+
+            review_nodes = ["product_owner_review", "design_review", "code_review", "test_cases_review"]
+            additional_nodes = []
+            for review in review_nodes:
+                additional_nodes.append((f"human_loop_{review}", human_loop_node(review)))
+                # Set the previous node for rejection (adjust as needed):
+                if review == "product_owner_review":
+                    prev = "generate_user_stories"
+                elif review == "design_review":
+                    prev = "revise_user_stories"
+                elif review == "code_review":
+                    prev = "generate_code"
+                elif review == "test_cases_review":
+                    prev = "write_test_cases"
+                additional_nodes.append((f"decision_{review}", decision_node(prev)))
+
+            # Add all nodes to the graph.
+            for node_name, node_func in nodes + additional_nodes:
+                self.graph_builder.add_node(node_name, node_func)
+
+            # Set entry point.
+            if st.session_state.graph_stage == 'resumed':
+                self.graph_builder.set_entry_point(st.session_state.state['current_step'])
+            else:
+                self.graph_builder.set_entry_point("generate_user_stories")
+
+            # ---- Build Flow Edges ----
+
+            # Wrap product_owner_review:
+            self.graph_builder.add_edge("generate_user_stories", "product_owner_review")
+            self.graph_builder.add_edge("product_owner_review", "human_loop_product_owner_review")
+            self.graph_builder.add_edge("human_loop_product_owner_review", "decision_product_owner_review")
+            self.graph_builder.add_conditional_edges(
+                "decision_product_owner_review",
+                lambda state: "approve" if state.get("decision") == "approve" else "reject",
+                {
+                    "approve": "create_design_docs",
+                    "reject": "generate_user_stories"
+                }
+            )
+
+            # Wrap design_review:
+            self.graph_builder.add_edge("revise_user_stories", "design_review")
+            self.graph_builder.add_edge("design_review", "human_loop_design_review")
+            self.graph_builder.add_edge("human_loop_design_review", "decision_design_review")
+            self.graph_builder.add_conditional_edges(
+                "decision_design_review",
+                lambda state: "approve" if state.get("decision") == "approve" else "reject",
+                {
+                    "approve": "generate_code",
+                    "reject": "revise_user_stories"
+                }
+            )
+
+            # Wrap code_review:
+            self.graph_builder.add_edge("generate_code", "code_review")
+            self.graph_builder.add_edge("code_review", "human_loop_code_review")
+            self.graph_builder.add_edge("human_loop_code_review", "decision_code_review")
+            self.graph_builder.add_conditional_edges(
+                "decision_code_review",
+                lambda state: "approve" if state.get("decision") == "approve" else "reject",
+                {
+                    "approve": "security_review",
+                    "reject": "generate_code"
+                }
+            )
+
+            # Wrap test_cases_review:
+            self.graph_builder.add_edge("write_test_cases", "test_cases_review")
+            self.graph_builder.add_edge("test_cases_review", "human_loop_test_cases_review")
+            self.graph_builder.add_edge("human_loop_test_cases_review", "decision_test_cases_review")
+            self.graph_builder.add_conditional_edges(
+                "decision_test_cases_review",
+                lambda state: "approve" if state.get("decision") == "approve" else "reject",
+                {
+                    "approve": "fix_test_cases",
+                    "reject": "write_test_cases"
+                }
+            )
+
+            # Other sequential edges.
+            self.graph_builder.add_edge("create_design_docs", "revise_user_stories")
+            self.graph_builder.add_edge("security_review", "fix_code_after_code_review")
+            self.graph_builder.add_edge("fix_code_after_code_review", "fix_code_after_security")
+            self.graph_builder.add_edge("fix_code_after_security", "write_test_cases")
+
+            # Set finish point at the end of the workflow.
+            self.graph_builder.set_finish_point("fix_test_cases")
+        except Exception as e:
+            print(e)
+
+        return self.graph_builder
 
 
     def setup_graph(self, usecase: str):
@@ -184,7 +314,9 @@ class GraphBuilder:
         elif usecase =="AI News":
             self.ai_news_build_graph()
         elif usecase =="SDLC Workflow":
+            checkpointer = MemorySaver()
             self.sdlc_workflow_build_graph()
+            return self.graph_builder.compile(checkpointer=checkpointer)
         else:
             raise ValueError("Invalid use case selected.")
         return self.graph_builder.compile()
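For orientation, here is a minimal sketch of how a graph compiled with a MemorySaver checkpointer is usually driven from the outside. This is not code from this commit: the `llm` instance and `initial_state` dict are assumptions, and `Command(resume=...)` is the stock LangGraph resume pattern, whereas the app above resumes by re-entering the graph with its Streamlit session state.

    # Hypothetical driver; `llm` and `initial_state` are assumed to exist.
    import uuid
    from langgraph.types import Command

    config = {"configurable": {"thread_id": str(uuid.uuid4())}}
    graph = GraphBuilder(llm).setup_graph("SDLC Workflow")

    # Stream until a human_loop_* node calls interrupt().
    for event in graph.stream(initial_state, config=config):
        if "__interrupt__" in event:
            break  # surface event["__interrupt__"] to the reviewer here

    # Resume the same checkpointed thread with the reviewer's decision.
    for event in graph.stream(Command(resume="approve"), config=config):
        print(event)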
src/langgraphagenticai/main.py
CHANGED
@@ -18,6 +18,7 @@ def load_langgraph_agenticai_app():
     implementing exception handling for robustness.
     """
     try:
+
         # Load UI
         ui = LoadStreamlitUI()
         user_input = ui.load_streamlit_ui()
@@ -25,11 +26,13 @@ def load_langgraph_agenticai_app():
     if not user_input:
         st.error("Error: Failed to load user input from the UI.")
         return
+
+    user_message = ''
 
     # Text input for user message
     if st.session_state.IsFetchButtonClicked:
         user_message = st.session_state.timeframe
-    elif st.session_state.IsSDLC :
+    elif st.session_state.IsSDLC or 'current_step' in st.session_state.state:
         user_message = st.session_state.state
     else :
         user_message = st.chat_input("Enter your message:")
src/langgraphagenticai/node/sdlc_node.py
CHANGED
@@ -1,38 +1,410 @@
+# sdlc_node.py
 import streamlit as st
+import logging
+from typing import Dict, Any
+
+from src.langgraphagenticai.ui.streamlitui.sdlcfeedback import SDLCUI
+
+# Configure logging for production (adjust level and handlers as necessary)
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
 
 class SDLCNode:
     def __init__(self, llm):
         self.llm = llm
+        self.logger = logging.getLogger(self.__class__.__name__)
 
-    def
-        prompt = f"""
-        Generate comprehensive user stories based on the following requirements:
-        {state['requirements']}
-
-        {f"PO Feedback to incorporate: {state['po_feedback']}" if state.get('po_feedback') else ""}
-
-        Format the user stories in Markdown with clear acceptance criteria.
-        """
-
-    def
-
-    def
-
+    def _update_state(self, updates: Dict[str, Any]) -> None:
+        """
+        Safely update st.session_state['state'] with provided updates.
+        """
+        try:
+            if 'state' not in st.session_state:
+                st.session_state['state'] = {}
+            st.session_state['state'].update(updates)
+        except Exception as e:
+            self.logger.exception("Failed to update session state.")
+            st.error(f"Error updating session state: {e}")
+
+    def refresh_ui(self):
+        try:
+            ui = SDLCUI()
+            ui.render()
+        except Exception as e:
+            self.logger.exception("Error refreshing UI.")
+            st.error(f"UI Refresh failed: {e}")
+
+    def generate_user_stories(self, state: Dict[str, Any]) -> Dict[str, Any]:
+        if st.session_state.state['user_stories'] != '':
+            return {"user_stories": st.session_state.state['user_stories']}
+        try:
+            prompt = f"""
+            Generate comprehensive user stories based on these requirements:
+            {state.get('requirements', 'No requirements provided')}
+
+            {f"PO Feedback to incorporate: {state.get('po_feedback', '')}" if state.get('po_feedback') else ""}
+
+            Format as Markdown with clear acceptance criteria.
+            """
+            response = self.llm.invoke(prompt)
+            content = getattr(response, 'content', None)
+            if content is None:
+                raise ValueError("LLM response does not contain content.")
+            self._update_state({
+                "user_stories": content,
+                "current_result": content,
+                "current_step": "product_owner_review"
+            })
+            return {"user_stories": content}
+        except Exception as e:
+            self.logger.exception("Error in generate_user_stories.")
+            st.error(f"Failed to generate user stories: {e}")
+            return {"error": str(e)}
+
+    def product_owner_review(self, state: Dict[str, Any]) -> Dict[str, Any]:
+        if st.session_state.state['po_feedback'] != '':
+            return {"po_feedback": st.session_state.state['po_feedback']}
+
+        try:
+            prompt = f"""
+            Please review the following user stories and provide your feedback.
+
+            User Stories:
+            {state.get('user_stories', 'No user stories available')}
+
+            If you approve, simply type "approve". Otherwise, provide detailed feedback.
+            """
+            response = self.llm.invoke(prompt)
+            content = getattr(response, 'content', None)
+            if content is None:
+                raise ValueError("LLM response does not contain content.")
+            self._update_state({
+                "po_feedback": content,
+                "current_result": content,
+                "current_step": "create_design_docs"  # Updated to next node
+            })
+            st.session_state.user_decision = None
+            return {"po_feedback": content}
+        except Exception as e:
+            self.logger.exception("Error in product_owner_review.")
+            st.error(f"Product owner review failed: {e}")
+            return {"error": str(e)}
+
+    def create_design_docs(self, state: Dict[str, Any]) -> Dict[str, Any]:
+        if st.session_state.state['design_docs'] != '':
+            return {"design_docs": st.session_state.state['design_docs']}
+        try:
+            prompt = f"""
+            Create comprehensive design documents for the following user stories:
+            {state.get('user_stories', 'No user stories available')}
+
+            Include both functional and technical specifications.
+            Provide architecture diagrams in Mermaid format.
+            """
+            response = self.llm.invoke(prompt)
+            content = getattr(response, 'content', None)
+            if content is None:
+                raise ValueError("LLM response does not contain content.")
+            self._update_state({
+                "design_docs": content,
+                "current_result": content,
+                "current_step": "revise_user_stories"  # Updated to next node
+            })
+            return {"design_docs": content}
+        except Exception as e:
+            self.logger.exception("Error in create_design_docs.")
+            st.error(f"Failed to create design documents: {e}")
+            return {"error": str(e)}
+
+    def revise_user_stories(self, state: Dict[str, Any]) -> Dict[str, Any]:
+        if st.session_state.state['user_stories'] != '':
+            return {"user_stories": st.session_state.state['user_stories']}
+        try:
+            prompt = f"""
+            Revise the user stories based on the following Product Owner feedback:
+
+            Original Stories: {state.get('user_stories', 'No stories generated')}
+            Feedback: {state.get('po_feedback', 'No feedback provided')}
+
+            Please maintain Markdown format and include clear acceptance criteria.
+            """
+            response = self.llm.invoke(prompt)
+            content = getattr(response, 'content', None)
+            if content is None:
+                raise ValueError("LLM response does not contain content.")
+            self._update_state({
+                "user_stories": content,
+                "current_result": content,
+                "current_step": "design_review"  # Updated to next node
+            })
+            return {"user_stories": content}
+        except Exception as e:
+            self.logger.exception("Error in revise_user_stories.")
+            st.error(f"Failed to revise user stories: {e}")
+            return {"error": str(e)}
+
+    def design_review(self, state: Dict[str, Any]) -> Dict[str, Any]:
+        if st.session_state.state['design_feedback'] != '':
+            return {"design_feedback": st.session_state.state['design_feedback']}
+        try:
+            prompt = f"""
+            Please review the following design documents and provide your feedback.
+
+            Design Documents:
+            {state.get('design_docs', 'No design documents available')}
+
+            If you approve, type "approve". Otherwise, provide detailed design feedback.
+            """
+            response = self.llm.invoke(prompt)
+            content = getattr(response, 'content', None)
+            if content is None:
+                raise ValueError("LLM response does not contain content.")
+            self._update_state({
+                "design_feedback": content,
+                "current_result": content,
+                "current_step": "generate_code"  # Updated to next node
+            })
+            st.session_state.user_decision = None
+            return {"design_feedback": content}
+        except Exception as e:
+            self.logger.exception("Error in design_review.")
+            st.error(f"Design review failed: {e}")
+            return {"error": str(e)}
+
+    def generate_code(self, state: Dict[str, Any]) -> Dict[str, Any]:
+        if st.session_state.state['generate_code'] != '':
+            return {"generate_code": st.session_state.state['generate_code']}
+
+        try:
+            prompt = f"""
+            Generate production-quality code for the following design:
+            {state.get('design_docs', 'No design documents available')}
+
+            {f"Code Review Feedback: {state.get('review_feedback', '')}" if state.get('review_feedback') else ""}
+
+            Include error handling, clear comments, and follow best coding practices.
+            """
+            response = self.llm.invoke(prompt)
+            content = getattr(response, 'content', None)
+            if content is None:
+                raise ValueError("LLM response does not contain content.")
+            self._update_state({
+                "generate_code": content,
+                "current_result": content,
+                "current_step": "code_review"  # Updated to next node
+            })
+            return {"generate_code": content}
+        except Exception as e:
+            self.logger.exception("Error in generate_code.")
+            st.error(f"Failed to generate code: {e}")
+            return {"error": str(e)}
+
+    def code_review(self, state: Dict[str, Any]) -> Dict[str, Any]:
+        if st.session_state.state['review_feedback'] != '':
+            return {"review_feedback": st.session_state.state['review_feedback']}
+        try:
+            prompt = f"""
+            Please review the following generated code and provide your feedback.
+
+            Code:
+            {state.get('generated_code', 'No code generated')}
+
+            If the code is acceptable, type "approve". Otherwise, provide specific code review feedback.
+            """
+            response = self.llm.invoke(prompt)
+            content = getattr(response, 'content', None)
+            if content is None:
+                raise ValueError("LLM response does not contain content.")
+            self._update_state({
+                "code_review": content,
+                "current_result": content,
+                "current_step": "security_review"  # Updated to next node
+            })
+            st.session_state.user_decision = None
+            return {"review_feedback": content}
+        except Exception as e:
+            self.logger.exception("Error in code_review.")
+            st.error(f"Code review failed: {e}")
+            return {"error": str(e)}
+
+    def security_review(self, state: Dict[str, Any]) -> Dict[str, Any]:
+        if st.session_state.state['security_feedback'] != '':
+            return {"security_feedback": st.session_state.state['security_feedback']}
+        try:
+            prompt = f"""
+            Please review the following code for potential security vulnerabilities:
+
+            Code:
+            {state.get('generated_code', 'No code available')}
+
+            Provide any security-related feedback.
+            """
+            response = self.llm.invoke(prompt)
+            content = getattr(response, 'content', None)
+            if content is None:
+                raise ValueError("LLM response does not contain content.")
+            self._update_state({
+                "security_feedback": content,
+                "current_result": content,
+                "current_step": "fix_code_after_code_review"  # Updated to next node
+            })
+            st.session_state.user_decision = None
+            return {"security_feedback": content, "current_step": "fix_code_after_code_review"}
+        except Exception as e:
+            self.logger.exception("Error in security_review.")
+            st.error(f"Security review failed: {e}")
+            return {"error": str(e)}
+
+    def fix_code_after_code_review(self, state: Dict[str, Any]) -> Dict[str, Any]:
+        try:
+            prompt = f"""
+            Fix the following code based on the code review feedback provided.
+
+            Original Code: {state.get('generated_code', 'No code available')}
+            Feedback: {state.get('review_feedback', 'No feedback provided')}
+
+            Ensure that functionality is preserved while addressing all feedback.
+            """
+            response = self.llm.invoke(prompt)
+            content = getattr(response, 'content', None)
+            if content is None:
+                raise ValueError("LLM response does not contain content.")
+            self._update_state({
+                "generated_code": content,
+                "current_result": content,
+                "current_step": "fix_code_after_security"  # Updated to next node
+            })
+            st.session_state.user_decision = None
+            return {"generated_code": content}
+        except Exception as e:
+            self.logger.exception("Error in fix_code_after_code_review.")
+            st.error(f"Failed to fix code after review: {e}")
+            return {"error": str(e)}
+
+    def fix_code_after_security(self, state: Dict[str, Any]) -> Dict[str, Any]:
+        try:
+            prompt = f"""
+            Improve the security of the following code based on the security review feedback:
+
+            Code: {state.get('generated_code', 'No code available')}
+            Security Feedback: {state.get('security_feedback', 'No security feedback provided')}
+
+            Apply best practices for security and fix any vulnerabilities.
+            """
+            response = self.llm.invoke(prompt)
+            content = getattr(response, 'content', None)
+            if content is None:
+                raise ValueError("LLM response does not contain content.")
+            self._update_state({
+                "generated_code": content,
+                "current_result": content,
+                "current_step": "write_test_cases"  # Updated to next node
+            })
+            return {"generated_code": content}
+        except Exception as e:
+            self.logger.exception("Error in fix_code_after_security.")
+            st.error(f"Failed to fix code for security: {e}")
+            return {"error": str(e)}
+
+    def write_test_cases(self, state: Dict[str, Any]) -> Dict[str, Any]:
+        try:
+            prompt = f"""
+            Create concise test cases for the following code:
+            {state.get('generated_code', 'No code available')}
+
+            Include positive, negative, edge, and security test cases.
+            Format the tests as a Markdown table with test steps and expected results.
+            """
+            response = self.llm.invoke(prompt)
+            content = getattr(response, 'content', None)
+            if content is None:
+                raise ValueError("LLM response does not contain content.")
+            self._update_state({
+                "test_cases": content,
+                "current_result": content,
+                "current_step": "test_cases_review"  # Updated to next node
+            })
+            return {"test_cases": content}
+        except Exception as e:
+            self.logger.exception("Error in write_test_cases.")
+            st.error(f"Failed to write test cases: {e}")
+            return {"error": str(e)}
+
+    def test_cases_review(self, state: Dict[str, Any]) -> Dict[str, Any]:
+        try:
+            prompt = f"""
+            Please review the following test cases and provide your feedback.
+
+            Test Cases:
+            {state.get('test_cases', 'No test cases generated')}
+
+            If the test cases are acceptable, type "approve". Otherwise, provide detailed feedback.
+            """
+            response = self.llm.invoke(prompt)
+            content = getattr(response, 'content', None)
+            if content is None:
+                raise ValueError("LLM response does not contain content.")
+            self._update_state({
+                "test_feedback": content,
+                "current_step": "fix_test_cases"  # Updated to next node
+            })
+            # st.session_state.user_decision = None
+            return {"test_feedback": content}
+        except Exception as e:
+            self.logger.exception("Error in test_cases_review.")
+            # st.error(f"Test cases review failed: {e}")
+            return {"error": str(e)}
 
+    def decision_test_cases_review(self, state: Dict[str, Any]) -> Dict[str, Any]:
+        try:
+            prompt = f"""
+            Based on the following test cases and the review feedback, please decide whether the test cases are acceptable or if further modifications are required.
+
+            Test Cases:
+            {state.get('test_cases', 'No test cases available')}
+
+            Review Feedback:
+            {state.get('test_feedback', 'No feedback provided')}
+
+            If the test cases are acceptable, output "approved". Otherwise, provide the modifications required.
+            """
+            response = self.llm.invoke(prompt)
+            content = getattr(response, 'content', None)
+            if content is None:
+                raise ValueError("LLM response does not contain content.")
+            self._update_state({
+                "decision_test_cases": content,
+                "current_result": content,
+                "current_step": "fix_test_cases"  # or update to the next step in your flow if needed
+            })
+            return {"decision_test_cases": content}
+        except Exception as e:
+            self.logger.exception("Error in decision_test_cases_review.")
+            st.error(f"Decision on test cases review failed: {e}")
+            return {"error": str(e)}
+
+
+    def fix_test_cases(self, state: Dict[str, Any]) -> Dict[str, Any]:
+        try:
+            prompt = f"""
+            Improve the test cases based on the following feedback:
+
+            Original Test Cases: {state.get('test_cases', 'No test cases available')}
+            Feedback: {state.get('test_feedback', 'No feedback provided')}
+
+            Ensure that all feedback points are addressed and that the Markdown table format is maintained.
+            """
+            response = self.llm.invoke(prompt)
+            content = getattr(response, 'content', None)
+            if content is None:
+                raise ValueError("LLM response does not contain content.")
+            self._update_state({
+                "test_cases": content,
+                "current_step": "completed"  # Final step
+            })
+            return {"test_cases": content}
+        except Exception as e:
+            self.logger.exception("Error in fix_test_cases.")
+            # st.error(f"Failed to fix test cases: {e}")
+            return {"error": str(e)}
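A minimal sketch of the node contract these methods share: a plain state dict in, a partial state update (or an {"error": ...} dict) out. FakeLLM is a hypothetical stand-in, not part of the repo; because the nodes also read and write st.session_state, this would only run inside a live Streamlit session with st.session_state.state initialized.

    # Hypothetical smoke test; FakeLLM is a stand-in for the real llm.
    class FakeLLM:
        def invoke(self, prompt):
            class Response:
                content = "## User Story 1\n- Acceptance criteria: ..."
            return Response()

    node = SDLCNode(FakeLLM())
    result = node.generate_user_stories({"requirements": "Login page with OTP"})
    assert "user_stories" in result or "error" in result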
src/langgraphagenticai/state/state.py
CHANGED
@@ -19,18 +19,53 @@ class PlannerState(TypedDict):
     start_date: str
     end_date: str
 
-class SDLCState(TypedDict):
-    current_step: Literal[
-        "requirements",
-        "generate_user_stories",
-        "po_approval",
-        "generate_code",
-        "code_review",
-        "completed"
-    ]
-    requirements: Optional[str]
-    user_stories: Optional[str]
-    po_feedback: Optional[str]
-    generated_code: Optional[str]
-    review_feedback: Optional[str]
-    decision: Optional[Literal["approved", "feedback"]]
+# class SDLCState(TypedDict):
+#     current_step: Literal[
+#         "requirements",
+#         "generate_user_stories",
+#         "po_approval",
+#         "generate_code",
+#         "code_review",
+#         "completed"
+#     ]
+#     requirements: Optional[str]
+#     user_stories: Optional[str]
+#     po_feedback: Optional[str]
+#     generated_code: Optional[str]
+#     review_feedback: Optional[str]
+#     decision: Optional[Literal["approved", "feedback"]]
+
+# from typing import TypedDict, Optional, Literal
+
+class SDLCState(TypedDict):
+    current_step: Literal[
+        "requirements",
+        "generate_user_stories",
+        "product_owner_review",
+        "create_design_docs",
+        "design_review",
+        "generate_code",
+        "code_review",
+        "fix_code_after_code_review",
+        "security_review",
+        "fix_code_after_security",
+        "write_test_cases",
+        "test_cases_review",
+        "fix_test_cases",
+        "end"
+    ]
+    decision: Optional[Literal["approved", "feedback"]]
+    # Content fields
+    requirements: Optional[str]
+    user_stories: Optional[str]
+    design_docs: Optional[str]
+    generated_code: Optional[str]
+    test_cases: Optional[str]
+    # Feedback fields
+    po_feedback: Optional[str]         # Product Owner feedback
+    design_feedback: Optional[str]     # Design Review feedback
+    code_feedback: Optional[str]       # Code Review feedback
+    security_feedback: Optional[str]   # Security Review feedback
+    test_case_feedback: Optional[str]  # Test Case Review feedback
+    # For human input during interrupts.
+    human_decision: Optional[str]
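A minimal sketch of constructing an initial SDLCState. TypedDict does no runtime validation, so this only buys static checking; every key must be present because the class is declared with the default total=True.

    # Sketch only; TypedDict is erased at runtime.
    initial: SDLCState = {
        "current_step": "requirements",
        "decision": None,
        "requirements": "Login page with OTP",
        "user_stories": None,
        "design_docs": None,
        "generated_code": None,
        "test_cases": None,
        "po_feedback": None,
        "design_feedback": None,
        "code_feedback": None,
        "security_feedback": None,
        "test_case_feedback": None,
        "human_decision": None,
    }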
src/langgraphagenticai/ui/streamlitui/display_result.py
CHANGED
@@ -1,7 +1,11 @@
+import uuid
 import streamlit as st
 from langchain_core.messages import HumanMessage,AIMessage,ToolMessage
 import json
 
+from langgraph.types import interrupt, Command
+
+from src.langgraphagenticai.ui.streamlitui.sdlcfeedback import SDLCUI
 from src.langgraphagenticai.tools.customtool import APPOINTMENTS
 from src.langgraphagenticai.tools.customer_support_tools import customers_database, data_protection_checks
@@ -11,7 +15,6 @@ class DisplayResultStreamlit:
         self.graph = graph
         self.user_message = user_message
 
-
     def display_result_on_ui(self):
         usecase= self.usecase
         graph = self.graph
@@ -117,18 +120,105 @@ class DisplayResultStreamlit:
         elif usecase == "SDLC Workflow":
             initial_state = self.user_message
             # Invoke the workflow node for generating user stories
-            if '
+
+            if 'graph_config' not in st.session_state:
+                st.session_state.graph_config = {"configurable": {"thread_id": uuid.uuid4()}}
+
+            # Create a placeholder to show output or interrupts.
+            output_placeholder = st.empty()
+
+            ui = SDLCUI()
+            col1, col2 = st.columns(2)  # Create two columns
+            st.session_state.state['Final_Result'] = []
+            if 'Final_Result' not in st.session_state:
+                st.session_state['Final_Result'] = []
+            with col1:
+                st.subheader('Final Result')
+
+            with col2:
+                st.subheader('human in loop : __interrupt__')
+            if st.session_state.graph_stage == 'initial' and 'current_step' in st.session_state.state and st.session_state.state['current_step'] != '':
+                graph_stream = graph.stream(st.session_state["state"], config=st.session_state.graph_config)
+                if graph_stream:
+
+                    for event in graph_stream:
+                        with col1:
+                            if "__interrupt__" not in event:
+                                for d in event.values():
+                                    st.session_state.state["Final_Result"].append(d)
+                                    if d:
+                                        for key, value in d.items():
+                                            with st.expander(label=key):
+                                                st.markdown(value)
+                        with col2:
+                            if "__interrupt__" in event:
+                                st.session_state.graph_stage = "waiting"
+                                st.rerun()
+                                break
+                    else:
+                        st.session_state.graph_stage = "finished"
+            # --- Stage 2: Display Human Input UI ---
+            if st.session_state.graph_stage == "waiting":
+                with col2:
+                    col2_1, col2_2 = st.columns(2)
+                    st.info(f"Current Steps : {st.session_state.state['current_step']}")
+
+                    feedback = st.text_area("Feedback (enter text to reject)", key="feedback_input")
+                    with col2_1:
+                        if st.button("✅ Approve"):
+                            st.session_state.user_decision = "approve"
+                            st.session_state.graph_stage = "resumed"
+                            st.rerun()
+                    with col2_2:
+                        if st.button("🔄 Request Change"):
+                            st.session_state.user_decision = feedback if feedback else "reject"
+                            st.session_state.graph_stage = "resumed"
+                            st.rerun()
+
+            if st.session_state.graph_stage == "waiting":
+                st.stop()
+
+            # --- Stage 3: Resume Graph Execution ---
+            # When resuming after interrupt
+            if st.session_state.graph_stage == "resumed":
+                resume_state = {
+                    "human_decision": st.session_state.user_decision,
+                }
+                for event in graph.stream(
+                    st.session_state["state"],
+                    config=st.session_state.thread_config
+                ):
+                    with col1:
+                        if "__interrupt__" not in event:
+                            for d in event.values():
+                                st.session_state.state["Final_Result"].append(d)
+                                if d:
+                                    for key, value in d.items():
+                                        with st.expander(label=key):
+                                            st.markdown(value)
+                    with col2:
+                        if "__interrupt__" in event:
+                            st.session_state.graph_stage = "waiting"
+                            st.rerun()
+                            break
+
+                # Determine if the workflow has reached the finish point (fix_test_cases node reached).
+                if st.session_state.state['current_step'] == 'completed' and isinstance(event, dict) and event.get("fix_test_cases"):
+                    st.session_state.graph_stage = "finished"
+                    ui.render_end(state=st.session_state.state)
+                else:
+                    st.session_state.graph_stage = "initial"
+                    st.rerun()
+
+            # --- Stage 4: Workflow Finished ---
+            if st.session_state.graph_stage == "finished":
+                st.write("### Workflow Complete")
+                ui.render_end(state=st.session_state.state)
+                if graph:
+                    st.write('state graph - workflow')
+                    st.image(graph.get_graph(xray=True).draw_mermaid_png())
+
@@ -174,7 +264,6 @@ class DisplayResultStreamlit:
         # Display destination and dates
         st.markdown(f"{sections['Destination']}")
 
-
     def _display_tool_calls(self, message):
         """
         Displays details of tool calls made during the itinerary generation.
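The SDLC branch above is effectively a four-stage machine over st.session_state.graph_stage. A condensed sketch of that loop, using names from the file; render_feedback_controls is a hypothetical stand-in for the approve/request-change buttons.

    # Condensed sketch; render_feedback_controls is hypothetical.
    stage = st.session_state.graph_stage  # "initial" | "waiting" | "resumed" | "finished"
    if stage in ("initial", "resumed"):
        for event in graph.stream(st.session_state["state"], config=st.session_state.graph_config):
            if "__interrupt__" in event:          # a human_loop_* node paused the run
                st.session_state.graph_stage = "waiting"
                st.rerun()                        # re-render to show the review controls
    elif stage == "waiting":
        render_feedback_controls()                # sets user_decision, flips stage to "resumed"
        st.stop()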
src/langgraphagenticai/ui/streamlitui/loadui.py
CHANGED
@@ -1,7 +1,9 @@
+import uuid
 import streamlit as st
 import os
 from datetime import date
 
+from src.langgraphagenticai.ui.streamlitui.sdlcfeedback import SDLCUI
 from src.langgraphagenticai.ui.uiconfigfile import Config
 from langchain_core.messages import AIMessage, HumanMessage
@@ -14,14 +16,20 @@ class LoadStreamlitUI:
     def initialize_session(self):
         return {
             "current_step": "requirements",
+            "decision": None,
             "requirements": "",
             "user_stories": "",
+            "design_docs": "",
+            "code": "",
+            "test_cases": "",
             "po_feedback": "",
-            "
+            "design_feedback": "",
+            "generate_code": "",
             "review_feedback": "",
-            "
+            "security_feedback": "",
+            "test_feedback": "",
+            "human_decision": ""
         }
-
     def render_requirements(self):
         st.markdown("## 📝 Requirements Submission")
         st.session_state.state["requirements"] = st.text_area(
@@ -40,6 +48,8 @@ class LoadStreamlitUI:
         st.session_state.timeframe = ''
         st.session_state.IsFetchButtonClicked = False
         st.session_state.IsSDLC = False
+        ui = SDLCUI()
+
 
 
@@ -73,6 +83,9 @@ class LoadStreamlitUI:
         # Validate API key
         if not self.user_controls["TAVILY_API_KEY"]:
             st.warning("⚠️ Please enter your TAVILY_API_KEY key to proceed. Don't have? refer : https://app.tavily.com/home")
+
+        if self.user_controls['selected_usecase'] != "SDLC Workflow":
+            st.session_state['state'] = ''
         if self.user_controls['selected_usecase'] == "Appointment Receptionist":
             col1, col2 = st.columns(2)
             with col1:
@@ -144,11 +157,37 @@ class LoadStreamlitUI:
         # Added for SDLC Workflow
         elif self.user_controls['selected_usecase']=="SDLC Workflow":
             st.subheader(" SDLC Workflow ")
-
-
-            st.session_state.state = self.initialize_session()
-
+            # if 'status' in st.session_state.state and st.session_state.state['status'] == "__interrupt__" and 'decision' in st.session_state.state and st.session_state.state['decision'] == None:
+            #     ui.render()
+            if "requirements" in st.session_state.state and st.session_state.state['current_step'] == "requirements":
+                ui.render_requirements(st.session_state.state)
+            # if st.session_state.state != '':
+            #     ui.render()
+
+            if st.button("start workflow"):
+                st.session_state.state = ''
+                st.session_state.state = self.initialize_session()
+                # Initialize session state variables.
+                if "thread_config" not in st.session_state:
+                    st.session_state.thread_config = {"configurable": {"thread_id": uuid.uuid4()}}
+                if "graph_stage" not in st.session_state:
+                    st.session_state.graph_stage = "initial"  # Stages: initial, waiting, resumed, finished.
+                if "output_chunks" not in st.session_state:
+                    st.session_state.output_chunks = []
+                if "user_decision" not in st.session_state:
+                    st.session_state.user_decision = None
+                st.session_state.IsSDLC = True
+                ui.render_requirements(st.session_state.state)
+
+            # if st.session_state.state['current_step']=="requirements":
+            #     self.render_requirements()
+            if 'requirements' in st.session_state.state and st.session_state.state['requirements'] == '':
+                st.stop()
 
 
         return self.user_controls
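The one-time setup under the start-workflow button follows the standard Streamlit idiom of guarding each key, since the script re-runs top to bottom on every interaction. The same idiom, compressed (keys and defaults mirror the code above):

    # Idempotent session-state setup.
    defaults = {"graph_stage": "initial", "user_decision": None, "output_chunks": []}
    for key, value in defaults.items():
        if key not in st.session_state:
            st.session_state[key] = value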
src/langgraphagenticai/ui/streamlitui/sdlcfeedback.py
ADDED
@@ -0,0 +1,234 @@
+# sdlc_feedback.py
+import streamlit as st
+
+class SDLCUI:
+    def render(self):
+        if "state" not in st.session_state:
+            st.session_state.state = {"current_step": "user_input"}
+        state = st.session_state.state
+
+        st.markdown(f"**Current Step:** {state['current_step'].replace('_', ' ').title()}")
+
+        step_method = getattr(self, f"render_{state['current_step']}", None)
+        if step_method:
+            step_method(state)
+        else:
+            st.error(f"Unknown step: {state['current_step']}")
+
+    def render_requirements(self, state):
+        st.markdown("### Requirements Input")
+        requirements = st.text_area("Enter your requirements:", height=200)
+        if st.button("Submit Requirements"):
+            state["requirements"] = requirements
+            state["current_step"] = "generate_user_stories"
+            st.session_state.state["current_step"] = "generate_user_stories"
+
+    def render_generate_user_stories(self, state):
+        st.markdown("### Generated User Stories")
+        if st.session_state.state['user_stories'] != '':
+            with st.expander("View User Stories"):
+                st.markdown(st.session_state.state['user_stories'])
+        if st.button("Continue to Product Owner Review"):
+            state["current_step"] = "product_owner_review"
+            st.session_state.state["current_step"] = "product_owner_review"
+            # st.rerun()
+
+    def render_product_owner_review(self, state):
+        st.markdown("### Product Owner Review")
+        with st.expander("View Current User Stories"):
+            st.markdown(state.get("user_stories", ""))
+        st.markdown("### Review Actions")
+        feedback = st.text_area("Feedback:", height=150, key='Feedback')
+
+        col1, col2 = st.columns(2)
+        with col1:
+            if st.button("👍 Approve Design Phase", key="approve_design"):
+                state["decision"] = "approved"
+                state["current_step"] = "create_design_docs"
+                st.session_state.user_decision = "approve"
+                st.session_state.graph_stage = "resumed"
+                st.rerun()
+        with col2:
+            if st.button("🔧 Request Revisions", key="request_revisions"):
+                state["decision"] = "feedback"
+                state["po_feedback"] = feedback
+                state["current_step"] = "revise_user_stories"
+                st.rerun()
+
+    def render_create_design_docs(self, state):
+        st.markdown("### Design Documents")
+        with st.expander("View Design Documents"):
+            st.markdown(state.get("design_docs", ""))
+        if st.button("Proceed to Design Review"):
+            state["current_step"] = "design_review"
+            st.rerun()
+
+    def render_design_review(self, state):
+        st.markdown("### Design Review")
+        with st.expander("View Design Documents"):
+            st.markdown(state.get("design_docs", ""))
+        st.markdown("### Review Actions")
+        feedback = st.text_area("Design Feedback:", height=150, key="design_feedback")
+
+        col1, col2 = st.columns(2)
+        with col1:
+            if st.button("👍 Approve Implementation", key="approve_implementation"):
+                state["decision"] = "approved"
+                state["current_step"] = "generate_code"
+                st.session_state.user_decision = "approve"
+                st.session_state.graph_stage = "resumed"
+                st.rerun()
+        with col2:
+            if st.button("🔧 Request Design Changes", key="request_design_changes"):
+                state["decision"] = "feedback"
+                state["design_feedback"] = feedback
+                state["current_step"] = "create_design_docs"
+                st.rerun()
+
+    def render_generate_code(self, state):
+        st.markdown("### Generated Code")
+        with st.expander("View Code Implementation"):
+            st.code(state.get("code", ""), language='python')
+        if st.button("Proceed to Code Review"):
+            state["current_step"] = "code_review"
+            st.rerun()
+
+    def render_code_review(self, state):
+        st.markdown("### Code Review")
+        with st.expander("View Current Code"):
+            st.code(state.get("code", ""), language='python')
+        st.markdown("### Review Actions")
+        feedback = st.text_area("Code Feedback:", height=150, key="code_feedback")
+
+        col1, col2 = st.columns(2)
+        with col1:
+            if st.button("👍 Approve for Security Review", key="approve_security"):
+                state["decision"] = "approved"
+                state["current_step"] = "security_review"
+                st.session_state.user_decision = "approve"
+                st.session_state.graph_stage = "resumed"
+                st.rerun()
+        with col2:
+            if st.button("🔧 Request Code Fixes", key="request_code_fixes"):
+                state["decision"] = "feedback"
+                state["review_feedback"] = feedback
+                state["current_step"] = "fix_code_after_code_review"
+                st.rerun()
+
+    def render_security_review(self, state):
+        st.markdown("### Security Review")
+        with st.expander("View Code for Security Audit"):
+            st.code(state.get("code", ""), language='python')
+        st.markdown("### Security Findings")
+        feedback = st.text_area("Security Feedback:", height=150, key="security_feedback")
+
+        col1, col2 = st.columns(2)
+        with col1:
+            if st.button("👍 Approve for Testing", key="approve_testing"):
+                state["decision"] = "approved"
+                state["current_step"] = "write_test_cases"
+                st.session_state.user_decision = "approve"
+                st.session_state.graph_stage = "resumed"
+                st.rerun()
+        with col2:
+            if st.button("🔧 Request Security Fixes", key="request_security_fixes"):
+                state["decision"] = "feedback"
+                state["security_feedback"] = feedback
+                state["current_step"] = "fix_code_after_security"
+                st.rerun()
+
+    def render_write_test_cases(self, state):
+        st.markdown("### Test Cases")
+        with st.expander("View Test Cases"):
+            st.markdown(state.get("test_cases", ""))
+        if st.button("Proceed to Test Review"):
+            state["current_step"] = "test_cases_review"
+            st.rerun()
+
+    def render_test_cases_review(self, state):
+        st.markdown("### Test Cases Review")
+        with st.expander("View Current Test Cases"):
+            st.markdown(state.get("test_cases", ""))
+        st.markdown("### Review Actions")
+        feedback = st.text_area("Test Feedback:", height=150, key="test_feedback")
+
+        col1, col2 = st.columns(2)
+        with col1:
+            if st.button("👍 Final Approval", key="final_approve"):
+                state["decision"] = "approved"
+                state["current_step"] = "end"
+                st.rerun()
+        with col2:
+            if st.button("🔧 Improve Test Cases", key="request_test_fixes"):
+                state["decision"] = "feedback"
+                state["test_feedback"] = feedback
+                state["current_step"] = "fix_test_cases"
+                st.rerun()
+
+    def render_end(self, state):
+        st.success("✅ SDLC Process Completed Successfully!")
+        with st.expander("Requirements"):
+            st.write(state.get("requirements", ""))
+        with st.expander("Decision"):
+            st.write(state.get("decision", ""))
+        with st.expander("User Stories"):
+            st.write(state.get("user_stories", ""))
+        with st.expander("Design Documents"):
+            st.write(state.get("design_docs", ""))
+        with st.expander("Design Feedback"):
+            st.write(state.get("design_feedback", ""))
+        with st.expander("Generated Code"):
+            st.write(state.get("generate_code", ""))
+        with st.expander("Test Cases"):
+            st.write(state.get("test_cases", ""))
+        with st.expander("PO Feedback"):
+            st.write(state.get("po_feedback", ""))
+        with st.expander("Review Feedback"):
+            st.write(state.get("review_feedback", ""))
+        with st.expander("Security Feedback"):
+            st.write(state.get("security_feedback", ""))
+        with st.expander("Test Feedback"):
+            st.write(state.get("test_feedback", ""))
+
+
+        with st.expander("Final Artifacts", expanded=True):
+            st.markdown("### Download Assets")
+            st.download_button("📥 Requirements", data=state.get("requirements", ""), file_name="requirements.md")
+            st.download_button("📥 User Stories", data=state.get("user_stories", ""), file_name="user_stories.md")
+            st.download_button("📥 Design Documents", data=state.get("design_docs", ""), file_name="design_docs.md")
+            st.download_button("📥 Generated Code", data=state.get("code", ""), file_name="generated_code.md")
+            st.download_button("📥 Test Cases", data=state.get("test_cases", ""), file_name="test_cases.md")
+            st.download_button("📥 Product Owner Feedback", data=state.get("po_feedback", ""), file_name="po_feedback.md")
+            st.download_button("📥 Design Feedback", data=state.get("design_feedback", ""), file_name="design_feedback.md")
+            st.download_button("📥 Code Review Feedback", data=state.get("review_feedback", ""), file_name="code_review_feedback.md")
+            st.download_button("📥 Security Feedback", data=state.get("security_feedback", ""), file_name="security_feedback.md")
+            st.download_button("📥 Test Cases Feedback", data=state.get("test_feedback", ""), file_name="test_feedback.md")
+
+
+        if st.button("🔄 Restart Process"):
+            st.session_state.clear()
+            st.rerun()