Updated latest code changes
Browse files. This view is limited to 50 files because it contains too many changes.
See raw diff
- .streamlit/config.toml +6 -0
- app.py +5 -0
- notebook/sdlc.ipynb +0 -0
- requirements.txt +8 -0
- src/sdlc/LLMS/__init__.py +0 -0
- src/sdlc/LLMS/groqllm.py +22 -0
- src/sdlc/LLMS/openaillm.py +22 -0
- src/sdlc/__init__.py +27 -0
- src/sdlc/graph/__init__.py +0 -0
- src/sdlc/graph/graph_builder.py +122 -0
- src/sdlc/graph/subgraph_builder.py +51 -0
- src/sdlc/main.py +76 -0
- src/sdlc/nodes/__init__.py +0 -0
- src/sdlc/nodes/code_feedback.py +42 -0
- src/sdlc/nodes/code_generation_node.py +23 -0
- src/sdlc/nodes/code_orchestrator.py +30 -0
- src/sdlc/nodes/code_reviewer_node.py +57 -0
- src/sdlc/nodes/code_subgraph_node.py +26 -0
- src/sdlc/nodes/consolidated_node.py +15 -0
- src/sdlc/nodes/deployment_node.py +20 -0
- src/sdlc/nodes/design_documents_feedback.py +35 -0
- src/sdlc/nodes/design_documents_node.py +30 -0
- src/sdlc/nodes/design_summarize.py +20 -0
- src/sdlc/nodes/maintanence_node.py +20 -0
- src/sdlc/nodes/monitor_fb_node.py +27 -0
- src/sdlc/nodes/monitoring_node.py +20 -0
- src/sdlc/nodes/qa_feedback.py +29 -0
- src/sdlc/nodes/qatesting_node.py +167 -0
- src/sdlc/nodes/security_check_node.py +25 -0
- src/sdlc/nodes/security_feedback.py +35 -0
- src/sdlc/nodes/synthesizer_node.py +27 -0
- src/sdlc/nodes/test_cases_feedback.py +35 -0
- src/sdlc/nodes/test_cases_node.py +24 -0
- src/sdlc/nodes/userstories_feedback.py +37 -0
- src/sdlc/nodes/userstories_node.py +27 -0
- src/sdlc/prompts/__init__.py +0 -0
- src/sdlc/prompts/prompts.py +266 -0
- src/sdlc/schema/__init__.py +0 -0
- src/sdlc/schema/codefiles.py +16 -0
- src/sdlc/schema/codefiletypes.py +16 -0
- src/sdlc/states/__init__.py +0 -0
- src/sdlc/states/states.py +50 -0
- src/sdlc/ui/__init__.py +0 -0
- src/sdlc/ui/streamlitui/display_artifacts.py +28 -0
- src/sdlc/ui/streamlitui/display_code.py +60 -0
- src/sdlc/ui/streamlitui/display_qa_testing.py +21 -0
- src/sdlc/ui/streamlitui/display_result.py +186 -0
- src/sdlc/ui/streamlitui/loadui.py +44 -0
- src/sdlc/ui/uiconfigfile.ini +6 -0
- src/sdlc/ui/uiconfigfile.py +23 -0
.streamlit/config.toml
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[theme]
|
| 2 |
+
base = "dark"
|
| 3 |
+
primaryColor = "#1DB954"
|
| 4 |
+
backgroundColor = "#000000"
|
| 5 |
+
secondaryBackgroundColor = "#1a1a1a"
|
| 6 |
+
textColor = "#ffffff"
|
app.py
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Streamlit entry point: `streamlit run app.py` executes this module top-level,
# so all real work is delegated to the UI driver in src.sdlc.main.
from src.sdlc.main import load_sdlc_app


# Guard keeps the delegate from running on a plain import of this module.
if __name__=="__main__":
    load_sdlc_app()
|
notebook/sdlc.ipynb
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
requirements.txt
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
langchain
|
| 2 |
+
python-dotenv
|
| 3 |
+
langchain-groq
|
| 4 |
+
langchain-openai
|
| 5 |
+
langchain-core
|
| 6 |
+
langchain-community
|
| 7 |
+
langgraph
|
| 8 |
+
streamlit
|
src/sdlc/LLMS/__init__.py
ADDED
|
File without changes
|
src/sdlc/LLMS/groqllm.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import streamlit as st
|
| 3 |
+
from langchain_groq import ChatGroq
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class GroqLLM:
    """Factory for a Groq-backed chat model configured from the Streamlit UI controls."""

    def __init__(self, user_controls_input):
        # Dict of UI selections; expected keys: 'GROQ_API_KEY', 'selected_groq_model'.
        self.user_controls_input = user_controls_input

    def get_llm_model(self):
        """Build and return a ChatGroq model, or None when no API key was supplied.

        Returns:
            ChatGroq | None: the configured model, or None if no key is available
            (the caller treats a falsy model as an initialization failure).

        Raises:
            ValueError: wraps any error raised while constructing the model.
        """
        try:
            groq_api_key = self.user_controls_input['GROQ_API_KEY']
            selected_groq_model = self.user_controls_input['selected_groq_model']
            # os.environ.get avoids a KeyError when GROQ_API_KEY is not set at all
            # (the original indexed os.environ directly, which crashed on a clean env).
            if groq_api_key == '' and os.environ.get("GROQ_API_KEY", "") == '':
                st.error("Please Enter the Groq API KEY")
                # Bail out instead of constructing a client with an empty key;
                # load_sdlc_app already reports "model could not be initialized".
                return None

            llm = ChatGroq(api_key=groq_api_key, model=selected_groq_model, tools=[])

        except Exception as e:
            raise ValueError(f"Error Occurred with Exception : {e}")
        return llm
|
| 22 |
+
|
src/sdlc/LLMS/openaillm.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import streamlit as st
|
| 3 |
+
from langchain_openai import ChatOpenAI
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class OpenAILLM:
    """Factory for an OpenAI-backed chat model configured from the Streamlit UI controls."""

    def __init__(self, user_controls_input):
        # Dict of UI selections; expected keys: 'OPENAI_API_KEY', 'selected_openai_model'.
        self.user_controls_input = user_controls_input

    def get_llm_model(self):
        """Build and return a ChatOpenAI model, or None when no API key was supplied.

        Returns:
            ChatOpenAI | None: the configured model, or None if no key is available
            (the caller treats a falsy model as an initialization failure).

        Raises:
            ValueError: wraps any error raised while constructing the model.
        """
        try:
            openai_api_key = self.user_controls_input['OPENAI_API_KEY']
            selected_openai_model = self.user_controls_input['selected_openai_model']
            # os.environ.get avoids a KeyError when OPENAI_API_KEY is not set at all
            # (the original indexed os.environ directly, which crashed on a clean env).
            if openai_api_key == '' and os.environ.get("OPENAI_API_KEY", "") == '':
                st.error("Please Enter the OPENAI API KEY")
                # Bail out instead of constructing a client with an empty key;
                # load_sdlc_app already reports "model could not be initialized".
                return None

            llm = ChatOpenAI(api_key=openai_api_key, model=selected_openai_model)

        except Exception as e:
            raise ValueError(f"Error Occurred with Exception : {e}")
        return llm
|
| 22 |
+
|
src/sdlc/__init__.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Package-level logging setup: importing src.sdlc configures a root logger that
# writes both to a timestamped file under logs/ and to stdout. Other modules do
# `from src.sdlc import logger`.
import os
import sys
import logging
from datetime import datetime

# One log record format shared by the file and stream handlers.
logging_str = "[%(asctime)s: %(levelname)s: %(module)s: %(message)s]"
log_dir = "logs"
# Format: YYYY-MM-DD_HH-MM-SS
timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
# A fresh file per interpreter start, since the timestamp is taken at import time.
log_filename = f"{timestamp}_sdlc.log"

log_filepath = os.path.join(log_dir, log_filename)
os.makedirs(log_dir, exist_ok=True)



# NOTE(review): basicConfig only takes effect on first call; if another package
# configures logging before this import, these handlers are silently skipped.
logging.basicConfig(
    level= logging.INFO,
    format= logging_str,

    handlers=[
        logging.FileHandler(log_filepath),
        logging.StreamHandler(sys.stdout)
    ]
)

# Named logger used throughout the src.sdlc package.
logger = logging.getLogger("sdlcLogger")
|
src/sdlc/graph/__init__.py
ADDED
|
File without changes
|
src/sdlc/graph/graph_builder.py
ADDED
|
@@ -0,0 +1,122 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from langgraph.graph import StateGraph, START,END
|
| 2 |
+
from langgraph.checkpoint.memory import MemorySaver
|
| 3 |
+
from src.sdlc.states.states import State
|
| 4 |
+
from src.sdlc.nodes.userstories_node import UserStoriesNode
|
| 5 |
+
from src.sdlc.nodes.userstories_feedback import UserStoriesFeedback
|
| 6 |
+
from src.sdlc.nodes.design_documents_node import DesignDocumentsNode
|
| 7 |
+
from src.sdlc.nodes.design_documents_feedback import DesignDocumentsFeedback
|
| 8 |
+
from src.sdlc.nodes.design_summarize import DesignSummarizeNode
|
| 9 |
+
from src.sdlc.nodes.code_subgraph_node import CoderSubgraphNode
|
| 10 |
+
from src.sdlc.nodes.code_feedback import CodeFeedback
|
| 11 |
+
from src.sdlc.nodes.security_check_node import SecurityCheckNode
|
| 12 |
+
from src.sdlc.nodes.security_feedback import SecurityReviewFeedback
|
| 13 |
+
from src.sdlc.nodes.test_cases_node import TestCasesNode
|
| 14 |
+
from src.sdlc.nodes.test_cases_feedback import TestCasesFeedback
|
| 15 |
+
from src.sdlc.nodes.qatesting_node import QATestingNode
|
| 16 |
+
from src.sdlc.nodes.qa_feedback import QAFeedback
|
| 17 |
+
from src.sdlc.nodes.deployment_node import DeploymentNode
|
| 18 |
+
from src.sdlc.nodes.monitoring_node import MonitoringNode
|
| 19 |
+
from src.sdlc.nodes.monitor_fb_node import MonitorFeedback
|
| 20 |
+
from src.sdlc.nodes.maintanence_node import MaintanenceNode
|
| 21 |
+
from src.sdlc.nodes.consolidated_node import ConsolidatedNode
|
| 22 |
+
|
| 23 |
+
class GraphBuilder:
    # Builds the top-level SDLC LangGraph: generation nodes interleaved with
    # human-feedback checkpoints. Compiled with a MemorySaver checkpointer so
    # interrupt()/resume cycles can restore state between Streamlit reruns.

    def __init__(self,model):
        self.llm=model
        self.memory=MemorySaver()  # checkpointer required for interrupt/resume
        self.graph_builder=StateGraph(State)

    def build_graph(self):
        """
        Builds the SDLC graph using LangGraph.
        Instantiates every pipeline node, registers them on the StateGraph,
        and wires the edges (including the human-feedback review loops).
        """
        # Node instances: each *_node generates an artifact with the LLM; each
        # human_fb_* pauses the graph via interrupt() to collect a human review.
        self.userstories_node=UserStoriesNode(self.llm)
        self.human_fb_userstories=UserStoriesFeedback()
        self.design_documents_node=DesignDocumentsNode(self.llm)
        self.human_fb_design=DesignDocumentsFeedback()
        self.design_summarizer=DesignSummarizeNode(self.llm)
        self.coder_subgraph=CoderSubgraphNode(self.llm)
        self.human_fb_code=CodeFeedback()
        self.security_check_node=SecurityCheckNode(self.llm)
        self.human_fb_review=SecurityReviewFeedback()
        self.test_cases_node=TestCasesNode(self.llm)
        self.human_fb_testcases=TestCasesFeedback()
        self.qa_testing_node=QATestingNode()
        self.human_fb_qatesting=QAFeedback()
        self.deployment_node=DeploymentNode(self.llm)
        self.monitoring_node=MonitoringNode(self.llm)
        self.monitor_fb_node=MonitorFeedback()
        self.maintanence_node=MaintanenceNode(self.llm)
        self.consolidated_node=ConsolidatedNode()


        # Register nodes under the names the conditional-edge routers return.
        self.graph_builder.add_node("userstories_generator",self.userstories_node.process)
        self.graph_builder.add_node("human_fb_userstories", self.human_fb_userstories.process)
        self.graph_builder.add_node("design_documents_generator",self.design_documents_node.process)
        self.graph_builder.add_node("human_fb_design", self.human_fb_design.process)
        self.graph_builder.add_node("design_summarizer", self.design_summarizer.process)
        self.graph_builder.add_node("coder_subgraph",self.coder_subgraph.process)
        self.graph_builder.add_node("human_fb_code", self.human_fb_code.process)
        self.graph_builder.add_node("security_check_generator",self.security_check_node.process)
        self.graph_builder.add_node("human_fb_review", self.human_fb_review.process)
        self.graph_builder.add_node("test_cases_generator",self.test_cases_node.process)
        self.graph_builder.add_node("human_fb_testcases",self.human_fb_testcases.process)
        self.graph_builder.add_node("qa_testing_node",self.qa_testing_node.process)
        self.graph_builder.add_node("human_fb_qatesting",self.human_fb_qatesting.process)
        self.graph_builder.add_node("deployment_node",self.deployment_node.process)
        self.graph_builder.add_node("monitoring_node",self.monitoring_node.process)
        self.graph_builder.add_node("monitor_fb_node",self.monitor_fb_node.process)
        self.graph_builder.add_node("maintanence_node",self.maintanence_node.process)
        self.graph_builder.add_node("consolidated_node",self.consolidated_node.process)


        # Edges: each conditional edge either loops back to the generator for a
        # revision pass or advances to the next SDLC stage, as decided by the
        # feedback node's router method.
        self.graph_builder.add_edge(START,"userstories_generator")
        self.graph_builder.add_edge("userstories_generator","human_fb_userstories")
        self.graph_builder.add_conditional_edges("human_fb_userstories",
                                                 self.human_fb_userstories.user_story_review,
                                                 ["userstories_generator", "design_documents_generator"])
        self.graph_builder.add_edge("design_documents_generator","human_fb_design")
        self.graph_builder.add_conditional_edges("human_fb_design",
                                                 self.human_fb_design.design_document_review,
                                                 ["design_documents_generator", "design_summarizer"])
        self.graph_builder.add_edge("design_summarizer","coder_subgraph")
        self.graph_builder.add_edge("coder_subgraph","human_fb_code")
        self.graph_builder.add_edge("human_fb_code","security_check_generator")
        self.graph_builder.add_edge("security_check_generator","human_fb_review")
        self.graph_builder.add_conditional_edges("human_fb_review",
                                                 self.human_fb_review.security_review,
                                                 ["security_check_generator", "test_cases_generator"])
        self.graph_builder.add_edge("test_cases_generator","human_fb_testcases")
        self.graph_builder.add_conditional_edges("human_fb_testcases",
                                                 self.human_fb_testcases.testcase_review,
                                                 ["test_cases_generator", "qa_testing_node"])
        self.graph_builder.add_edge("qa_testing_node","human_fb_qatesting")
        # QA failure loops all the way back to code generation, not to QA itself.
        self.graph_builder.add_conditional_edges("human_fb_qatesting",
                                                 self.human_fb_qatesting.check_qa_response,
                                                 ["deployment_node", "coder_subgraph"])
        self.graph_builder.add_edge("deployment_node","monitoring_node")
        self.graph_builder.add_edge("monitoring_node","monitor_fb_node")
        self.graph_builder.add_edge("monitor_fb_node","maintanence_node")
        self.graph_builder.add_edge("maintanence_node","consolidated_node")
        self.graph_builder.add_edge("consolidated_node",END)


    def setup_graph(self):
        """
        Builds the graph and returns the compiled runnable,
        checkpointed with the in-memory saver.
        """
        self.build_graph()
        #print("[DEBUG] Nodes in Graph:", self.graph_builder.nodes)
        #assert "human_fb_code" in self.graph_builder.nodes, "[ERROR] human_fb_code is missing!"
        return self.graph_builder.compile(checkpointer=self.memory)
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
|
src/sdlc/graph/subgraph_builder.py
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from langgraph.graph import StateGraph, START,END
|
| 2 |
+
from src.sdlc.states.states import CoderState
|
| 3 |
+
from src.sdlc.nodes.code_orchestrator import CodeOrchestratorNode
|
| 4 |
+
from src.sdlc.nodes.code_generation_node import CodeGenerationNode
|
| 5 |
+
from src.sdlc.nodes.synthesizer_node import SynthesizerNode
|
| 6 |
+
from src.sdlc.nodes.code_reviewer_node import CodeReviewerNode
|
| 7 |
+
|
| 8 |
+
class SubGraphBuilder:
    """Assembles the code-generation subgraph:
    orchestrator -> (parallel generation workers) -> synthesizer -> reviewer."""

    def __init__(self, model):
        self.llm = model
        self.graph_builder = StateGraph(CoderState)

    def build_graph(self):
        """
        Builds a subgraph for code generation.
        """
        self.code_orchestrator_node = CodeOrchestratorNode(self.llm)
        self.code_generation_node = CodeGenerationNode(self.llm)
        self.synthesizer_node = SynthesizerNode(self.llm)
        self.code_reviewer_node = CodeReviewerNode(self.llm)

        # Register all four nodes under the names used in the wiring below.
        wiring = (
            ("code_orchestrator", self.code_orchestrator_node.process),
            ("code_generation_node", self.code_generation_node.process),
            ("code_synthesizer", self.synthesizer_node.process),
            ("code_reviewer", self.code_reviewer_node.process),
        )
        for node_name, handler in wiring:
            self.graph_builder.add_node(node_name, handler)

        # Linear flow, except the orchestrator fans out workers dynamically
        # (assign_workers returns Send() objects targeting code_generation_node).
        self.graph_builder.add_edge(START, "code_orchestrator")
        self.graph_builder.add_conditional_edges(
            "code_orchestrator",
            self.code_orchestrator_node.assign_workers,
            path_map=["code_generation_node"],
        )
        self.graph_builder.add_edge("code_generation_node", "code_synthesizer")
        self.graph_builder.add_edge("code_synthesizer", "code_reviewer")
        self.graph_builder.add_edge("code_reviewer", END)

    def setup_graph(self):
        """
        Builds the subgraph and returns the compiled runnable.
        """
        self.build_graph()
        return self.graph_builder.compile()
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
|
src/sdlc/main.py
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import streamlit as st
|
| 2 |
+
from src.sdlc.ui.streamlitui.loadui import LoadStreamlitUI
|
| 3 |
+
from src.sdlc.LLMS.groqllm import GroqLLM
|
| 4 |
+
from src.sdlc.LLMS.openaillm import OpenAILLM
|
| 5 |
+
from src.sdlc.ui.streamlitui.display_result import DisplayResultStreamlit
|
| 6 |
+
from src.sdlc.ui.uiconfigfile import Config
|
| 7 |
+
from src.sdlc.utils.utils import clear_cache_data
|
| 8 |
+
from src.sdlc import logger
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def load_sdlc_app():
    """Top-level Streamlit driver: routes between the 'home' requirements form
    and the 'sdlc_result' page based on st.session_state.page."""
    # Check if session state page exists
    if "page" not in st.session_state:
        st.session_state.page = "home"  # Default page
    if st.session_state.page == "home":
        # Load UI (sidebar controls: provider, model, API keys)
        ui = LoadStreamlitUI()
        user_input = ui.load_streamlit_ui()

        # Title
        st.title("🛠️ " + Config().get_page_title())
        logger.info("In home page")

        with st.form("sdlc_form"):
            # User Input for Requirements
            user_requirements = st.text_area("📌 Enter your project requirements:")
            # NOTE(review): whitespace-only input makes the submit a silent no-op;
            # consider surfacing a warning to the user.
            if st.form_submit_button("🚀 Start SDLC Cycle") and user_requirements.strip():
                try:
                    clear_cache_data() #Clear data from previous cycle
                    # Initialize LLM from the provider chosen in the sidebar
                    st.session_state.selected_llm=user_input['selected_llm']
                    if user_input['selected_llm'] == 'Groq':
                        obj_llm_config = GroqLLM(user_controls_input=user_input)
                    elif user_input['selected_llm'] == 'OpenAI':
                        obj_llm_config = OpenAILLM(user_controls_input=user_input)
                    else:
                        st.error("Invalid LLM selection.")
                        return

                    model = obj_llm_config.get_llm_model()
                    if not model:
                        st.error("Error: LLM model could not be initialized.")
                        return

                    # Store values in session state so the result page can use them
                    st.session_state.sdlc_started = True
                    st.session_state.user_input = user_input
                    st.session_state.user_requirements = user_requirements  # Store input

                    # Switch to the result page
                    st.session_state.page = "sdlc_result"
                    logger.info("Submitted user requirements: %s", user_requirements)
                    st.rerun()  # Re-run app to navigate

                except Exception as e:
                    st.error(f"Error: {e}")
    elif st.session_state.page == "sdlc_result":
        # Add a button to go back
        # presumably clear_cache_data resets st.session_state.page to "home";
        # verify, otherwise this rerun lands back on the result page — TODO confirm
        if st.button("🔙 Restart SDLC Cycle"):
            clear_cache_data()
            st.rerun()

        with st.spinner("Processing SDLC Results..."):
            # Call the DisplayResultStreamlit class to drive the graph & render
            logger.info("Inside state page : ")
            DisplayResultStreamlit().display_result_on_ui()
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
|
src/sdlc/nodes/__init__.py
ADDED
|
File without changes
|
src/sdlc/nodes/code_feedback.py
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from src.sdlc.states.states import State
|
| 2 |
+
from langgraph.graph import END
|
| 3 |
+
from langgraph.types import interrupt
|
| 4 |
+
from src.sdlc import logger
|
| 5 |
+
|
| 6 |
+
class CodeFeedback:
    """
    Human-in-the-loop checkpoint for the generated-code review.

    The node pauses the graph via langgraph's interrupt(); when the run is
    resumed with a human-provided value, that value is written back into the
    state under 'generated_code_review'.
    """

    def process(self, state: State):
        """No-op node that gets interrupted.

        Surfaces the current review (if any) in the interrupt payload and
        returns the human's reply as the new review.
        """
        logger.info("[DEBUG] Entering human_fb_code process")
        human_code_review = interrupt(
            {
                "generated_code_review": state.get("generated_code_review","")
            }
        )
        # Update the state with the human's input or route the graph based on the input.
        logger.info(f"[DEBUG] Resuming human_fb_code process.Received code review : {human_code_review}")

        return {
            "generated_code_review": human_code_review
        }

    # Removed: a large commented-out `code_file_review` router that was dead
    # code (the graph wires human_fb_code straight to security_check_generator).
|
| 42 |
+
|
src/sdlc/nodes/code_generation_node.py
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from src.sdlc.states.states import WorkerState
|
| 2 |
+
from src.sdlc.prompts.prompts import CODE_GEN_INSTRUCTIONS
|
| 3 |
+
from src.sdlc.schema.codefiles import CodeFile
|
| 4 |
+
from src.sdlc import logger
|
| 5 |
+
|
| 6 |
+
class CodeGenerationNode:
    """
    Worker node: generates one code file for the file type assigned to it
    by the orchestrator's Send() dispatch.
    """

    def __init__(self, model):
        # Structured output forces the LLM response into the CodeFile schema.
        self.llm = model.with_structured_output(CodeFile)

    def process(self, state: WorkerState) -> dict:
        """
        Processes the input state and generates code files.
        """
        filetype = state['codefiletype']
        prompt = CODE_GEN_INSTRUCTIONS.format(
            code_review=state.get('generated_code_review', ''),
            codefilename=filetype.name,
            codefiledescription=filetype.description,
        )
        generated_file = self.llm.invoke(prompt)
        logger.info(f"In Code generation node for code file : {filetype.name}")
        # Returned as a one-element list so LangGraph can merge parallel workers.
        return {"generated_files": [generated_file]}
|
| 23 |
+
|
src/sdlc/nodes/code_orchestrator.py
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from src.sdlc.states.states import CoderState
|
| 2 |
+
from src.sdlc.prompts.prompts import CODE_ORCHESTRATOR__INSTRNS
|
| 3 |
+
from src.sdlc.schema.codefiletypes import CodeFileTypes
|
| 4 |
+
from langgraph.constants import Send
|
| 5 |
+
from src.sdlc import logger
|
| 6 |
+
|
| 7 |
+
class CodeOrchestratorNode:
    """Orchestrator that generates an architecture plan for the code"""

    def __init__(self, model):
        # Structured output pins the plan to the CodeFileTypes schema.
        self.planner = model.with_structured_output(CodeFileTypes)

    def process(self, state: CoderState):
        """
        Processes the input state and generates code file types.
        """
        prompt = CODE_ORCHESTRATOR__INSTRNS.format(design_documents=state["design_summary"])
        plan = self.planner.invoke(prompt)
        logger.info(f"In orchestrator node, response is : {plan.codefiletypes}")
        return {"codefiletypes": plan.codefiletypes}

    def assign_workers(self, state: CoderState):
        """Assign a worker to each code file in the plan"""
        review = state.get('generated_code_review', '')
        logger.info("In orchestrator node, assigning workers for code files...")
        # One Send() per planned file -> parallel code_generation_node workers.
        return [
            Send("code_generation_node",
                 {"generated_code_review": review, "codefiletype": filetype})
            for filetype in state["codefiletypes"]
        ]
|
| 30 |
+
|
src/sdlc/nodes/code_reviewer_node.py
ADDED
|
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from src.sdlc.states.states import CoderState
|
| 2 |
+
from src.sdlc.prompts.prompts import CODE_REVIEW_INSTRNS
|
| 3 |
+
from src.sdlc.schema.codefiletypes import CodeFileTypes
|
| 4 |
+
from src.sdlc import logger
|
| 5 |
+
import time
|
| 6 |
+
import groq
|
| 7 |
+
|
| 8 |
+
class CodeReviewerNode:
    """
    Reviews the generated code against the design summary, sending files in
    small batches to stay within the model's token limits.
    """

    def __init__(self, model):
        self.llm = model

    def _review_batch(self, design_summary, batch):
        """Run one review call for a {filename: content} batch of files."""
        return self.llm.invoke(CODE_REVIEW_INSTRNS.format(
            design_documents=design_summary,
            generated_code=batch
        ))

    def process(self, state: CoderState):
        """
        Processes the input state and reviews the code based on the design documents.
        Sends the files in batches of 3 for token limit management.

        Returns:
            dict: {'generated_code_review': combined review text of all batches}
        """
        generated_code = state.get("generated_code", {})
        design_summary = state["design_summary"]

        # Convert the dictionary into a list of (filename, content) pairs
        files_list = list(generated_code.items())

        batch_size = 3
        # Cap at 4 batches of 3, i.e. at most 12 files are ever reviewed.
        num_batches = min((len(files_list) + batch_size - 1) // batch_size, 4)

        all_reviews = []

        for i in range(num_batches):
            batch = dict(files_list[i * batch_size : (i + 1) * batch_size])
            try:
                response = self._review_batch(design_summary, batch)
                logger.info(f"In code reviewer node. Batch {i+1} Response: {response.content}")
            except groq.RateLimitError:
                wait_time = 70  # Wait a bit more than 1 minute, then retry once
                logger.info(f"Rate limit reached. Waiting for {wait_time} seconds...")
                time.sleep(wait_time)
                response = self._review_batch(design_summary, batch)
                # Fixed: the original passed response.content as a stray positional
                # argument to logger.info (no %-placeholder), so the review text
                # was never actually logged.
                logger.info(f"Batch {i+1} Review after wait: {response.content}")
            all_reviews.append(response.content)

        # Combine all reviews into a single string
        combined_review = "\n\n".join(all_reviews)

        return {"generated_code_review": combined_review}
|
| 56 |
+
|
| 57 |
+
|
src/sdlc/nodes/code_subgraph_node.py
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from src.sdlc.states.states import State
|
| 2 |
+
from src.sdlc.graph.subgraph_builder import SubGraphBuilder
|
| 3 |
+
from src.sdlc import logger
|
| 4 |
+
|
| 5 |
+
class CoderSubgraphNode:
    """
    Wraps the code-generation subgraph as a single node of the parent graph.
    """

    def __init__(self, model):
        self.llm = model

    def process(self, state: State):
        """
        Processes the input state and generates code files based on design documents.
        """
        # Build and compile a fresh subgraph for this invocation.
        self.sub_graph = SubGraphBuilder(self.llm).setup_graph()

        design_summary = state["design_summary"]
        # Run the subgraph; its final state is merged back into the parent state.
        result = self.sub_graph.invoke({"design_summary": design_summary})
        logger.info("INVOKING SUBGRAPH FOR CODE GENERATION...")
        return result
|
| 25 |
+
|
| 26 |
+
|
src/sdlc/nodes/consolidated_node.py
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from src.sdlc.states.states import State
|
| 2 |
+
from src.sdlc import logger
|
| 3 |
+
|
| 4 |
+
class ConsolidatedNode:
    """
    Terminal node: marks the run complete so the UI can render all artifacts.
    """

    def process(self, state: State) -> dict:
        """
        Gets all artifacts from state to display.
        """
        logger.info("IN CONSOLIDATED NODE,displaying artifacts...")
        # Sentinel value consumed by the display layer; the artifacts themselves
        # are already in the graph state.
        marker = "Display artifacts"
        return {"consolidated_artifacts": marker}
|
| 15 |
+
|
src/sdlc/nodes/deployment_node.py
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from src.sdlc.states.states import State
|
| 2 |
+
from src.sdlc.prompts.prompts import DEPLOYMENT_INSTRUCTIONS
|
| 3 |
+
from src.sdlc import logger
|
| 4 |
+
|
| 5 |
+
class DeploymentNode:
    """
    Generates deployment instructions from the summarized design document.
    """

    def __init__(self, model):
        self.llm = model

    def process(self, state: State) -> dict:
        """
        Processes the input state and generates deployment instructions based on design document.
        """
        prompt = DEPLOYMENT_INSTRUCTIONS.format(
            design_document=state.get('design_summary', '')
        )
        result = self.llm.invoke(prompt)
        logger.info("In DEPLOYMENT NODE ,response received")
        return {"deployment": result.content}
|
| 20 |
+
|
src/sdlc/nodes/design_documents_feedback.py
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from src.sdlc.states.states import State
|
| 2 |
+
from langgraph.graph import END
|
| 3 |
+
from langgraph.types import interrupt
|
| 4 |
+
from src.sdlc import logger
|
| 5 |
+
|
| 6 |
+
class DesignDocumentsFeedback:
    """
    Human-in-the-loop checkpoint for the generated design documents, plus the
    router that decides whether another design pass is needed.
    """

    def process(self, state: State):
        """ No-op node that should be interrupted on """
        logger.info("[DEBUG] Entering human_fb_design process")
        payload = {"design_documents_review": state.get('design_documents_review', "")}
        human_design_review = interrupt(payload)
        # Write the human's input back into the state for the router below.
        logger.info(f"RESUMING DESIGN FEEDBACK NODE, feedback received : {human_design_review}")
        return {"design_documents_review": human_design_review}

    def design_document_review(self, state: State):
        """ Return the next node to execute """
        logger.info("IN DESIGN REVIEW, determining flow based on design review...")
        # Non-empty feedback sends the flow back for another design pass;
        # otherwise proceed to summarization.
        has_feedback = bool(state.get('design_documents_review', ""))
        return "design_documents_generator" if has_feedback else "design_summarizer"
|
| 35 |
+
|
src/sdlc/nodes/design_documents_node.py
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from src.sdlc.states.states import State
|
| 2 |
+
from src.sdlc.prompts.prompts import DESIGNDOCS_GEN_INSTRNS,DESIGN_MODIFY_INSTRNS
|
| 3 |
+
from src.sdlc import logger
|
| 4 |
+
|
| 5 |
+
class DesignDocumentsNode:
    """Creates or revises the design document from user stories and feedback."""

    def __init__(self, model):
        # LLM used for both first-pass generation and revision.
        self.llm = model

    def process(self, state: State):
        """
        Generate a fresh design document from the user stories, or rework the
        existing one when reviewer feedback is present in the state.
        """
        feedback = state.get("design_documents_review", "")
        current_doc = state.get("design_documents", "")
        if feedback:
            reply = self.llm.invoke(
                DESIGN_MODIFY_INSTRNS.format(design_review=feedback,
                                             design_documents=current_doc))
            logger.info("IN MODIFY DESIGN DOCS...")
        else:
            # NOTE: the generation template is also handed the (empty)
            # current doc and feedback; str.format ignores unused kwargs.
            reply = self.llm.invoke(
                DESIGNDOCS_GEN_INSTRNS.format(user_stories=state["user_stories"],
                                              design_documents=current_doc,
                                              design_review=feedback))
            logger.info("IN DESIGN DOCS GENERATION...")
        return {"design_documents": reply.content}
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
|
src/sdlc/nodes/design_summarize.py
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from src.sdlc.states.states import State
|
| 2 |
+
from src.sdlc.prompts.prompts import SUMMARIZE_DESIGN_DOCS
|
| 3 |
+
from src.sdlc import logger
|
| 4 |
+
|
| 5 |
+
class DesignSummarizeNode:
    """Condenses the full design document into a short summary for code stages."""

    def __init__(self, model):
        self.llm = model

    def process(self, state: State):
        """
        Ask the LLM for a condensed version of ``design_documents`` and store
        it under ``design_summary``.
        """
        prompt = SUMMARIZE_DESIGN_DOCS.format(design_documents=state["design_documents"])
        condensed = self.llm.invoke(prompt)
        logger.info(f"In DESIGN SUMMARIZN NODE,response received : {condensed.content}")
        return {"design_summary": condensed.content}
|
| 20 |
+
|
src/sdlc/nodes/maintanence_node.py
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from src.sdlc.states.states import State
|
| 2 |
+
from src.sdlc.prompts.prompts import MAINTANENCE_INSTRNS
|
| 3 |
+
from src.sdlc import logger
|
| 4 |
+
|
| 5 |
+
class MaintanenceNode:
    """Generates maintenance/update guidance from the monitoring feedback."""

    def __init__(self, model):
        self.llm = model

    def process(self, state: State) -> dict:
        """
        Feed the human monitoring feedback into the maintenance prompt and
        store the answer under ``maintanence_and_updates``.
        """
        feedback = state.get('monitoring_and_feedback_review', '')
        reply = self.llm.invoke(MAINTANENCE_INSTRNS.format(user_feedback=feedback))
        logger.info(f"In MAINTANENCE NODE, received response")
        return {"maintanence_and_updates": reply.content}
|
| 20 |
+
|
src/sdlc/nodes/monitor_fb_node.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from src.sdlc.states.states import State
|
| 2 |
+
from langgraph.graph import END
|
| 3 |
+
from langgraph.types import interrupt
|
| 4 |
+
from src.sdlc import logger
|
| 5 |
+
|
| 6 |
+
class MonitorFeedback:
    """Human-in-the-loop checkpoint for the monitoring stage."""

    def process(self, state: State):
        """ No-op node that should be interrupted on """
        logger.info("[DEBUG] Entering human_fb_monitoring process")
        # Suspend until the human supplies monitoring feedback.
        feedback = interrupt(
            {"monitoring_and_feedback_review": state.get('monitoring_and_feedback_review', "")}
        )
        logger.info(f"RESUMING MONITORING FEEDBACK NODE,feedback received : {feedback}")
        return {"monitoring_and_feedback_review": feedback}
|
| 26 |
+
|
| 27 |
+
|
src/sdlc/nodes/monitoring_node.py
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from src.sdlc.states.states import State
|
| 2 |
+
from src.sdlc.prompts.prompts import MONITORING_FB_INSTRNS
|
| 3 |
+
from src.sdlc import logger
|
| 4 |
+
|
| 5 |
+
class MonitoringNode:
    """Generates monitoring-and-feedback guidance from deployment instructions."""

    def __init__(self, model):
        self.llm = model

    def process(self, state: State) -> dict:
        """
        Feed the deployment instructions into the monitoring prompt and store
        the answer under ``monitoring_and_feedback``.
        """
        deploy_text = state.get('deployment', '')
        reply = self.llm.invoke(
            MONITORING_FB_INSTRNS.format(deployment_instructions=deploy_text))
        logger.info(f"In GEN MONITORING AND FEEDBACK INSTRUCTIONS,response received...")
        return {"monitoring_and_feedback": reply.content}
|
| 20 |
+
|
src/sdlc/nodes/qa_feedback.py
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from src.sdlc.states.states import State
|
| 2 |
+
from langgraph.graph import END
|
| 3 |
+
from langgraph.types import interrupt
|
| 4 |
+
from src.sdlc import logger
|
| 5 |
+
|
| 6 |
+
class QAFeedback:
    """Human checkpoint that records the QA outcome and routes on it."""

    def process(self, state: State):
        """ No-op node that should be interrupted on """
        logger.info("[DEBUG] Entering human_fb_qatesting process")
        # Surface the QA results to the human; the resume payload must be a
        # mapping containing a "result" key.
        outcome = interrupt({"qa_result": state.get('qa_testing', "")})
        logger.info(f"RESUMING QATESTING FEEDBACK NODE ,qa etsting result : {outcome['result']}")
        return {"qa_status": outcome["result"]}

    def check_qa_response(self, state):
        """Route to deployment when QA passed, otherwise back to the coder subgraph."""
        return "deployment_node" if state.get("qa_status", "") == "Passed" else "coder_subgraph"
|
src/sdlc/nodes/qatesting_node.py
ADDED
|
@@ -0,0 +1,167 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from src.sdlc.states.states import State
|
| 2 |
+
from src.sdlc.prompts.prompts import TESTCASES_GEN_INSTRNS
|
| 3 |
+
from src.sdlc import logger
|
| 4 |
+
import traceback
|
| 5 |
+
from typing import List, Tuple, Dict
|
| 6 |
+
import re
|
| 7 |
+
import io
|
| 8 |
+
import contextlib
|
| 9 |
+
from copy import deepcopy
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class QATestingNode:
|
| 13 |
+
"""
|
| 14 |
+
Node logic implementation for QA Testing.
|
| 15 |
+
"""
|
| 16 |
+
|
| 17 |
+
@staticmethod
|
| 18 |
+
def markdown_test_cases_to_python(markdown: str) -> Tuple[str, List[Dict]]:
|
| 19 |
+
lines = markdown.strip().split("\n")
|
| 20 |
+
data_lines = [line.strip() for line in lines if line.strip()]
|
| 21 |
+
header_index = next((i for i, line in enumerate(data_lines) if "---" in line), None)
|
| 22 |
+
|
| 23 |
+
if header_index is None or header_index == 0:
|
| 24 |
+
raise ValueError("Markdown table is malformed or missing header separator.")
|
| 25 |
+
|
| 26 |
+
headers = [h.strip() for h in data_lines[0].split('|')[1:-1]]
|
| 27 |
+
rows = data_lines[header_index + 1:]
|
| 28 |
+
|
| 29 |
+
test_funcs: List[str] = []
|
| 30 |
+
metadata: List[Dict] = []
|
| 31 |
+
|
| 32 |
+
for row in rows:
|
| 33 |
+
columns = [col.strip() for col in row.split('|')[1:-1]]
|
| 34 |
+
if len(columns) != len(headers):
|
| 35 |
+
continue
|
| 36 |
+
|
| 37 |
+
case_id, use_case, scenario, steps, expected, test_type = columns
|
| 38 |
+
func_name = re.sub(r'\W+', '_', f"test_{case_id}_{use_case}_{scenario}").lower()
|
| 39 |
+
step_lines = steps.replace("<br>", "\n").split("\n")
|
| 40 |
+
step_code = "\n ".join(f"# {step.strip()}" for step in step_lines)
|
| 41 |
+
expected_comment = f"# Expected: {expected}"
|
| 42 |
+
|
| 43 |
+
test_func = f"""
|
| 44 |
+
def {func_name}():
|
| 45 |
+
{step_code}
|
| 46 |
+
{expected_comment}
|
| 47 |
+
print("Simulated Output for: {expected}") # Add dummy output
|
| 48 |
+
assert True # TODO: Replace with real assertion
|
| 49 |
+
"""
|
| 50 |
+
test_funcs.append(test_func.strip())
|
| 51 |
+
|
| 52 |
+
metadata.append({
|
| 53 |
+
"func_name": func_name,
|
| 54 |
+
"Test Case ID": case_id,
|
| 55 |
+
"Use Case": use_case,
|
| 56 |
+
"Test Scenario": scenario,
|
| 57 |
+
"Test Steps": steps,
|
| 58 |
+
"Expected Result": expected,
|
| 59 |
+
"Test Type": test_type
|
| 60 |
+
})
|
| 61 |
+
|
| 62 |
+
return "\n\n".join(test_funcs), metadata
|
| 63 |
+
|
| 64 |
+
def process(self, state):
|
| 65 |
+
generated_code = state.get("generated_code", {})
|
| 66 |
+
test_cases_markdown = state.get("test_cases", "")
|
| 67 |
+
|
| 68 |
+
overall_results = []
|
| 69 |
+
|
| 70 |
+
# Parse test cases once
|
| 71 |
+
try:
|
| 72 |
+
test_code, metadata = QATestingNode.markdown_test_cases_to_python(test_cases_markdown)
|
| 73 |
+
except Exception:
|
| 74 |
+
return {
|
| 75 |
+
"qa_testing": {
|
| 76 |
+
"result":"Failed",
|
| 77 |
+
"summary": "❌ Failed to parse test cases.",
|
| 78 |
+
"table": "",
|
| 79 |
+
"details": [{
|
| 80 |
+
"Test Case ID": "Unknown",
|
| 81 |
+
"Use Case": "Global",
|
| 82 |
+
"Test Scenario": "Test case markdown parsing failed",
|
| 83 |
+
"Test Steps": "-",
|
| 84 |
+
"Expected Result": "-",
|
| 85 |
+
"Actual Output": "-",
|
| 86 |
+
"Test Type": "-",
|
| 87 |
+
"Status": "❌ Fail",
|
| 88 |
+
"Error": traceback.format_exc()
|
| 89 |
+
}]
|
| 90 |
+
}
|
| 91 |
+
}
|
| 92 |
+
|
| 93 |
+
# ✅ One shared environment
|
| 94 |
+
local_env = {}
|
| 95 |
+
try:
|
| 96 |
+
# Load all code files into same namespace
|
| 97 |
+
for file_path, code in generated_code.items():
|
| 98 |
+
if not file_path.endswith(".py"):
|
| 99 |
+
logger.warning(f"Skipping non-Python file: {file_path}")
|
| 100 |
+
continue
|
| 101 |
+
try:
|
| 102 |
+
exec(code, local_env)
|
| 103 |
+
except Exception as e:
|
| 104 |
+
logger.error(f"Failed to exec {file_path}: {e}")
|
| 105 |
+
|
| 106 |
+
exec(test_code, local_env)
|
| 107 |
+
|
| 108 |
+
for test_meta_orig in metadata:
|
| 109 |
+
test_meta = deepcopy(test_meta_orig)
|
| 110 |
+
func_name = test_meta.get("func_name")
|
| 111 |
+
|
| 112 |
+
try:
|
| 113 |
+
test_func = local_env.get(func_name)
|
| 114 |
+
if not test_func:
|
| 115 |
+
raise ValueError(f"Function '{func_name}' not found")
|
| 116 |
+
|
| 117 |
+
# Capture stdout
|
| 118 |
+
f = io.StringIO()
|
| 119 |
+
with contextlib.redirect_stdout(f):
|
| 120 |
+
result = test_func()
|
| 121 |
+
output = f.getvalue().strip()
|
| 122 |
+
test_meta["Actual Output"] = output or str(result)
|
| 123 |
+
test_meta["Status"] = "✅ Pass"
|
| 124 |
+
|
| 125 |
+
except Exception:
|
| 126 |
+
test_meta["Actual Output"] = "-"
|
| 127 |
+
test_meta["Status"] = "❌ Fail"
|
| 128 |
+
test_meta["Error"] = traceback.format_exc()
|
| 129 |
+
|
| 130 |
+
overall_results.append(test_meta)
|
| 131 |
+
|
| 132 |
+
except Exception:
|
| 133 |
+
for test_meta_orig in metadata:
|
| 134 |
+
test_meta = deepcopy(test_meta_orig)
|
| 135 |
+
test_meta["Status"] = "❌ Fail"
|
| 136 |
+
test_meta["Actual Output"] = "-"
|
| 137 |
+
test_meta["Error"] = traceback.format_exc()
|
| 138 |
+
overall_results.append(test_meta)
|
| 139 |
+
|
| 140 |
+
# Markdown table generation
|
| 141 |
+
table_header = (
|
| 142 |
+
"| Test Case ID | Use Case | Test Scenario | Test Steps | Expected Result | Actual Output | Test Type | Status |\n"
|
| 143 |
+
"| --- | --- | --- | --- | --- | --- | --- | --- |\n"
|
| 144 |
+
)
|
| 145 |
+
|
| 146 |
+
table_rows = ""
|
| 147 |
+
for r in overall_results:
|
| 148 |
+
table_rows += (
|
| 149 |
+
f"| {r.get('Test Case ID')} | {r.get('Use Case')} | {r.get('Test Scenario')} | "
|
| 150 |
+
f"{r.get('Test Steps')} | {r.get('Expected Result')} | {r.get('Actual Output')} | "
|
| 151 |
+
f"{r.get('Test Type')} | {r.get('Status')} |\n"
|
| 152 |
+
)
|
| 153 |
+
|
| 154 |
+
markdown_table = table_header + table_rows
|
| 155 |
+
summary = "✅ All tests passed" if all(r["Status"] == "✅ Pass" for r in overall_results) else "❌ Some tests failed"
|
| 156 |
+
logger.info(f"IN QA TESTING NODE, results : {summary}")
|
| 157 |
+
return {
|
| 158 |
+
"qa_testing": {
|
| 159 |
+
"result":"Passed",
|
| 160 |
+
"summary": summary,
|
| 161 |
+
"table": markdown_table,
|
| 162 |
+
"details": overall_results
|
| 163 |
+
}
|
| 164 |
+
}
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
|
src/sdlc/nodes/security_check_node.py
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from src.sdlc.states.states import State
|
| 2 |
+
from src.sdlc.prompts.prompts import SECURITY_REVIEW_INSTRNS
|
| 3 |
+
from src.sdlc import logger
|
| 4 |
+
|
| 5 |
+
class SecurityCheckNode:
    """Runs an LLM security review over the generated code."""

    def __init__(self, model):
        self.llm = model

    def process(self, state: State):
        """
        Send the generated code through the security-review prompt and store
        the findings under ``security_check``.
        """
        code_bundle = state.get('generated_code', '')
        reply = self.llm.invoke(SECURITY_REVIEW_INSTRNS.format(generated_code=code_bundle))
        logger.info("In SECURITY CHECK NODE,received response")
        return {"security_check": reply.content}
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
|
src/sdlc/nodes/security_feedback.py
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from src.sdlc.states.states import State
|
| 2 |
+
from langgraph.graph import END
|
| 3 |
+
from langgraph.types import interrupt
|
| 4 |
+
from src.sdlc import logger
|
| 5 |
+
|
| 6 |
+
class SecurityReviewFeedback:
    """Human-in-the-loop checkpoint for the security review stage."""

    def process(self, state: State):
        """ No-op node that should be interrupted on """
        logger.info("[DEBUG] Entering human_fb_review process")
        # Suspend until the human responds to the security findings.
        review = interrupt(
            {"security_check_review": state.get("security_check_review", "")}
        )
        logger.info(f'RESUMING SECURITY FEEDBACK NODE, feedback is {review}')
        return {"security_check_review": review}

    def security_review(self, state: State):
        """ Return the next node to execute """
        review = state.get('security_check_review', "")
        logger.info("IN SECURITY REVIEW, determining flow...")
        # Non-empty feedback loops back to the security check; otherwise
        # proceed to test-case generation.
        return "security_check_generator" if review else "test_cases_generator"
|
| 35 |
+
|
src/sdlc/nodes/synthesizer_node.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from src.sdlc.states.states import CoderState
|
| 2 |
+
from src.sdlc.prompts.prompts import FILE_MODIFY_INSTRNS
|
| 3 |
+
from src.sdlc.schema.codefiletypes import CodeFileTypes
|
| 4 |
+
from src.sdlc import logger
|
| 5 |
+
|
| 6 |
+
class SynthesizerNode:
    """Node logic implementation for synthesizing full code from multiple files."""

    def __init__(self, model):
        # Structured output keeps file name/code pairs typed as CodeFileTypes.
        self.llm = model.with_structured_output(CodeFileTypes)

    def process(self, state: CoderState):
        """Collect the generated files into a single ``{name: code}`` mapping."""
        files = state.get("generated_files", [])
        logger.info(f"IN code_synthesizer, generated code files : {files}")
        # Flatten the structured file objects into a plain dict keyed by path.
        return {"generated_code": {item.name: item.code for item in files}}
|
src/sdlc/nodes/test_cases_feedback.py
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from src.sdlc.states.states import State
|
| 2 |
+
from langgraph.graph import END
|
| 3 |
+
from langgraph.types import interrupt
|
| 4 |
+
from src.sdlc import logger
|
| 5 |
+
|
| 6 |
+
class TestCasesFeedback:
    """Human-in-the-loop checkpoint for the generated test cases."""

    def process(self, state: State):
        """ No-op node that should be interrupted on """
        logger.info("[DEBUG] Entering human_fb_testcases process")
        # Suspend until the human reviews the generated test cases.
        review = interrupt(
            {"test_cases_review": state.get("test_cases_review", "")}
        )
        logger.info(f'RESUMING TEST CASES FEEDBACK NODE, feedback received : {review}')
        return {"test_cases_review": review}

    def testcase_review(self, state: State):
        """ Return the next node to execute """
        review = state.get('test_cases_review', "")
        logger.info("IN TEST CASE REVIEW,determining flow...")
        # Non-empty feedback regenerates the test cases; otherwise run QA.
        return "test_cases_generator" if review else "qa_testing_node"
|
| 35 |
+
|
src/sdlc/nodes/test_cases_node.py
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from src.sdlc.states.states import State
|
| 2 |
+
from src.sdlc.prompts.prompts import TESTCASES_GEN_INSTRNS
|
| 3 |
+
from src.sdlc import logger
|
| 4 |
+
|
| 5 |
+
class TestCasesNode:
    """Generates test cases for the code from the design summary."""

    def __init__(self, model):
        self.llm = model

    def process(self, state: State):
        """
        Feed the design summary into the test-case prompt and store the
        answer under ``test_cases``.
        """
        summary = state.get('design_summary', '')
        reply = self.llm.invoke(TESTCASES_GEN_INSTRNS.format(design_documents=summary))
        logger.info("In TEST CASES NODE received response")
        return {"test_cases": reply.content}
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
|
src/sdlc/nodes/userstories_feedback.py
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from src.sdlc.states.states import State
|
| 2 |
+
from langgraph.graph import END
|
| 3 |
+
from langgraph.types import interrupt
|
| 4 |
+
from src.sdlc import logger
|
| 5 |
+
|
| 6 |
+
class UserStoriesFeedback:
    """Human-in-the-loop checkpoint for the generated user stories."""

    def process(self, state: State):
        """ No-op node that should be interrupted on """
        logger.info("[DEBUG] Entering human_fb_user stories process")
        # Suspend until the human reviews the user stories.
        review = interrupt(
            {"user_stories_review": state.get('user_stories_review', "")}
        )
        logger.info(f'RESUMING HUMAN USERSTORIES FEEDBACK NODE, feedback is : {review}')
        return {"user_stories_review": review}

    def user_story_review(self, state: State):
        """ Return the next node to execute """
        review = state.get('user_stories_review', "")
        logger.info("IN USER STORY REVIEW,determining flow...")
        # Non-empty feedback regenerates the stories; otherwise move on to
        # design-document generation.
        return "userstories_generator" if review else "design_documents_generator"
|
| 37 |
+
|
src/sdlc/nodes/userstories_node.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from src.sdlc.states.states import State
|
| 2 |
+
from src.sdlc.prompts.prompts import USERSTORY_GEN_INSTRNS,USERSTORY_MODIFY_INSTRNS
|
| 3 |
+
from src.sdlc import logger
|
| 4 |
+
|
| 5 |
+
class UserStoriesNode:
    """Creates or revises user stories from the raw user requirements."""

    def __init__(self, model):
        self.llm = model

    def process(self, state: State) -> dict:
        """
        Generate user stories from ``user_requirements``, or rework the
        existing ones when reviewer feedback is present.
        """
        review = state.get('user_stories_review', '')
        stories = state.get('user_stories', '')
        if review:
            reply = self.llm.invoke(
                USERSTORY_MODIFY_INSTRNS.format(user_stories_review=review,
                                                user_stories=stories))
            logger.info("IN MODIFY USER STORIES")
        else:
            # NOTE: the generation template only uses {user_requirements};
            # the extra user_stories kwarg is ignored by str.format.
            reply = self.llm.invoke(
                USERSTORY_GEN_INSTRNS.format(user_requirements=state["user_requirements"],
                                             user_stories=stories))
            logger.info("In GENERATE USER STORIES...")
        return {"user_stories": reply.content}
|
| 27 |
+
|
src/sdlc/prompts/__init__.py
ADDED
|
File without changes
|
src/sdlc/prompts/prompts.py
ADDED
|
@@ -0,0 +1,266 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Prompt templates for the SDLC pipeline nodes. Each is a str.format template;
# the {placeholders} name the state fields the calling node supplies.

# Generate user stories (markdown) from raw user requirements.
USERSTORY_GEN_INSTRNS="""You are tasked with creating a detailed user story based on the given user requirements.
Provide a single title for the user story and add all details expected from the requirements. Follow these instructions carefully:

1. **Review** the given user requirements carefully:
{user_requirements}

2. **Format the output using Markdown**.

3. **Do not include any preamble** before listing the user stories.
"""

# Revise previously generated user stories using human reviewer feedback.
USERSTORY_MODIFY_INSTRNS="""You are tasked with modifying the existing user stories based on the given user feedback.
Follow these instructions carefully:

1. Modify the already generated user stories based on editorial feedback.Keep the general outline of the already generated user stories :
    - Previously generated user stories:
    {user_stories}
    - Editorial feedback:
    {user_stories_review}

2. **Format the output using Markdown**.

3. **Do not include any preamble** before listing the user stories.
"""

# Produce a full design document (markdown + mermaid diagram) from user stories.
DESIGNDOCS_GEN_INSTRNS="""
You are tasked with creating a **comprehensive design document** based on the given user stories. Follow these instructions carefully:

1. **Review the given user stories carefully:**
{user_stories}

2. **Structure the design document as follows:**
   - **Introduction:** Provide a high-level summary of the system.
   - **System Overview:** Describe the core functionality and scope.
   - **Functional Requirements:** List detailed functional requirements derived from the user stories.
   - **Use Cases:** Define key use cases and user interactions with the system.
   - **Architecture Design:** Include a high-level architecture diagram in Markdown mermaid format.
   - **Technology Stack:** Specify the programming languages, frameworks, and tools used.
   - **Database Schema:** Define tables, relationships, and key constraints.
   - **API Specifications:** Outline RESTful or GraphQL endpoints with request/response formats.
   - **Deployment Strategy:** Explain CI/CD pipelines, hosting, and cloud infrastructure.
   - **Security Considerations:** Detail authentication, authorization, and data protection measures.
   - **Code Considerations:** Describe how the front-end and back-end code will be structured.

3. **Return the final software design document in Markdown format.**
   - Embed the **architecture diagram** as a Mermaid diagram.
   - Ensure it covers all coding aspects, including front-end and back-end implementation details.

"""

# Revise the existing design document using human reviewer feedback.
DESIGN_MODIFY_INSTRNS="""You are tasked with modifying the existing design documents based on the given user feedback.
Follow these instructions carefully:

1. Modify the already generated design documents based on editorial feedback.Keep the general outline of the already generated design documents :
    - Previously generated design documents:
    {design_documents}
    - Editorial feedback:
    {design_review}

2. **Format the output using Markdown**.

3. **Do not include any preamble** before listing the design documents.
"""


# Plan the code-file/folder layout (JSON of name+description entries) from the
# design document; caps files at 10 and subfolders at 5.
CODE_ORCHESTRATOR__INSTRNS="""
Generate a production-level code architecture plan based on the provided design document : {design_documents}.
Follow these instructions carefully to ensure a structured and well-organized folder hierarchy:
1. Define the sub folder structure based on the design documents for placing files of related functionalities.
2. Determine file structure: Generate unique file names with appropriate extensions based on all functionalities specified in the design document.Limit the number of files to 10 or lower as applicable.
3. Group related files into subfolders based on their functionality.
4. Ensure correct naming convention:
   - For each file, include its full path within the "name" key
   - Ensure every file is assigned to a subfolder when applicable.Multiple files can come under a single subfolder.
   - Try to limit subfolder count to maximum of 5 or lower as applicable
5. Provide meaningful descriptions: Each file should have a clear and concise description under the "description" key, explaining its purpose and role in the system.
6. Output format: Return a JSON object containing multiple file entries, each with "name" and "description" keys for multiple files
7. Production-ready best practices: Ensure the folder structure is scalable, modular, aligning with industry standards for production environments.
"""

# Generate one code file's contents (JSON with 'name' and 'code').
CODE_GEN_INSTRUCTIONS="""
Generate code contents for the code file based on the details given file name: {codefilename} and description: {codefiledescription}.
Follow the below instructions carefully.
1. Check for any optional feedback {code_review} and generate the code accordingly.
2. Return the 'name' and 'code' for the file in JSON format.
"""

# From code-review feedback, pick which files need modification (JSON output).
FILE_MODIFY_INSTRNS="""
Examine the code review feedback provided :{code_review}.
The existing code file names with the descriptions is as provided : {codefiletypes}
Identify the code file names that need modifications based on the descriptions and feedback provided.
If feedback requires new files to be added,provide file name and description accordingly.
Return the files to be modified with names and descriptions as a JSON object
"""

# Condense the design document to <=500 words of bullet points for review use.
SUMMARIZE_DESIGN_DOCS="""
Summarize the following **detailed design document** : {design_documents} to extract only the most relevant information for code review.
### **Output Format**
Provide a structured summary focusing on:
1. **System Overview**: A brief summary of the system’s purpose and functionality.
2. **Functional Requirements**: Key expected features and functionalities.
3. **Technical Requirements**: Technologies, frameworks, database structures, and any architectural constraints.
4. **Code Structure**: Expected file organization, naming conventions, and module breakdown.
5. **Security & Performance Considerations**: Any key performance optimizations or security best practices.

Return the response in **concise bullet points** to facilitate quick comparison with the generated code.Limit the word count to a maximum of 500 and make it concise.
"""
|
| 107 |
+
|
| 108 |
+
CODE_REVIEW_INSTRNS = """
|
| 109 |
+
You are reviewing the generated code to ensure it aligns with the design document: {design_documents}.
|
| 110 |
+
Identify discrepancies, suggest modifications, and determine if new files are required based on the already generated code: {generated_code}.
|
| 111 |
+
|
| 112 |
+
Follow the below instructions:
|
| 113 |
+
1. Compare the generated code against the design document to identify any missing, incorrect, or incomplete implementations.
|
| 114 |
+
2. Ensure all functionalities described in the design document are correctly implemented.
|
| 115 |
+
3. Verify adherence to architecture, logic, and expected behavior.
|
| 116 |
+
4. Determine required modifications or if any functionality is missing. List the file names that need modifications and clearly describe the required changes.
|
| 117 |
+
5. Do not use Markdown formatting, bullet points, titles, or special characters. Provide a clear, structured response in plain text.
|
| 118 |
+
6. Output format:
|
| 119 |
+
- List the names of the files that need modifications.
|
| 120 |
+
- Provide a clear explanation of the issues in each file.
|
| 121 |
+
- Describe specifically what needs to be added, removed, or changed.
|
| 122 |
+
- Use simple sentences and paragraphs without special formatting.
|
| 123 |
+
"""
|
| 124 |
+
|
| 125 |
+
SECURITY_REVIEW_INSTRNS="""
|
| 126 |
+
You are a security expert tasked with reviewing the security of a set of generated code files : {generated_code} where
|
| 127 |
+
|
| 128 |
+
- The keys represent file names with extensions (e.g., "app.py", "index.js", "config.yaml").
|
| 129 |
+
- The values contain the source code of each file.
|
| 130 |
+
|
| 131 |
+
Your task:
|
| 132 |
+
1. Analyze each file for security vulnerabilities.
|
| 133 |
+
2. Identify common security risks, including but not limited to:
|
| 134 |
+
- Hardcoded credentials, API keys, or secrets.
|
| 135 |
+
- Injection vulnerabilities (SQL injection, command injection, XSS, etc.).
|
| 136 |
+
- Insecure authentication or missing authorization checks.
|
| 137 |
+
- Use of outdated or vulnerable dependencies.
|
| 138 |
+
- Weak cryptographic practices.
|
| 139 |
+
- Improper input validation and sanitization.
|
| 140 |
+
- Insecure file handling or deserialization risks.
|
| 141 |
+
- Lack of logging or excessive logging of sensitive data.
|
| 142 |
+
- Misconfigured security settings in configuration files.
|
| 143 |
+
|
| 144 |
+
Format your response in **Markdown** without any preamble:
|
| 145 |
+
"""
|
| 146 |
+
|
| 147 |
+
TESTCASES_GEN_INSTRNS="""
|
| 148 |
+
You are an expert in software testing and test case generation. Given a design document containing functional, technical, and structural requirements, generate a consolidated test case table. Each test case should include key attributes like test scenario, test steps, expected result, and test type.
|
| 149 |
+
|
| 150 |
+
Input:
|
| 151 |
+
The design document contains the following details:{design_documents}
|
| 152 |
+
|
| 153 |
+
Functional Requirements - Describe how the system should behave.
|
| 154 |
+
|
| 155 |
+
Technical Requirements - Specify implementation details, integrations, and constraints.
|
| 156 |
+
|
| 157 |
+
Structural Requirements - Define architectural and infrastructure-level details.
|
| 158 |
+
|
| 159 |
+
Output Format (Table Format) without a preamble:
|
| 160 |
+
Provide a structured response in a tabular format with the following columns:
|
| 161 |
+
|
| 162 |
+
Test Case ID - Unique identifier for the test case.
|
| 163 |
+
|
| 164 |
+
Use Case - The specific use case being tested.
|
| 165 |
+
|
| 166 |
+
Test Scenario - A brief description of what is being tested.
|
| 167 |
+
|
| 168 |
+
Test Steps - Step-by-step procedure to execute the test.
|
| 169 |
+
|
| 170 |
+
Expected Result - The anticipated system behavior after execution.
|
| 171 |
+
|
| 172 |
+
Test Type - Functional, Integration, Structural, or Performance.
|
| 173 |
+
"""
|
| 174 |
+
|
| 175 |
+
DEPLOYMENT_INSTRUCTIONS="""
|
| 176 |
+
You are an SDLC deployment assistant. You will be given a software design document.
|
| 177 |
+
|
| 178 |
+
Your task is to:
|
| 179 |
+
|
| 180 |
+
1. **Analyze the document** and determine if it includes any deployment-related preferences or decisions. Look for:
|
| 181 |
+
- Platforms (e.g., AWS, Heroku, Vercel, Netlify, Docker, Kubernetes, etc.)
|
| 182 |
+
- Hosting types (e.g., static, containerized, serverless)
|
| 183 |
+
- Infrastructure details (e.g., EC2, S3, Cloud Run, etc.)
|
| 184 |
+
- CI/CD or version control notes relevant to deployment
|
| 185 |
+
|
| 186 |
+
2. If **deployment is mentioned**, extract the method or platform clearly.
|
| 187 |
+
|
| 188 |
+
3. If **no deployment is mentioned**, pick a **suitable and commonly used deployment platform at random** (such as Heroku, Netlify, or AWS EC2) based on the application's likely needs.
|
| 189 |
+
|
| 190 |
+
4. Provide **clear deployment steps** for the identified or assumed deployment method.
|
| 191 |
+
|
| 192 |
+
### Input:
|
| 193 |
+
{design_document}
|
| 194 |
+
|
| 195 |
+
### Output:
|
| 196 |
+
- **Detected or Assumed Deployment Method:** <deployment_type_or_platform>
|
| 197 |
+
- **Deployment Steps:**
|
| 198 |
+
1. ...
|
| 199 |
+
2. ...
|
| 200 |
+
3. ...
|
| 201 |
+
|
| 202 |
+
"""
|
| 203 |
+
|
| 204 |
+
MONITORING_FB_INSTRNS="""
|
| 205 |
+
You are an experienced DevOps and SRE (Site Reliability Engineering) expert.
|
| 206 |
+
|
| 207 |
+
Below are the deployment instructions for a software application. Your task is to generate actionable, concise, and technically sound instructions for:
|
| 208 |
+
|
| 209 |
+
1. **Monitoring** the system after deployment.
|
| 210 |
+
2. **Collecting and analyzing feedback**, including error logs, user input, and performance metrics.
|
| 211 |
+
|
| 212 |
+
---
|
| 213 |
+
### Deployment Instructions:
|
| 214 |
+
{deployment_instructions}
|
| 215 |
+
|
| 216 |
+
---
|
| 217 |
+
|
| 218 |
+
### Output Format:
|
| 219 |
+
|
| 220 |
+
## Monitoring Instructions
|
| 221 |
+
- (bullet point list of what and how to monitor based on deployment details)
|
| 222 |
+
|
| 223 |
+
## Feedback Collection Instructions
|
| 224 |
+
- (bullet point list of how feedback should be collected and reviewed post-deployment)
|
| 225 |
+
|
| 226 |
+
Focus on tools, logs, services, metrics, and automation where applicable.
|
| 227 |
+
|
| 228 |
+
"""
|
| 229 |
+
|
| 230 |
+
# Prompt: build a maintenance plan from optional post-deployment user feedback.
# Placeholder: {user_feedback} (appears twice). Output: markdown under
# "## Maintenance Plan". The constant keeps its historical (misspelled) name
# because other modules import MAINTANENCE_INSTRNS; only the prompt TEXT is
# corrected ("maintanence" -> "maintenance", missing space after the comma).
MAINTANENCE_INSTRNS="""
You are assisting with software maintenance based on user feedback.

Optional Feedback Received:
{user_feedback}
If no feedback received, respond that no user feedback for maintenance was received and generate a project-specific maintenance plan.
Else, if feedback received, based on this feedback, analyze the likely issue and generate a project-specific maintenance plan that includes:

- Summary of the problem

- Likely cause or affected area

- Recommended fix or update

- Tests or documentation to update

- Any potential security or deployment impact

Output is in markdown format as follows:

## Maintenance Plan

### User Feedback
"{user_feedback}"

### Likely Cause
...

### Suggested Fix or Update
...

### Tests & Docs
...

### Security / Deployment Notes
...
"""
|
src/sdlc/schema/__init__.py
ADDED
|
File without changes
|
src/sdlc/schema/codefiles.py
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pydantic import BaseModel,Field
|
| 2 |
+
from typing import List
|
| 3 |
+
|
| 4 |
+
# Schema for structured output to use in planning
|
| 5 |
+
class CodeFile(BaseModel):
    """A single generated source file: its path/name plus full code content.

    Used as the structured-output schema for the code-generation LLM call.
    """

    name: str = Field(
        description="Name of the code file with extension",
    )
    code: str = Field(
        description="Code content of the file",
    )
|
| 12 |
+
|
| 13 |
+
class CodeFiles(BaseModel):
    """Container holding the list of generated ``CodeFile`` entries."""

    codefiles: List[CodeFile] = Field(
        description="List of code files.",
    )
|
src/sdlc/schema/codefiletypes.py
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pydantic import BaseModel,Field
|
| 2 |
+
from typing import List
|
| 3 |
+
|
| 4 |
+
class CodeFileType(BaseModel):
    """A planned code file: its path/name and a description of its purpose.

    Produced by the orchestrator before any code is generated.
    """

    name: str = Field(
        description="Name of the code file with extension",
    )
    description: str = Field(
        description="Description of the functionality in the file",
    )
|
| 11 |
+
|
| 12 |
+
class CodeFileTypes(BaseModel):
    """Container holding the planned list of ``CodeFileType`` entries."""

    codefiletypes: List[CodeFileType] = Field(
        description="List of code files.",
    )
|
| 16 |
+
|
src/sdlc/states/__init__.py
ADDED
|
File without changes
|
src/sdlc/states/states.py
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Annotated, Literal, Optional
|
| 2 |
+
from typing_extensions import TypedDict
|
| 3 |
+
from typing import TypedDict, Annotated, List,Dict
|
| 4 |
+
from src.sdlc.schema.codefiletypes import CodeFileType
|
| 5 |
+
from src.sdlc.schema.codefiles import CodeFile
|
| 6 |
+
import operator
|
| 7 |
+
|
| 8 |
+
class State(TypedDict):
    """Top-level LangGraph state threaded through every SDLC stage."""

    user_requirements:str               # raw requirements entered by the user
    user_stories:str                    # generated user stories
    user_stories_review:str             # reviewer feedback on the user stories
    design_documents:str                # generated design documents
    design_documents_review:str         # reviewer feedback on the design docs
    design_summary:str                  # condensed design used for code review
    codefiletypes: List[CodeFileType]   # planned files (name + description)
    generated_files: Annotated[
        List[CodeFile], operator.add
    ] # All workers write to this key in parallel
    generated_code: Dict                # file name -> code content
    generated_code_review:str           # review feedback on generated code
    security_check:str                  # security analysis report
    security_check_review:str           # reviewer feedback on security report
    test_cases:str                      # generated test case table
    test_cases_review:str               # reviewer feedback on the test cases
    qa_testing:Dict                     # QA run results (summary/table/details)
    qa_status:str                       # overall QA outcome
    deployment:str                      # deployment instructions
    monitoring_and_feedback:str         # monitoring / feedback instructions
    monitoring_and_feedback_review:str  # reviewer feedback on monitoring plan
    maintanence_and_updates:str         # maintenance plan (key spelled sic; used app-wide)
    consolidated_artifacts:Dict         # final downloadable artifacts
|
| 32 |
+
|
| 33 |
+
# Subgraph state
class CoderState(TypedDict):
    """State for the code-generation subgraph (a slice of ``State``)."""

    design_summary:str                  # condensed design document
    codefiletypes: List[CodeFileType]   # planned files to generate
    generated_files: Annotated[
        List[CodeFile], operator.add
    ] # All workers write to this key in parallel
    generated_code: Dict                # file name -> code content
    generated_code_review:str           # code review feedback
    security_review:str                 # security review feedback
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
# Worker state
class WorkerState(TypedDict):
    """Per-file worker state: one planned file plus review context."""

    codefiletype: CodeFileType  # the single file this worker generates
    code_review:str             # optional code-review feedback to incorporate
    security_review:str         # optional security feedback to incorporate
    generated_files: Annotated[List[CodeFile], operator.add]  # fan-in accumulator
|
src/sdlc/ui/__init__.py
ADDED
|
File without changes
|
src/sdlc/ui/streamlitui/display_artifacts.py
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import streamlit as st
|
| 2 |
+
from src.sdlc.utils.utils import display_states
|
| 3 |
+
import base64
|
| 4 |
+
import json
|
| 5 |
+
|
| 6 |
+
def display_downloads(self):
    """Render download links for every artifact produced during the SDLC run.

    Reads the final graph state and, per artifact key, emits either a base64
    ZIP download link (generated code), a JSON dump of QA details, or the
    plain artifact text for the remaining SDLC stage keys.
    """
    state = self.graph.get_state(config=st.session_state.thread) # Fetch the state from the graph
    state_dict = state.values
    st.write("### Downloadable assets from the SDLC")
    for key, value in state_dict.items():
        if key=="generated_code":
            # Convert ZIP to base64
            # NOTE(review): assumes display_code_files already populated
            # st.session_state.zip_buffer — confirm that stage always ran first.
            zip_bytes = st.session_state.zip_buffer.getvalue()
            b64_zip = base64.b64encode(zip_bytes).decode()
            # Create a download link
            href = f'''<a href="data:application/zip;base64,{b64_zip}"
            download="generated_code.zip"
            style="text-decoration: none; color: inherit; font-weight: bold;">⬇️ Download code files</a>'''
            st.markdown(href, unsafe_allow_html=True)
        elif key=="qa_testing":
            #Convert the list to a JSON string
            json_str = json.dumps(value['details'])
            # NOTE(review): passes self.curr_state rather than `key` — correct
            # only if curr_state is always "qa_testing" at this point; verify.
            display_states(self.curr_state, json_str)
        elif key in self.sdlc_nodes and key !="consolidated_artifacts":
            display_states(key,value)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
|
src/sdlc/ui/streamlitui/display_code.py
ADDED
|
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import streamlit as st
|
| 2 |
+
from langgraph.types import Command
|
| 3 |
+
import zipfile
|
| 4 |
+
import io
|
| 5 |
+
|
| 6 |
+
def create_zip(generated_code):
    """Bundle generated code files into an in-memory ZIP archive.

    Args:
        generated_code: mapping of archive path -> file text/bytes.

    Returns:
        An ``io.BytesIO`` positioned at offset 0, ready for streaming.
    """
    buffer = io.BytesIO()
    with zipfile.ZipFile(buffer, "w", zipfile.ZIP_DEFLATED) as archive:
        for path, content in generated_code.items():
            archive.writestr(path, content)
    # Rewind so callers (e.g. st.download_button) read from the start.
    buffer.seek(0)
    return buffer
|
| 13 |
+
|
| 14 |
+
def display_code_files(self, generated_code):
    """Render the generated code files with a ZIP download and an inline
    approve / feedback form that resumes the paused LangGraph run.

    Args:
        generated_code: mapping of file path -> source text.
    """
    st.title("Generated Code Review")

    st.subheader("📄 Generated Code Files")

    # Display all code files
    for file_path, code_content in generated_code.items():
        st.markdown(f"**`{file_path}`**")
        st.code(code_content, language="python") # adjust language as needed

    # Create ZIP only once
    if "zip_buffer" not in st.session_state:
        st.session_state.zip_buffer = create_zip(generated_code)

    # Download button
    st.download_button(
        label="⬇️ Download All as ZIP",
        data=st.session_state.zip_buffer,
        file_name="generated_code.zip",
        mime="application/zip"
    )

    # Inline Approve + Feedback Form
    st.divider()

    with st.form(key="approve_feedback_form"):
        col1, col2 = st.columns([1, 4]) # Adjust ratio as needed

        with col1:
            approve = st.form_submit_button("✅ Approve")

        with col2:
            # NOTE(review): self.feedback is set in generate_sdlc only when a
            # "generated_code_review" event was seen — confirm it is always
            # populated before this page renders.
            feedback = st.text_area("Edit/Submit feedback if not approved:", value=self.feedback, height=700)
            submit_feedback = st.form_submit_button("✏️ Submit Feedback")

    if approve:
        # Resume the interrupted graph with an empty answer (= approval),
        # then advance the UI to the next SDLC stage.
        self.graph.invoke(Command(resume=""), config=st.session_state.thread)
        if self.index < len(self.sdlc_nodes) - 1:
            st.session_state.curr_state = self.sdlc_nodes[self.index + 1]
        st.session_state.feedback_text = ""
        st.rerun()

    elif submit_feedback and feedback.strip():
        # Resume the graph with the reviewer's feedback; stay on this stage
        # so the regenerated code can be reviewed again.
        st.session_state.feedback_text = feedback
        self.graph.invoke(Command(resume=feedback), config=st.session_state.thread)
        st.session_state.feedback_text = ""
        st.rerun()
|
src/sdlc/ui/streamlitui/display_qa_testing.py
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import streamlit as st
|
| 2 |
+
import pandas as pd
|
| 3 |
+
|
| 4 |
+
def display_qa_results(self,response):
    """Render the QA-testing stage: summary text, the markdown results table,
    and a dataframe of any failed test cases.

    Args:
        response: dict with 'summary', 'table' (markdown) and 'details'
            (list of per-test dicts carrying a "Status" field).
    """
    st.subheader("🧪 Test Summary")
    st.write(response['summary'])

    st.subheader("📋 Test Table")
    st.markdown(response['table'], unsafe_allow_html=True)

    # Surface only the failing rows; passing rows are visible in the table.
    st.subheader("⚠️ Detailed Errors")
    failed_rows = []
    for row in response['details']:
        if row.get("Status") == "❌ Fail":
            failed_rows.append(row)

    if not failed_rows:
        st.success("No errors found!")
    else:
        st.dataframe(pd.DataFrame(failed_rows))
|
src/sdlc/ui/streamlitui/display_result.py
ADDED
|
@@ -0,0 +1,186 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import streamlit as st
|
| 2 |
+
import uuid
|
| 3 |
+
import base64
|
| 4 |
+
from src.sdlc.utils.utils import get_cached_sdlc_nodes, get_cached_graph, display_states
|
| 5 |
+
from src.sdlc.ui.streamlitui.display_code import display_code_files
|
| 6 |
+
from src.sdlc.ui.streamlitui.display_qa_testing import display_qa_results
|
| 7 |
+
from src.sdlc.ui.streamlitui.display_artifacts import display_downloads
|
| 8 |
+
from langgraph.types import Command
|
| 9 |
+
import json
|
| 10 |
+
|
| 11 |
+
class DisplayResultStreamlit:
    """Drives the Streamlit UI for one SDLC run: streams the LangGraph,
    renders each stage's artifact, and resumes the graph on approve/feedback.

    Progress is tracked in st.session_state (thread id, current stage,
    breadcrumbs, pending feedback) so it survives Streamlit reruns.
    """

    def __init__(self):
        # Rebuild the cached graph when starting fresh from the home page.
        if "graph" not in st.session_state or st.session_state.page == "home":
            st.session_state.graph = get_cached_graph()
        self.graph = st.session_state.graph # Use cached graph
        if not self.graph:
            st.error("Graph is missing! Restart the SDLC process.")
            return
        self.user_requirements = st.session_state.user_requirements
        self.sdlc_nodes = get_cached_sdlc_nodes()
        # One LangGraph thread per browser session.
        if "thread" not in st.session_state:
            st.session_state.thread = {"configurable": {"thread_id": str(uuid.uuid4())}}
        if "curr_state" not in st.session_state:
            st.session_state.curr_state = self.sdlc_nodes[0] # Start from first stage

        # Initialize breadcrumbs in session state
        if "breadcrumbs" not in st.session_state:
            st.session_state.breadcrumbs = []
        if "feedback_text" not in st.session_state:
            st.session_state.feedback_text = ""

    def generate_sdlc(self):
        """Stream the graph until its next interrupt and return the artifact
        value for the current stage ("" if no event carried it, None on
        KeyError)."""
        try:
            self.index = self.sdlc_nodes.index(st.session_state.curr_state)
            # Only the very first stage seeds the graph with input; later
            # stages resume from the checkpointed thread (payload None).
            request_payload = None

            if self.index == 0:
                request_payload = {"user_requirements": self.user_requirements}

            response = ""

            for event in self.graph.stream(request_payload, config=st.session_state.thread, stream_mode="values"):
                response = event.get(st.session_state.curr_state, "")
                review = event.get("generated_code_review", "")
                if st.session_state.curr_state == "generated_code" and review:
                    # Pre-fill the review feedback box on the code page.
                    self.feedback = review

        except KeyError as e:
            st.error(f"Graph execution error: Missing key {e}")
            response = None
        except Exception as e:
            # NOTE(review): if the failure happens before `response` is bound
            # (e.g. the .index lookup raises), the return below would raise
            # UnboundLocalError — consider initialising response first.
            st.error(f"Graph execution error: {e}")

        return response

    def display_result_on_ui(self):
        """Render the current stage's artifact plus the progress sidebar and
        the approve/feedback form, then resume the graph on user action."""
        self.curr_state = st.session_state.curr_state # Get current state
        response = self.generate_sdlc()

        # Update breadcrumbs
        if self.curr_state not in st.session_state.breadcrumbs:
            st.session_state.breadcrumbs.append(self.curr_state)

        # Define layout with right-side progress column
        col_main, col_sidebar = st.columns([3, 1])

        with col_sidebar:
            st.markdown("### 🔗 SDLC Progress")

            for idx, state in enumerate(self.sdlc_nodes):
                display_name = state.replace('_', ' ').title()

                if state == self.curr_state:
                    # ✅ Current node: green border, bold, larger font
                    st.markdown(
                        f"""
                        <div style="
                        padding: 6px 12px;
                        margin-bottom: 6px;
                        border-left: 4px solid #2e7d32;
                        color: white;
                        font-size: 15px;
                        font-weight: bold;">
                        🔄 {display_name}
                        </div>
                        """,
                        unsafe_allow_html=True
                    )

                elif state in st.session_state.breadcrumbs:
                    # ✅ Completed node
                    st.markdown(
                        f"""
                        <div style="
                        padding: 6px 12px;
                        margin-bottom: 6px;
                        color: #ccc;
                        font-size: 14px;">
                        ✅ {display_name}
                        </div>
                        """,
                        unsafe_allow_html=True
                    )

                else:
                    # ⏳ Upcoming node
                    st.markdown(
                        f"""
                        <div style="
                        padding: 6px 12px;
                        margin-bottom: 6px;
                        color: #888;
                        font-size: 14px;">
                        ⏳ {display_name}
                        </div>
                        """,
                        unsafe_allow_html=True
                    )

        with col_main:
            # Display the main SDLC phase
            state = self.curr_state.replace('_', ' ')
            st.subheader(f"🛠️ **SDLC Phase: {state.title()}**")
            if self.curr_state=="qa_testing":
                # NOTE(review): debug leftover — prints QA payload to stdout.
                print(response)

            if response:
                #with st.expander(f"📜 **{state}**", expanded=True):
                # Stage-specific renderers; default is plain markdown.
                if self.curr_state == "consolidated_artifacts":
                    display_downloads(self)
                elif self.curr_state == "generated_code":
                    display_code_files(self, response)
                elif self.curr_state == "qa_testing":
                    display_qa_results(self,response)
                else:
                    st.markdown(response)

                # generated_code and consolidated_artifacts render their own
                # controls; every other stage gets the shared approve form.
                if self.curr_state != "consolidated_artifacts" and self.curr_state != "generated_code":
                    with st.form(key="feedback_form", clear_on_submit=True):
                        col1, col2 = st.columns([1, 2])
                        with col1:
                            approve = st.form_submit_button("✅ Approve")
                        with col2:
                            # qa_testing / deployment / maintenance stages are
                            # approve-only (no free-text feedback widget).
                            if self.curr_state != "qa_testing" and self.curr_state != "deployment" and self.curr_state !="maintanence_and_updates":
                                feedback = st.text_area("Provide feedback if not approved", value=st.session_state.feedback_text, key="feedback_input")
                                submit_feedback = st.form_submit_button("Submit Feedback")
                    status=""
                    # For approve-only stages, neutralise the feedback path so
                    # the elif branch below can never fire.
                    if self.curr_state == "qa_testing" or self.curr_state == "deployment" or self.curr_state =="maintanence_and_updates":
                        submit_feedback=""
                        feedback=""
                    if self.curr_state == "qa_testing":
                        # QA resumes the graph with the QA result payload.
                        status=response

                    if approve:
                        try:
                            self.graph.invoke(Command(resume=status), config=st.session_state.thread)
                            if self.index < len(self.sdlc_nodes) - 1:
                                st.session_state.curr_state = self.sdlc_nodes[self.index + 1]
                            st.session_state.feedback_text = ""
                            st.rerun()
                        except ValueError:
                            st.error("No nodes found!")

                    elif submit_feedback and feedback:
                        st.session_state.feedback_text = feedback
                        self.graph.invoke(Command(resume=feedback), config=st.session_state.thread)
                        # monitoring_and_feedback is the one stage where
                        # submitting feedback also advances to the next stage.
                        if self.curr_state =="monitoring_and_feedback" and self.index < len(self.sdlc_nodes) - 1:
                            st.session_state.curr_state = self.sdlc_nodes[self.index + 1]
                        st.session_state.feedback_text = ""
                        st.rerun()

                if self.curr_state=="qa_testing":
                    #Convert the list to a JSON string
                    json_str = json.dumps(response['details'])
                    display_states(self.curr_state, json_str)
                elif self.curr_state != "generated_code":
                    display_states(self.curr_state, response)

                st.session_state.graph = self.graph
                st.session_state.feedback_text = "" # Reset feedback
            else:
                st.error("Error occurred. Please restart the SDLC cycle.")
|
src/sdlc/ui/streamlitui/loadui.py
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import streamlit as st
|
| 2 |
+
from src.sdlc.ui.uiconfigfile import Config
|
| 3 |
+
|
| 4 |
+
#Sidebar with the user controls
|
| 5 |
+
#Sidebar with the user controls
class LoadStreamlitUI:
    """Builds the sidebar controls (LLM provider, model, API key) and returns
    the user's selections as a dict."""

    def __init__(self):
        self.config = Config() # config
        self.user_controls = {}

    def initialize_session(self):
        """Return the initial per-session state stored under st.session_state.state."""
        return {
            "sdlc": ""
        }

    def load_streamlit_ui(self):
        """Render the page config and sidebar; return the collected controls.

        Keys set: "selected_llm", then per provider either
        "selected_groq_model"/"GROQ_API_KEY" or
        "selected_openai_model"/"OPENAI_API_KEY" (API keys are mirrored into
        st.session_state for later use).
        """
        st.set_page_config(page_title= "🛠️ " + self.config.get_page_title(), layout="wide")

        with st.sidebar:
            # Get options from config
            llm_options = self.config.get_llm_options()

            # LLM selection
            self.user_controls["selected_llm"] = st.selectbox("Select LLM", llm_options)

            if self.user_controls["selected_llm"] == 'Groq':
                # Model selection
                model_options = self.config.get_groq_model_options()
                self.user_controls["selected_groq_model"] = st.selectbox("Select Model", model_options)
                # API key input
                self.user_controls["GROQ_API_KEY"] = st.session_state["GROQ_API_KEY"] = st.text_input("API Key",
                                                                                                      type="password")
            elif self.user_controls["selected_llm"] == 'OpenAI':
                # Model selection
                model_options = self.config.get_openai_model_options()
                self.user_controls["selected_openai_model"] = st.selectbox("Select Model", model_options)
                # API key input
                self.user_controls["OPENAI_API_KEY"] = st.session_state["OPENAI_API_KEY"] = st.text_input("API Key",
                                                                                                          type="password")

        if "state" not in st.session_state:
            st.session_state.state = self.initialize_session()

        return self.user_controls
|
src/sdlc/ui/uiconfigfile.ini
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[DEFAULT]
|
| 2 |
+
PAGE_TITLE = SDLC Lifecycle
|
| 3 |
+
LLM_OPTIONS = Groq,OpenAI
|
| 4 |
+
GROQ_MODEL_OPTIONS = meta-llama/llama-4-scout-17b-16e-instruct,qwen-qwq-32b,llama3-70b-8192,llama3-8b-8192,llama-3.3-70b-versatile,gemma2-9b-it
|
| 5 |
+
OPENAI_MODEL_OPTIONS = GPT-4o,GPT-4o-mini,o3-mini
|
| 6 |
+
SDLC_NODES = user_stories,design_documents,generated_code,security_check,test_cases,qa_testing,deployment,monitoring_and_feedback,maintanence_and_updates,consolidated_artifacts
|
src/sdlc/ui/uiconfigfile.py
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from configparser import ConfigParser
|
| 2 |
+
|
| 3 |
+
class Config:
    """Read-only accessor for the Streamlit UI settings INI file.

    All values live in the ``[DEFAULT]`` section; the ``get_*_options``
    helpers split comma-separated values into lists.
    """

    def __init__(self,config_file="./src/sdlc/ui/uiconfigfile.ini"):
        # ConfigParser.read silently skips a missing file, so a bad path
        # shows up later as a None option value rather than an error here.
        self.config=ConfigParser()
        self.config.read(config_file)

    def _option(self, name):
        """Return the raw string stored under *name* in [DEFAULT]."""
        return self.config["DEFAULT"].get(name)

    def get_llm_options(self):
        """List of available LLM provider names."""
        return self._option("LLM_OPTIONS").split(",")

    def get_groq_model_options(self):
        """List of selectable Groq model identifiers."""
        return self._option("GROQ_MODEL_OPTIONS").split(",")

    def get_openai_model_options(self):
        """List of selectable OpenAI model identifiers."""
        return self._option("OPENAI_MODEL_OPTIONS").split(",")

    def get_page_title(self):
        """Browser page title for the app."""
        return self._option("PAGE_TITLE")

    def get_sdlc_nodes(self):
        """Comma-separated SDLC stage names (returned unsplit, as stored)."""
        return self._option("SDLC_NODES")
|
| 22 |
+
|
| 23 |
+
|