msaifee commited on
Commit
974e5e3
·
1 Parent(s): 6a92230
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. app.py +7 -0
  2. app_api.py +4 -0
  3. requirements.txt +14 -0
  4. src/__init__.py +0 -0
  5. src/__pycache__/__init__.cpython-312.pyc +0 -0
  6. src/dev_pilot/LLMS/__init__.py +0 -0
  7. src/dev_pilot/LLMS/__pycache__/__init__.cpython-312.pyc +0 -0
  8. src/dev_pilot/LLMS/__pycache__/geminillm.cpython-312.pyc +0 -0
  9. src/dev_pilot/LLMS/__pycache__/groqllm.cpython-312.pyc +0 -0
  10. src/dev_pilot/LLMS/__pycache__/openai_llm.cpython-312.pyc +0 -0
  11. src/dev_pilot/LLMS/geminillm.py +24 -0
  12. src/dev_pilot/LLMS/groqllm.py +25 -0
  13. src/dev_pilot/LLMS/openai_llm.py +25 -0
  14. src/dev_pilot/__init__.py +0 -0
  15. src/dev_pilot/__pycache__/__init__.cpython-312.pyc +0 -0
  16. src/dev_pilot/__pycache__/main.cpython-312.pyc +0 -0
  17. src/dev_pilot/api/__init__.py +0 -0
  18. src/dev_pilot/api/__pycache__/__init__.cpython-312.pyc +0 -0
  19. src/dev_pilot/api/__pycache__/fastapi_app.cpython-312.pyc +0 -0
  20. src/dev_pilot/api/fastapi_app.py +202 -0
  21. src/dev_pilot/cache/__init__.py +0 -0
  22. src/dev_pilot/cache/__pycache__/__init__.cpython-312.pyc +0 -0
  23. src/dev_pilot/cache/__pycache__/redis_cache.cpython-312.pyc +0 -0
  24. src/dev_pilot/cache/redis_cache.py +54 -0
  25. src/dev_pilot/dto/__init__.py +0 -0
  26. src/dev_pilot/dto/__pycache__/__init__.cpython-312.pyc +0 -0
  27. src/dev_pilot/dto/__pycache__/sdlc_request.cpython-312.pyc +0 -0
  28. src/dev_pilot/dto/__pycache__/sdlc_response.cpython-312.pyc +0 -0
  29. src/dev_pilot/dto/sdlc_request.py +29 -0
  30. src/dev_pilot/dto/sdlc_response.py +11 -0
  31. src/dev_pilot/graph/__init__.py +0 -0
  32. src/dev_pilot/graph/__pycache__/__init__.cpython-312.pyc +0 -0
  33. src/dev_pilot/graph/__pycache__/graph_builder.cpython-312.pyc +0 -0
  34. src/dev_pilot/graph/__pycache__/graph_executor.cpython-312.pyc +0 -0
  35. src/dev_pilot/graph/graph_builder.py +174 -0
  36. src/dev_pilot/graph/graph_executor.py +114 -0
  37. src/dev_pilot/nodes/__init__.py +0 -0
  38. src/dev_pilot/nodes/__pycache__/__init__.cpython-312.pyc +0 -0
  39. src/dev_pilot/nodes/__pycache__/basic_chatbot_node.cpython-312.pyc +0 -0
  40. src/dev_pilot/nodes/__pycache__/chatbot_ai_news_node.cpython-312.pyc +0 -0
  41. src/dev_pilot/nodes/__pycache__/chatbot_with_tools_node.cpython-312.pyc +0 -0
  42. src/dev_pilot/nodes/__pycache__/coding_node.cpython-312.pyc +0 -0
  43. src/dev_pilot/nodes/__pycache__/design_document_node.cpython-312.pyc +0 -0
  44. src/dev_pilot/nodes/__pycache__/markdown_node.cpython-312.pyc +0 -0
  45. src/dev_pilot/nodes/__pycache__/project_requirement_node.cpython-312.pyc +0 -0
  46. src/dev_pilot/nodes/coding_node.py +280 -0
  47. src/dev_pilot/nodes/design_document_node.py +145 -0
  48. src/dev_pilot/nodes/markdown_node.py +124 -0
  49. src/dev_pilot/nodes/project_requirement_node.py +81 -0
  50. src/dev_pilot/state/__init__.py +0 -0
app.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
"""Entry point that launches the DevPilot Streamlit UI."""
from src.dev_pilot.ui.streamlit_ui.streamlit_app import load_app

# Removed unused imports (argparse, subprocess, sys) — nothing in this
# script referenced them.

if __name__ == "__main__":
    load_app()
app_api.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
"""Entry point that launches the DevPilot FastAPI server."""
from src.dev_pilot.api.fastapi_app import load_app

if __name__ == "__main__":
    load_app()
requirements.txt ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ langchain
2
+ langgraph
3
+ langchain_community
4
+ langchain_core
5
+ langchain_groq
6
+ langchain_openai
7
+ faiss_cpu
8
+ streamlit
9
+ langchain-google-genai
10
+ redis
11
+ upstash_redis
12
+ fastapi
13
+ uvicorn
14
+ loguru
src/__init__.py ADDED
File without changes
src/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (196 Bytes). View file
 
src/dev_pilot/LLMS/__init__.py ADDED
File without changes
src/dev_pilot/LLMS/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (221 Bytes). View file
 
src/dev_pilot/LLMS/__pycache__/geminillm.cpython-312.pyc ADDED
Binary file (1.43 kB). View file
 
src/dev_pilot/LLMS/__pycache__/groqllm.cpython-312.pyc ADDED
Binary file (1.4 kB). View file
 
src/dev_pilot/LLMS/__pycache__/openai_llm.cpython-312.pyc ADDED
Binary file (1.39 kB). View file
 
src/dev_pilot/LLMS/geminillm.py ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import streamlit as st
from langchain_google_genai import ChatGoogleGenerativeAI


class GeminiLLM:
    """Factory that builds a configured Google Gemini chat model."""

    def __init__(self, user_controls_input=None, model=None, api_key=None):
        # Either a dict of UI-supplied controls (takes precedence when truthy),
        # or an explicit model/api_key pair for programmatic use.
        self.user_controls_input = user_controls_input
        self.model = model
        self.api_key = api_key

    def get_llm_model(self):
        """Build and return a ``ChatGoogleGenerativeAI`` instance.

        Raises:
            ValueError: if construction fails (e.g. missing control keys).
        """
        try:
            if self.user_controls_input:
                gemini_api_key = self.user_controls_input['GEMINI_API_KEY']
                selected_gemini_model = self.user_controls_input['selected_gemini_model']
                llm = ChatGoogleGenerativeAI(api_key=gemini_api_key, model=selected_gemini_model)
            else:
                llm = ChatGoogleGenerativeAI(api_key=self.api_key, model=self.model)
        except Exception as e:
            # Fixed typo ("occured") and chained the original cause.
            raise ValueError(f"Error occurred with Exception : {e}") from e

        return llm
src/dev_pilot/LLMS/groqllm.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import streamlit as st
from langchain_groq import ChatGroq


class GroqLLM:
    """Factory that builds a configured Groq chat model."""

    def __init__(self, user_controls_input=None, model=None, api_key=None):
        # Either a dict of UI-supplied controls (takes precedence when truthy),
        # or an explicit model/api_key pair for programmatic use.
        self.user_controls_input = user_controls_input
        self.model = model
        self.api_key = api_key

    def get_llm_model(self):
        """Build and return a ``ChatGroq`` instance.

        Raises:
            ValueError: if construction fails (e.g. missing control keys).
        """
        try:
            if self.user_controls_input:
                groq_api_key = self.user_controls_input['GROQ_API_KEY']
                selected_groq_model = self.user_controls_input['selected_groq_model']
                llm = ChatGroq(api_key=groq_api_key, model=selected_groq_model)
            else:
                llm = ChatGroq(api_key=self.api_key, model=self.model)
        except Exception as e:
            # Fixed typo ("occured") and chained the original cause.
            raise ValueError(f"Error occurred with Exception : {e}") from e

        return llm
src/dev_pilot/LLMS/openai_llm.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import streamlit as st
from langchain_openai import ChatOpenAI


class OpenAILLM:
    """Factory that builds a configured OpenAI chat model."""

    def __init__(self, user_controls_input=None, model=None, api_key=None):
        # Either a dict of UI-supplied controls (takes precedence when truthy),
        # or an explicit model/api_key pair for programmatic use.
        self.user_controls_input = user_controls_input
        self.model = model
        self.api_key = api_key

    def get_llm_model(self):
        """Build and return a ``ChatOpenAI`` instance.

        Raises:
            ValueError: if construction fails (e.g. missing control keys).
        """
        try:
            if self.user_controls_input:
                openai_api_key = self.user_controls_input['OPENAI_API_KEY']
                selected_openai_model = self.user_controls_input['selected_openai_model']
                llm = ChatOpenAI(api_key=openai_api_key, model=selected_openai_model)
            else:
                # BUG FIX: the original used `openai_api_key` here, which is
                # unbound in this branch (NameError); use the stored api_key.
                llm = ChatOpenAI(api_key=self.api_key, model=self.model)
        except Exception as e:
            # Fixed typo ("occured") and chained the original cause.
            raise ValueError(f"Error occurred with Exception : {e}") from e

        return llm
src/dev_pilot/__init__.py ADDED
File without changes
src/dev_pilot/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (216 Bytes). View file
 
src/dev_pilot/__pycache__/main.cpython-312.pyc ADDED
Binary file (12.5 kB). View file
 
src/dev_pilot/api/__init__.py ADDED
File without changes
src/dev_pilot/api/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (207 Bytes). View file
 
src/dev_pilot/api/__pycache__/fastapi_app.cpython-312.pyc ADDED
Binary file (8.29 kB). View file
 
src/dev_pilot/api/fastapi_app.py ADDED
@@ -0,0 +1,202 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from fastapi import FastAPI, HTTPException, Depends, Request
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
import os
from dotenv import load_dotenv
from functools import lru_cache
from src.dev_pilot.LLMS.groqllm import GroqLLM
from src.dev_pilot.LLMS.geminillm import GeminiLLM
from src.dev_pilot.graph.graph_builder import GraphBuilder
from src.dev_pilot.graph.graph_executor import GraphExecutor
from src.dev_pilot.dto.sdlc_request import SDLCRequest
from src.dev_pilot.dto.sdlc_response import SDLCResponse
import uvicorn
from contextlib import asynccontextmanager
from src.dev_pilot.utils.logging_config import setup_logging
from loguru import logger

## Setup logging level
setup_logging(log_level="DEBUG")

# BUG FIX: load_dotenv was imported but never called, so Settings only saw
# variables already exported in the real environment. Load .env (if present)
# before anything reads os.getenv.
load_dotenv()

gemini_models = [
    "gemini-2.0-flash",
    "gemini-2.0-flash-lite",
    "gemini-2.5-pro-exp-03-25"
]

groq_models = [
    "gemma2-9b-it",
    "llama3-8b-8192",
    "llama3-70b-8192"
]


def load_app():
    """Run the API under uvicorn (entry point used by app_api.py)."""
    uvicorn.run(app, host="0.0.0.0", port=8000)


class Settings:
    """Environment-backed configuration for the API."""

    def __init__(self):
        self.GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
        self.GROQ_API_KEY = os.getenv("GROQ_API_KEY")


@lru_cache()
def get_settings():
    """Return a cached Settings instance (env is read once per process)."""
    return Settings()


def validate_api_keys(settings: Settings = Depends(get_settings)):
    """Dependency that fails with HTTP 500 when any required key is unset."""
    required_keys = {
        'GEMINI_API_KEY': settings.GEMINI_API_KEY,
        'GROQ_API_KEY': settings.GROQ_API_KEY
    }

    missing_keys = [key for key, value in required_keys.items() if not value]
    if missing_keys:
        raise HTTPException(
            status_code=500,
            detail=f"Missing required API keys: {', '.join(missing_keys)}"
        )
    return settings


# Initialize the LLM and GraphBuilder instances once and store them in the app state
@asynccontextmanager
async def lifespan(app: FastAPI):
    settings = get_settings()
    llm = GeminiLLM(model=gemini_models[0], api_key=settings.GEMINI_API_KEY).get_llm_model()
    graph_builder = GraphBuilder(llm=llm)
    graph = graph_builder.setup_graph()
    graph_executor = GraphExecutor(graph)
    app.state.llm = llm
    app.state.graph = graph
    app.state.graph_executor = graph_executor
    yield
    # Clean up resources if needed
    app.state.llm = None
    app.state.graph = None
    app.state.graph_executor = None


app = FastAPI(
    title="DevPilot API",
    description="AI-powered SDLC API using Langgraph",
    version="1.0.0",
    lifespan=lifespan
)

logger.info("Application starting up...")

# Configure CORS
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # In production, replace with specific origins
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


def _get_graph_executor() -> GraphExecutor:
    """Fetch the executor from app state, failing loudly when lifespan did not run."""
    graph_executor = app.state.graph_executor
    if not isinstance(graph_executor, GraphExecutor):
        raise Exception("Graph Executor not initialized")
    return graph_executor


@app.get("/")
async def root():
    return {
        "message": "Welcome to DevPilot API",
        "docs_url": "/docs",
        "redoc_url": "/redoc"
    }


@app.post("/api/v1/sdlc/start", response_model=SDLCResponse)
async def start_sdlc(
    sdlc_request: SDLCRequest,
    settings: Settings = Depends(validate_api_keys)
):
    """Kick off a new SDLC workflow for the requested project."""
    try:
        graph_executor = _get_graph_executor()

        graph_response = graph_executor.start_workflow(sdlc_request.project_name)

        logger.debug(f"Start Workflow Response: {graph_response}")

        return SDLCResponse(
            status="success",
            message="SDLC process started successfully",
            task_id=graph_response["task_id"],
            state=graph_response["state"]
        )

    except Exception as e:
        error_response = SDLCResponse(
            status="error",
            message="Failed to start the process",
            error=str(e)
        )
        return JSONResponse(status_code=500, content=error_response.model_dump())


@app.post("/api/v1/sdlc/user_stories", response_model=SDLCResponse)
async def generate_user_stories(
    sdlc_request: SDLCRequest,
    settings: Settings = Depends(validate_api_keys)
):
    """Generate user stories for an existing workflow session.

    BUG FIX: this handler was also named ``start_sdlc``, shadowing the
    /start handler's function name at module level; renamed (route path
    and behavior are unchanged).
    """
    try:
        graph_executor = _get_graph_executor()

        graph_response = graph_executor.generate_stories(sdlc_request.task_id, sdlc_request.requirements)

        logger.debug(f"Generate Stories Response: {graph_response}")

        return SDLCResponse(
            status="success",
            message="User Stories generated successfully",
            task_id=graph_response["task_id"],
            state=graph_response["state"]
        )

    except Exception as e:
        error_response = SDLCResponse(
            status="error",
            message="Failed to generate user stories",
            error=str(e)
        )
        return JSONResponse(status_code=500, content=error_response.model_dump())


@app.post("/api/v1/sdlc/progress_flow", response_model=SDLCResponse)
async def progress_sdlc(
    sdlc_request: SDLCRequest,
    settings: Settings = Depends(validate_api_keys)
):
    """Advance the workflow past one of its human-review interrupt points."""
    try:
        graph_executor = _get_graph_executor()

        graph_response = graph_executor.graph_review_flow(
            sdlc_request.task_id,
            sdlc_request.status,
            sdlc_request.feedback,
            sdlc_request.next_node)

        logger.debug(f"Flow Node: {sdlc_request.next_node}")
        logger.debug(f"Progress Flow Response: {graph_response}")

        return SDLCResponse(
            status="success",
            message="Flow progressed successfully to next step",
            task_id=graph_response["task_id"],
            state=graph_response["state"]
        )

    except Exception as e:
        error_response = SDLCResponse(
            status="error",
            message="Failed to progress the flow",
            error=str(e)
        )
        return JSONResponse(status_code=500, content=error_response.model_dump())
src/dev_pilot/cache/__init__.py ADDED
File without changes
src/dev_pilot/cache/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (221 Bytes). View file
 
src/dev_pilot/cache/__pycache__/redis_cache.cpython-312.pyc ADDED
Binary file (2.11 kB). View file
 
src/dev_pilot/cache/redis_cache.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import redis
import json
from typing import Optional
from src.dev_pilot.state.sdlc_state import CustomEncoder, SDLCState
from upstash_redis import Redis
import os
from dotenv import load_dotenv
from loguru import logger

load_dotenv()


# Initialize Redis client

## Upstash Redis Client Configuration
REDIS_URL = os.getenv("REDIS_URL")
REDIS_TOKEN = os.getenv("REDIS_TOKEN")
# BUG FIX: the original `redis_client = redis = Redis(...)` also rebound the
# imported `redis` module name to the client instance, shadowing the module.
redis_client = Redis(url=REDIS_URL, token=REDIS_TOKEN)

## For testing locally with docker
# redis_client = redis.Redis(
#     host='localhost',  # Replace with your Redis host
#     port=6379,         # Replace with your Redis port
#     db=0               # Replace with your Redis database number
# )

# Cached workflow state lives for 24 hours.
STATE_TTL_SECONDS = 86400


def save_state_to_redis(task_id: str, state: SDLCState):
    """Serialize the state with CustomEncoder and store it under task_id,
    expiring after 24 hours."""
    payload = json.dumps(state, cls=CustomEncoder)
    redis_client.set(task_id, payload)
    redis_client.expire(task_id, STATE_TTL_SECONDS)


def get_state_from_redis(task_id: str) -> Optional[SDLCState]:
    """Retrieve the state for task_id, or None when absent/expired."""
    state_json = redis_client.get(task_id)
    if not state_json:
        return None

    # NOTE(review): the payload is indexed with [0] — presumably CustomEncoder
    # serializes a sequence whose first element is the state mapping; confirm
    # against CustomEncoder before changing.
    state_dict = json.loads(state_json)[0]
    return SDLCState(**state_dict)


def delete_from_redis(task_id: str):
    """Delete a single task's cached state."""
    redis_client.delete(task_id)


def flush_redis_cache():
    """Flush the whole cache (all keys in all databases)."""
    redis_client.flushall()

    logger.info("--- Redis cache cleared ---")
src/dev_pilot/dto/__init__.py ADDED
File without changes
src/dev_pilot/dto/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (207 Bytes). View file
 
src/dev_pilot/dto/__pycache__/sdlc_request.cpython-312.pyc ADDED
Binary file (1.66 kB). View file
 
src/dev_pilot/dto/__pycache__/sdlc_response.cpython-312.pyc ADDED
Binary file (827 Bytes). View file
 
src/dev_pilot/dto/sdlc_request.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from pydantic import BaseModel, Field
from typing import Optional


class SDLCRequest(BaseModel):
    """Request payload shared by all SDLC workflow endpoints.

    Only ``project_name`` is required (for /start); the remaining fields are
    supplied by later stages of the flow.
    """

    project_name: str = Field(...,
                              example="Ecommerce Platform",
                              description="The name of the project")

    requirements: Optional[list[str]] = Field(None,
                                              # Fixed typo: "browser" -> "browse"
                                              example=["Users can browse the products",
                                                       "Users should be able to add the product in the cart",
                                                       "Users should be able to do the payment",
                                                       "Users should be able to see their order history"],
                                              description="The list of requirements for the project")
    task_id: Optional[str] = Field(None,
                                   example="sdlc-session-5551defc",
                                   description="The task id of the workflow session")

    next_node: Optional[str] = Field(None,
                                     example="review_user_stories",
                                     description="The node to be executed in the workflow. Pass the node information returned from previous API")

    status: Optional[str] = Field(None,
                                  example="approved or feedback",
                                  description="The status of the review")

    feedback: Optional[str] = Field(None,
                                    example="The user stories are good but need to be more specific",
                                    description="The feedback for the review")
src/dev_pilot/dto/sdlc_response.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
from pydantic import BaseModel
from src.dev_pilot.state.sdlc_state import SDLCState
# Consolidated the two duplicate `from typing import ...` statements.
from typing import Any, Dict, Optional


class SDLCResponse(BaseModel):
    """Uniform response envelope returned by every SDLC endpoint."""

    # "success" or "error"
    status: str
    # Human-readable summary of the outcome.
    message: str
    # Workflow session id (present on success).
    task_id: Optional[str] = None
    # Serialized graph state snapshot (present on success).
    state: Optional[Dict[str, Any]] = None
    # Error detail (present on failure).
    error: Optional[str] = None
src/dev_pilot/graph/__init__.py ADDED
File without changes
src/dev_pilot/graph/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (222 Bytes). View file
 
src/dev_pilot/graph/__pycache__/graph_builder.cpython-312.pyc ADDED
Binary file (8.53 kB). View file
 
src/dev_pilot/graph/__pycache__/graph_executor.cpython-312.pyc ADDED
Binary file (5.49 kB). View file
 
src/dev_pilot/graph/graph_builder.py ADDED
@@ -0,0 +1,174 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from langgraph.graph import StateGraph, START, END
from src.dev_pilot.state.sdlc_state import SDLCState
from src.dev_pilot.nodes.project_requirement_node import ProjectRequirementNode
from src.dev_pilot.nodes.design_document_node import DesingDocumentNode
from src.dev_pilot.nodes.coding_node import CodingNode
from src.dev_pilot.nodes.markdown_node import MarkdownArtifactsNode
from langgraph.checkpoint.memory import MemorySaver
from langchain_core.runnables.graph import MermaidDrawMethod


class GraphBuilder:
    """Assembles the SDLC workflow as a LangGraph state graph."""

    def __init__(self, llm):
        self.llm = llm
        self.graph_builder = StateGraph(SDLCState)
        # In-memory checkpointer so the graph can pause at interrupts and resume.
        self.memory = MemorySaver()

    def build_sdlc_graph(self):
        """
        Configure the graph by adding nodes, edges
        """
        self.project_requirement_node = ProjectRequirementNode(self.llm)
        self.design_document_node = DesingDocumentNode(self.llm)
        self.coding_node = CodingNode(self.llm)
        self.markdown_node = MarkdownArtifactsNode()

        ## Nodes
        self.graph_builder.add_node("initialize_project", self.project_requirement_node.initialize_project)
        self.graph_builder.add_node("get_user_requirements", self.project_requirement_node.get_user_requirements)

        self.graph_builder.add_node("generate_user_stories", self.project_requirement_node.generate_user_stories)
        self.graph_builder.add_node("review_user_stories", self.project_requirement_node.review_user_stories)
        self.graph_builder.add_node("revise_user_stories", self.project_requirement_node.revise_user_stories)

        self.graph_builder.add_node("create_design_documents", self.design_document_node.create_design_documents)
        self.graph_builder.add_node("review_design_documents", self.design_document_node.review_design_documents)
        self.graph_builder.add_node("revise_design_documents", self.design_document_node.revise_design_documents)

        self.graph_builder.add_node("generate_code", self.coding_node.generate_code)
        self.graph_builder.add_node("code_review", self.coding_node.code_review)
        self.graph_builder.add_node("fix_code", self.coding_node.fix_code)

        self.graph_builder.add_node("security_review_recommendations", self.coding_node.security_review_recommendations)
        self.graph_builder.add_node("security_review", self.coding_node.security_review)
        self.graph_builder.add_node("fix_code_after_security_review", self.coding_node.fix_code_after_security_review)

        self.graph_builder.add_node("write_test_cases", self.coding_node.write_test_cases)
        self.graph_builder.add_node("review_test_cases", self.coding_node.review_test_cases)
        self.graph_builder.add_node("revise_test_cases", self.coding_node.revise_test_cases)

        self.graph_builder.add_node("qa_testing", self.coding_node.qa_testing)
        self.graph_builder.add_node("qa_review", self.coding_node.qa_review)
        self.graph_builder.add_node("deployment", self.coding_node.deployment)
        # NOTE(review): "donwload_artifacts" is a typo for "download_artifacts",
        # but the node name may be surfaced to API/UI consumers via streamed
        # events, so it is kept as-is; rename in lockstep with all consumers.
        self.graph_builder.add_node("donwload_artifacts", self.markdown_node.generate_markdown_artifacts)

        ## Edges
        self.graph_builder.add_edge(START, "initialize_project")
        self.graph_builder.add_edge("initialize_project", "get_user_requirements")
        self.graph_builder.add_edge("get_user_requirements", "generate_user_stories")
        self.graph_builder.add_edge("generate_user_stories", "review_user_stories")
        self.graph_builder.add_conditional_edges(
            "review_user_stories",
            self.project_requirement_node.review_user_stories_router,
            {
                "approved": "create_design_documents",
                "feedback": "revise_user_stories"
            }
        )
        self.graph_builder.add_edge("revise_user_stories", "generate_user_stories")
        self.graph_builder.add_edge("create_design_documents", "review_design_documents")
        self.graph_builder.add_conditional_edges(
            "review_design_documents",
            self.design_document_node.review_design_documents_router,
            {
                "approved": "generate_code",
                "feedback": "revise_design_documents"
            }
        )
        self.graph_builder.add_edge("revise_design_documents", "create_design_documents")
        self.graph_builder.add_edge("generate_code", "code_review")
        self.graph_builder.add_conditional_edges(
            "code_review",
            self.coding_node.code_review_router,
            {
                "approved": "security_review_recommendations",
                "feedback": "fix_code"
            }
        )
        self.graph_builder.add_edge("fix_code", "generate_code")
        self.graph_builder.add_edge("security_review_recommendations", "security_review")
        self.graph_builder.add_conditional_edges(
            "security_review",
            self.coding_node.security_review_router,
            {
                "approved": "write_test_cases",
                "feedback": "fix_code_after_security_review"
            }
        )
        self.graph_builder.add_edge("fix_code_after_security_review", "generate_code")
        self.graph_builder.add_edge("write_test_cases", "review_test_cases")
        self.graph_builder.add_conditional_edges(
            "review_test_cases",
            self.coding_node.review_test_cases_router,
            {
                "approved": "qa_testing",
                "feedback": "revise_test_cases"
            }
        )
        self.graph_builder.add_edge("revise_test_cases", "write_test_cases")
        self.graph_builder.add_edge("qa_testing", "qa_review")
        # NOTE(review): qa_review reuses review_test_cases_router — presumably
        # intentional because both route on the same status field; confirm a
        # dedicated qa router is not required.
        self.graph_builder.add_conditional_edges(
            "qa_review",
            self.coding_node.review_test_cases_router,
            {
                "approved": "deployment",
                "feedback": "generate_code"
            }
        )
        self.graph_builder.add_edge("deployment", "donwload_artifacts")
        self.graph_builder.add_edge("donwload_artifacts", END)

    def setup_graph(self):
        """
        Build and compile the graph, interrupting before every human-review node.
        """
        self.build_sdlc_graph()
        return self.graph_builder.compile(
            interrupt_before=[
                'get_user_requirements',
                'review_user_stories',
                'review_design_documents',
                'code_review',
                'security_review',
                'review_test_cases',
                'qa_review'
            ], checkpointer=self.memory
        )

    # Removed a commented-out duplicate of setup_graph (dead code) that only
    # differed by calling save_graph_image on the compiled graph.

    def save_graph_image(self, graph):
        """Render the compiled graph to workflow_graph.png via the Mermaid API."""
        # Generate the PNG image
        img_data = graph.get_graph().draw_mermaid_png(
            draw_method=MermaidDrawMethod.API
        )

        # Save the image to a file
        graph_path = "workflow_graph.png"
        with open(graph_path, "wb") as f:
            f.write(img_data)
+
src/dev_pilot/graph/graph_executor.py ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from src.dev_pilot.state.sdlc_state import SDLCState
from src.dev_pilot.cache.redis_cache import flush_redis_cache, save_state_to_redis, get_state_from_redis
import uuid
import src.dev_pilot.utils.constants as const
from loguru import logger


class GraphExecutor:
    """Drives the compiled SDLC graph: starts sessions, resumes them after
    human review, and persists state snapshots to Redis between steps."""

    def __init__(self, graph):
        self.graph = graph

    def get_thread(self, task_id):
        """Build the LangGraph config dict that pins execution to this session."""
        return {"configurable": {"thread_id": task_id}}

    ## ------- Start the Workflow ------- ##
    def start_workflow(self, project_name: str):
        """Create a new session, run the graph to its first interrupt, and
        cache the resulting state."""
        graph = self.graph

        flush_redis_cache()

        # Generate a unique task id
        task_id = f"sdlc-session-{uuid.uuid4().hex[:8]}"

        thread = self.get_thread(task_id)

        state = None
        for event in graph.stream({"project_name": project_name}, thread, stream_mode="values"):
            state = event

        current_state = graph.get_state(thread)
        save_state_to_redis(task_id, current_state)

        return {"task_id": task_id, "state": state}

    ## ------- User Story Generation ------- ##
    def generate_stories(self, task_id: str, requirements: list[str]):
        """Attach requirements to the saved session and resume the graph.

        Raises:
            ValueError: when no cached state exists for task_id (previously
            this fell through silently and surfaced as an opaque error later).
        """
        saved_state = get_state_from_redis(task_id)
        if not saved_state:
            raise ValueError(f"No saved state found for task id: {task_id}")

        saved_state['requirements'] = requirements
        saved_state['next_node'] = const.REVIEW_USER_STORIES

        return self.update_and_resume_graph(saved_state, task_id, "get_user_requirements")

    ## ------- Generic Review Flow for all the feedback stages ------- ##
    def graph_review_flow(self, task_id, status, feedback, review_type):
        """Record a review verdict (approved/feedback) on the saved state and
        resume the graph from the matching review node.

        Raises:
            ValueError: for an unknown review_type or a missing session.
        """
        saved_state = get_state_from_redis(task_id)
        if not saved_state:
            raise ValueError(f"No saved state found for task id: {task_id}")

        if review_type == const.REVIEW_USER_STORIES:
            saved_state['user_stories_review_status'] = status
            saved_state['user_stories_feedback'] = feedback
            node_name = "review_user_stories"
            saved_state['next_node'] = const.REVIEW_USER_STORIES if status == "feedback" else const.REVIEW_DESIGN_DOCUMENTS

        elif review_type == const.REVIEW_DESIGN_DOCUMENTS:
            saved_state['design_documents_review_status'] = status
            saved_state['design_documents_feedback'] = feedback
            node_name = "review_design_documents"
            saved_state['next_node'] = const.REVIEW_DESIGN_DOCUMENTS if status == "feedback" else const.REVIEW_CODE

        elif review_type == const.REVIEW_CODE:
            saved_state['code_review_status'] = status
            saved_state['code_review_feedback'] = feedback
            node_name = "code_review"
            saved_state['next_node'] = const.REVIEW_CODE if status == "feedback" else const.REVIEW_SECURITY_RECOMMENDATIONS

        elif review_type == const.REVIEW_SECURITY_RECOMMENDATIONS:
            saved_state['security_review_status'] = status
            saved_state['security_review_comments'] = feedback
            node_name = "security_review"
            saved_state['next_node'] = const.REVIEW_SECURITY_RECOMMENDATIONS if status == "feedback" else const.REVIEW_TEST_CASES

        elif review_type == const.REVIEW_TEST_CASES:
            saved_state['test_case_review_status'] = status
            saved_state['test_case_review_feedback'] = feedback
            node_name = "review_test_cases"
            saved_state['next_node'] = const.REVIEW_TEST_CASES if status == "feedback" else const.REVIEW_QA_TESTING

        elif review_type == const.REVIEW_QA_TESTING:
            saved_state['qa_testing_status'] = status
            saved_state['qa_testing_feedback'] = feedback
            node_name = "qa_review"
            saved_state['next_node'] = const.REVIEW_QA_TESTING if status == "feedback" else const.END_NODE

        else:
            raise ValueError(f"Unsupported review type: {review_type}")

        return self.update_and_resume_graph(saved_state, task_id, node_name)

    ## -------- Helper Method to handle the graph resume state ------- ##
    def update_and_resume_graph(self, saved_state, task_id, as_node):
        """Write saved_state back into the graph as as_node's output, stream
        to the next interrupt, and re-cache the resulting state."""
        graph = self.graph
        thread = self.get_thread(task_id)

        graph.update_state(thread, saved_state, as_node=as_node)

        # Resume the graph
        state = None
        for event in graph.stream(None, thread, stream_mode="values"):
            logger.debug(f"Event Received: {event}")
            state = event

        # saving the state before asking the product owner for review
        current_state = graph.get_state(thread)
        save_state_to_redis(task_id, current_state)

        return {"task_id": task_id, "state": state}

    def get_updated_state(self, task_id):
        """Return the most recently cached state for task_id (may be None)."""
        saved_state = get_state_from_redis(task_id)
        return {"task_id": task_id, "state": saved_state}
+
src/dev_pilot/nodes/__init__.py ADDED
File without changes
src/dev_pilot/nodes/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (222 Bytes). View file
 
src/dev_pilot/nodes/__pycache__/basic_chatbot_node.cpython-312.pyc ADDED
Binary file (1.07 kB). View file
 
src/dev_pilot/nodes/__pycache__/chatbot_ai_news_node.cpython-312.pyc ADDED
Binary file (2.26 kB). View file
 
src/dev_pilot/nodes/__pycache__/chatbot_with_tools_node.cpython-312.pyc ADDED
Binary file (1.44 kB). View file
 
src/dev_pilot/nodes/__pycache__/coding_node.cpython-312.pyc ADDED
Binary file (12 kB). View file
 
src/dev_pilot/nodes/__pycache__/design_document_node.cpython-312.pyc ADDED
Binary file (7.2 kB). View file
 
src/dev_pilot/nodes/__pycache__/markdown_node.cpython-312.pyc ADDED
Binary file (6.25 kB). View file
 
src/dev_pilot/nodes/__pycache__/project_requirement_node.cpython-312.pyc ADDED
Binary file (4.21 kB). View file
 
src/dev_pilot/nodes/coding_node.py ADDED
@@ -0,0 +1,280 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from src.dev_pilot.state.sdlc_state import SDLCState, UserStoryList
2
+ from src.dev_pilot.utils.Utility import Utility
3
+ from loguru import logger
4
+
5
class CodingNode:
    """
    Graph Node for the Coding stage of the SDLC workflow.

    Wraps an LLM client and exposes node callables for code generation,
    code/security review, test-case generation, QA testing and a
    simulated deployment. Each node receives and returns an SDLCState.
    """

    def __init__(self, model):
        # model: LLM client exposing .invoke(prompt) -> response with .content
        self.llm = model
        self.utility = Utility()

    ## ---- Code Generation ----- ##
    def generate_code(self, state: SDLCState):
        """
        Generates the code for the given SDLC state as multiple Python files.

        Returns a partial state update with 'code_generated' (raw LLM
        output) and 'code_review_comments' (an immediate LLM review of
        that output).
        """
        logger.info("----- Generating the code ----")

        requirements = state.get('requirements', '')
        user_stories = state.get('user_stories', '')
        # NOTE(review): the gate checks 'code_generated' but reads
        # 'code_review_feedback' — confirm the key in the condition is intended
        # (it only makes sense on a regeneration pass).
        code_feedback = state.get('code_review_feedback', '') if 'code_generated' in state else ""
        security_feedback = state.get('security_recommendations', '') if 'security_recommendations' in state else ""

        prompt = f"""
        Generate a complete Python project organized as multiple code files.
        Based on the following SDLC state, generate only the Python code files with their complete implementations.
        Do NOT include any explanations, requirements text, or design document details in the output—only code files with proper names and code content.

        SDLC State:
        ---------------
        Project Name: {state['project_name']}

        Requirements:
        {self.utility.format_list(requirements)}

        User Stories:
        {self.utility.format_user_stories(user_stories)}

        Functional Design Document:
        {state['design_documents']['functional']}

        Technical Design Document:
        {state['design_documents']['technical']}

        {"Note: Incorporate the following code review feedback: " + code_feedback if code_feedback else ""}
        {"Note: Apply the following security recommendations: " + security_feedback if security_feedback else ""}

        Instructions:
        - Structure the output as multiple code files (for example, "main.py", "module1.py", etc.), each separated clearly.
        - Each file should contain only the code necessary for a modular, fully-functional project based on the input state.
        - Do not output any additional text, explanations, or commentary outside the code files.
        - Ensure the code follows Python best practices, is syntactically correct, and is ready for development.
        """
        response = self.llm.invoke(prompt)
        code_review_comments = self.get_code_review_comments(code=response.content)
        return {
            'code_generated': response.content,
            'code_review_comments': code_review_comments
        }

    ## This code review comments will be used while generating test cases
    def get_code_review_comments(self, code: str):
        """
        Generate code review comments for the provided code.

        The prompt asks the LLM to end with APPROVED or NEEDS_FEEDBACK;
        the raw review text is returned unparsed.
        """
        logger.info("----- Generating code review comments ----")

        # Create a prompt for the LLM to review the code
        prompt = f"""
        You are a coding expert. Please review the following code and provide detailed feedback:
        ```
        {code}
        ```
        Focus on:
        1. Code quality and best practices
        2. Potential bugs or edge cases
        3. Performance considerations
        4. Security concerns

        End your review with an explicit APPROVED or NEEDS_FEEDBACK status.
        """

        # Get the review from the LLM
        response = self.llm.invoke(prompt)
        review_comments = response.content
        return review_comments

    def code_review(self, state: SDLCState):
        # Pass-through node: the human/graph interrupt happens around it.
        return state

    def fix_code(self, state: SDLCState):
        # Placeholder node; currently returns None.
        pass

    def code_review_router(self, state: SDLCState):
        """
        Evaluates Code review is required or not.
        Routes on 'code_review_status' ("feedback" vs "approved").
        """
        return state.get("code_review_status", "approved") # default to "approved" if not present

    ## ---- Security Review ----- ##
    def security_review_recommendations(self, state: SDLCState):
        """
        Performs security review of the code generated.

        Mutates the state in place, storing the raw LLM review under
        'security_recommendations', and returns the state.
        """
        logger.info("----- Generating security recommendations ----")

        # Get the generated code from the state
        code_generated = state.get('code_generated', '')

        # Create a prompt for the LLM to review the code for security concerns
        prompt = f"""
        You are a security expert. Please review the following Python code for potential security vulnerabilities:
        ```
        {code_generated}
        ```
        Focus on:
        1. Identifying potential security risks (e.g., SQL injection, XSS, insecure data handling).
        2. Providing recommendations to mitigate these risks.
        3. Highlighting any best practices that are missing.

        End your review with an explicit APPROVED or NEEDS_FEEDBACK status.
        """

        # Invoke the LLM to perform the security review
        response = self.llm.invoke(prompt)
        state["security_recommendations"] = response.content
        return state

    def security_review(self, state: SDLCState):
        # Pass-through node for the security review interrupt point.
        return state

    def fix_code_after_security_review(self, state: SDLCState):
        # Placeholder node; currently returns None.
        pass

    def security_review_router(self, state: SDLCState):
        """
        Security Code review is required or not.
        Routes on 'security_review_status' ("feedback" vs "approved").
        """
        return state.get("security_review_status", "approved") # default to "approved" if not present

    ## ---- Test Cases ----- ##
    def write_test_cases(self, state: SDLCState):
        """
        Generates the test cases based on the generated code and code review comments.

        Stores the raw LLM output under 'test_cases' and returns the state.
        """
        logger.info("----- Generating Test Cases ----")

        # Get the generated code and code review comments from the state
        code_generated = state.get('code_generated', '')
        code_review_comments = state.get('code_review_comments', '')

        # Create a prompt for the LLM to generate test cases
        prompt = f"""
        You are a software testing expert. Based on the following Python code and its review comments, generate comprehensive test cases:

        ### Code:
        ```
        {code_generated}
        ```

        ### Code Review Comments:
        {code_review_comments}

        Focus on:
        1. Covering all edge cases and boundary conditions.
        2. Ensuring functional correctness of the code.
        3. Including both positive and negative test cases.
        4. Writing test cases in Python's `unittest` framework format.

        Provide the test cases in Python code format, ready to be executed.
        """

        response = self.llm.invoke(prompt)
        state["test_cases"] = response.content

        return state

    def review_test_cases(self, state: SDLCState):
        # Pass-through node for the test-case review interrupt point.
        return state

    def revise_test_cases(self, state: SDLCState):
        # Placeholder node; currently returns None.
        pass

    def review_test_cases_router(self, state: SDLCState):
        """
        Evaluates Test Cases review is required or not.
        Routes on 'test_case_review_status' ("feedback" vs "approved").
        """
        return state.get("test_case_review_status", "approved") # default to "approved" if not present

    ## ---- QA Testing ----- ##
    def qa_testing(self, state: SDLCState):
        """
        Performs QA testing based on the generated code and test cases.

        The "testing" is simulated by the LLM; its commentary is stored
        under 'qa_testing_comments'.
        """
        logger.info("----- Performing QA Testing ----")
        # Get the generated code and test cases from the state
        code_generated = state.get('code_generated', '')
        test_cases = state.get('test_cases', '')

        # Create a prompt for the LLM to simulate running the test cases
        prompt = f"""
        You are a QA testing expert. Based on the following Python code and test cases, simulate running the test cases and provide feedback:

        ### Code:
        ```
        {code_generated}
        ```

        ### Test Cases:
        ```
        {test_cases}
        ```

        Focus on:
        1. Identifying which test cases pass and which fail.
        2. Providing detailed feedback for any failed test cases, including the reason for failure.
        3. Suggesting improvements to the code or test cases if necessary.

        Provide the results in the following format:
        - Test Case ID: [ID]
          Status: [Pass/Fail]
          Feedback: [Detailed feedback if failed]
        """

        # Invoke the LLM to simulate QA testing
        response = self.llm.invoke(prompt)
        qa_testing_comments = response.content

        state["qa_testing_comments"]= qa_testing_comments
        return state

    def qa_review(self, state: SDLCState):
        # Placeholder node; currently returns None.
        pass

    def deployment(self, state: SDLCState):
        """
        Performs the deployment (simulated by the LLM).

        Returns a new state dict with 'deployment_status'
        ("success"/"failed") and the raw 'deployment_feedback'.
        """
        logger.info("----- Generating Deployment Simulation----")

        code_generated = state.get('code_generated', '')

        # Create a prompt for the LLM to simulate deployment
        prompt = f"""
        You are a DevOps expert. Based on the following Python code, simulate the deployment process and provide feedback:

        ### Code:
        ```
        {code_generated}
        ```

        Focus on:
        1. Identifying potential deployment issues (e.g., missing dependencies, configuration errors).
        2. Providing recommendations to resolve any issues.
        3. Confirming whether the deployment is successful or needs further action.

        Provide the results in the following format:
        - Deployment Status: [Success/Failed]
        - Feedback: [Detailed feedback on the deployment process]
        """

        # Invoke the LLM to simulate deployment
        response = self.llm.invoke(prompt)
        deployment_feedback = response.content

        # Determine the deployment status based on the feedback
        # NOTE(review): naive substring check — any occurrence of "success"
        # anywhere in the feedback marks the deployment successful; confirm
        # this matches the prompt's "Deployment Status" line reliably.
        if "SUCCESS" in deployment_feedback.upper():
            deployment_status = "success"
        else:
            deployment_status = "failed"

        # Update the state with the deployment results
        return {
            **state,
            "deployment_status": deployment_status,
            "deployment_feedback": deployment_feedback
        }
src/dev_pilot/nodes/design_document_node.py ADDED
@@ -0,0 +1,145 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from src.dev_pilot.state.sdlc_state import SDLCState, DesignDocument
2
+ from src.dev_pilot.utils.Utility import Utility
3
+ from loguru import logger
4
+
5
class DesingDocumentNode:
    """
    Graph Node for the Design Documents stage.

    Produces a functional and a technical design document (Markdown)
    from the project requirements and user stories, via the LLM.
    """

    def __init__(self, model):
        # model: LLM client exposing .invoke(prompt) -> response with .content
        self.llm = model
        self.utility = Utility()

    def create_design_documents(self, state: SDLCState):
        """
        Generates the Design document functional and technical.

        Returns a new state dict carrying a DesignDocument under
        'design_documents' plus the technical text under
        'technical_documents'.
        """
        logger.info("----- Creating Design Document ----")
        requirements = state.get('requirements', '')
        user_stories = state.get('user_stories', '')
        project_name = state.get('project_name', '')
        design_feedback = None

        # Only a revision pass (documents already exist) carries feedback.
        if 'design_documents' in state:
            design_feedback = state.get('design_documents_feedback','')

        functional_documents = self.generate_functional_design(
            project_name=project_name,
            requirements=requirements,
            user_stories=user_stories,
            design_feedback=design_feedback
        )

        technical_documents = self.generate_technical_design(
            project_name=project_name,
            requirements=requirements,
            user_stories=user_stories,
            design_feedback=design_feedback
        )

        design_documents = DesignDocument(
            functional=functional_documents,
            technical = technical_documents
        )

        return {
            **state,
            "design_documents": design_documents,
            "technical_documents": technical_documents
        }

    def generate_functional_design(self, project_name, requirements, user_stories, design_feedback):
        """
        Helper method to generate functional design document.

        Returns the LLM's Markdown output as a string; design_feedback
        (may be None) is folded into the prompt when present.
        """
        logger.info("----- Creating Functional Design Document ----")
        prompt = f"""
        Create a comprehensive functional design document for {project_name} in Markdown format.

        The document should use proper Markdown syntax with headers (# for main titles, ## for sections, etc.),
        bullet points, tables, and code blocks where appropriate.

        Requirements:
        {self.utility.format_list(requirements)}

        User Stories:
        {self.utility.format_user_stories(user_stories)}

        {f"When creating this functional design document, please incorporate the following feedback about the requirements: {design_feedback}" if design_feedback else ""}

        The functional design document should include the following sections, each with proper Markdown formatting:

        # Functional Design Document: {project_name}

        ## 1. Introduction and Purpose
        ## 2. Project Scope
        ## 3. User Roles and Permissions
        ## 4. Functional Requirements Breakdown
        ## 5. User Interface Design Guidelines
        ## 6. Business Process Flows
        ## 7. Data Entities and Relationships
        ## 8. Validation Rules
        ## 9. Reporting Requirements
        ## 10. Integration Points

        Make sure to maintain proper Markdown formatting throughout the document.
        """
        # invoke the llm
        response = self.llm.invoke(prompt)
        return response.content

    def generate_technical_design(self, project_name, requirements, user_stories, design_feedback):
        """
        Helper method to generate technical design document in Markdown format.

        Same contract as generate_functional_design, but covering the
        technical sections (architecture, stack, schema, APIs, ...).
        """
        logger.info("----- Creating Technical Design Document ----")
        prompt = f"""
        Create a comprehensive technical design document for {project_name} in Markdown format.

        The document should use proper Markdown syntax with headers (# for main titles, ## for sections, etc.),
        bullet points, tables, code blocks, and diagrams described in text form where appropriate.

        Requirements:
        {self.utility.format_list(requirements)}

        User Stories:
        {self.utility.format_user_stories(user_stories)}

        {f"When creating this technical design document, please incorporate the following feedback about the requirements: {design_feedback}" if design_feedback else ""}

        The technical design document should include the following sections, each with proper Markdown formatting:

        # Technical Design Document: {project_name}

        ## 1. System Architecture
        ## 2. Technology Stack and Justification
        ## 3. Database Schema
        ## 4. API Specifications
        ## 5. Security Considerations
        ## 6. Performance Considerations
        ## 7. Scalability Approach
        ## 8. Deployment Strategy
        ## 9. Third-party Integrations
        ## 10. Development, Testing, and Deployment Environments

        For any code examples, use ```language-name to specify the programming language.
        For database schemas, represent tables and relationships using Markdown tables.
        Make sure to maintain proper Markdown formatting throughout the document.
        """
        response = self.llm.invoke(prompt)
        return response.content

    def review_design_documents(self, state: SDLCState):
        # Pass-through node for the design review interrupt point.
        return state

    def revise_design_documents(self, state: SDLCState):
        # Placeholder node; currently returns None.
        pass

    def review_design_documents_router(self, state: SDLCState):
        """
        Evaluates design review is required or not.
        Routes on 'design_documents_review_status' ("feedback" vs "approved").
        """
        return state.get("design_documents_review_status", "approved") # default to "approved" if not present
145
+
src/dev_pilot/nodes/markdown_node.py ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from src.dev_pilot.state.sdlc_state import SDLCState
3
+ from src.dev_pilot.utils.Utility import Utility
4
+ from loguru import logger
5
+
6
class MarkdownArtifactsNode:
    """
    Graph Node for generating Markdown artifacts for the SDLC process.

    This node generates Markdown files for:
        - Project Requirements
        - User Stories
        - Design Documents
        - Generated Code
        - Security Recommendations
        - Test Cases
        - QA Testing Comments
        - Deployment Feedback
    and saves them to the "artifacts" folder. Artifacts whose source data
    is absent from the state are skipped (their path is recorded as None).
    """

    def __init__(self):
        self.utility = Utility()

    @staticmethod
    def _write_markdown(path, content):
        """Write *content* to *path* as UTF-8 and return the path."""
        # Explicit encoding avoids platform-dependent default encodings.
        with open(path, "w", encoding="utf-8") as f:
            f.write(content)
        return path

    def generate_markdown_artifacts(self, state: SDLCState):
        """
        Generate Markdown files for each step in the SDLC state and save them
        to the artifacts folder.

        Returns the updated state with a new key 'artifacts' that maps
        artifact names to file paths (None where the artifact was skipped).
        """
        artifacts_dir = "artifacts"
        os.makedirs(artifacts_dir, exist_ok=True)

        project_name = state.get("project_name", "Project")

        # -- Project Requirements Markdown (always produced) --
        requirements = state.get("requirements", [])
        md_project = f"# Project Requirement for {project_name}\n\n"
        md_project += "## Requirements\n"
        md_project += self.utility.format_list(requirements)
        file_project = self._write_markdown(
            os.path.join(artifacts_dir, "Project_Requirement.md"), md_project
        )

        # -- User Stories Markdown --
        user_stories = state.get("user_stories", None)
        file_stories = None
        if user_stories:
            md_stories = f"# User Stories for {project_name}\n\n"
            md_stories += self.utility.format_user_stories(user_stories)
            file_stories = self._write_markdown(
                os.path.join(artifacts_dir, "User_Stories.md"), md_stories
            )

        # -- Design Documents Markdown --
        design_docs = state.get("design_documents", None)
        file_design = None
        if design_docs:
            # NOTE(review): assumes design_documents is dict-like (.get);
            # confirm it is not a DesignDocument model instance at this point.
            md_design = f"# Design Documents for {project_name}\n\n"
            md_design += "## Functional Design Document\n"
            md_design += design_docs.get("functional", "No Functional Design Document available.")
            md_design += "\n\n## Technical Design Document\n"
            md_design += design_docs.get("technical", "No Technical Design Document available.")
            file_design = self._write_markdown(
                os.path.join(artifacts_dir, "Design_Documents.md"), md_design
            )

        # -- Generated Code Markdown --
        code_generated = state.get("code_generated", None)
        file_code = None
        if code_generated:
            md_code = f"# Generated Code for {project_name}\n\n"
            md_code += "\n" + code_generated
            file_code = self._write_markdown(
                os.path.join(artifacts_dir, "Generated_Code.md"), md_code
            )

        # -- Security Recommendations Markdown --
        security_recommendations = state.get("security_recommendations", None)
        file_security = None
        if security_recommendations:
            md_security = f"# Security Recommendations for {project_name}\n\n"
            md_security += security_recommendations
            file_security = self._write_markdown(
                os.path.join(artifacts_dir, "Security_Recommendations.md"), md_security
            )

        # -- Test Cases Markdown --
        test_cases = state.get("test_cases", None)
        file_tests = None
        if test_cases:
            md_tests = f"# Test Cases for {project_name}\n\n"
            md_tests += "\n" + test_cases
            file_tests = self._write_markdown(
                os.path.join(artifacts_dir, "Test_Cases.md"), md_tests
            )

        # -- QA Testing Comments Markdown --
        qa_testing_comments = state.get("qa_testing_comments", None)
        file_qa = None
        if qa_testing_comments:
            md_qa = f"# QA Testing Comments for {project_name}\n\n"
            md_qa += qa_testing_comments
            file_qa = self._write_markdown(
                os.path.join(artifacts_dir, "QA_Testing_Comments.md"), md_qa
            )

        # -- Deployment Feedback Markdown --
        deployment_feedback = state.get("deployment_feedback", None)
        file_deployment = None
        if deployment_feedback:
            md_deployment = f"# Deployment Feedback for {project_name}\n\n"
            md_deployment += deployment_feedback
            file_deployment = self._write_markdown(
                os.path.join(artifacts_dir, "Deployment_Feedback.md"), md_deployment
            )

        # Update the state with the paths to the generated artifact files.
        state["artifacts"] = {
            "Project_Requirements": file_project,
            "User_Stories": file_stories,
            "Design_Documents": file_design,
            "Generated_Code": file_code,
            "Security_Recommendations": file_security,
            "Test_Cases": file_tests,
            "QA_Testing_Comments": file_qa,
            "Deployment_Feedback": file_deployment
        }
        # BUGFIX: the old call logger.info("...folder:", artifacts_dir) passed
        # the folder as a stray positional arg that loguru silently dropped
        # (no "{}" placeholder in the message); use an f-string instead.
        logger.info(f"Markdown artifacts generated in folder: {artifacts_dir}")
        return state
src/dev_pilot/nodes/project_requirement_node.py ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from src.dev_pilot.state.sdlc_state import SDLCState, UserStoryList
2
+ from langchain_core.messages import SystemMessage
3
+
4
class ProjectRequirementNode:
    """
    Graph Node for the project requirements stage.

    Initializes the project, collects requirements, and generates
    structured user stories from them via the LLM.
    """

    def __init__(self, model):
        # model: LLM client; must support .with_structured_output(...)
        self.llm = model

    def initialize_project(self, state: SDLCState):
        """
        Performs the project initialization (pass-through node).
        """
        return state

    def get_user_requirements(self, state: SDLCState):
        """
        Gets the requirements from the user.
        Placeholder node; the graph interrupt supplies the requirements.
        """
        pass

    def generate_user_stories(self, state: SDLCState):
        """
        Auto-generate highly detailed and accurate user stories for each requirement.

        Stores the structured UserStoryList under 'user_stories' and
        returns the state. Any prior review feedback
        ('user_stories_feedback') is folded into the prompt.
        """
        project_name = state["project_name"]
        requirements = state["requirements"]
        feedback_reason = state.get("user_stories_feedback", None)

        # BUGFIX: the Requirements value previously had an unbalanced opening
        # quote in the prompt ('"{requirements}'); it is now properly closed.
        prompt = f"""
        You are a senior software analyst specializing in Agile SDLC and user story generation.
        Your task is to generate **a separate and detailed user story for EACH requirement** from the project details below.

        ---
        **Project Name:** "{project_name}"

        **Requirements:** "{requirements}"

        ---
        **Instructions for User Story Generation:**
        - Create **one user story per requirement**.
        - Assign a **unique identifier** (e.g., US-001, US-002, etc.).
        - Provide a **clear and concise title** summarizing the user story.
        - Write a **detailed description** using the "As a [user role], I want [goal] so that [benefit]" format.
        - Assign a **priority level** (1 = Critical, 2 = High, 3 = Medium, 4 = Low).
        - Define **acceptance criteria** with bullet points to ensure testability.
        - Use **domain-specific terminology** for clarity.

        {f"Additionally, consider the following feedback while refining the user stories: {feedback_reason}" if feedback_reason else ""}

        ---
        **Expected Output Format (for each user story):**
        - Unique Identifier: US-XXX
        - Title: [User Story Title]
        - Description:
          - As a [user role], I want [feature] so that [benefit].
        - Priority: [1-4]
        - Acceptance Criteria:
          - [Criteria 1]
          - [Criteria 2]
          - [Criteria 3]

        Ensure that the user stories are **specific, testable, and aligned with Agile principles**.
        """

        # Constrain the LLM output to the UserStoryList schema.
        llm_with_structured = self.llm.with_structured_output(UserStoryList)
        response = llm_with_structured.invoke(prompt)
        state["user_stories"] = response
        return state

    def review_user_stories(self, state: SDLCState):
        # Pass-through node for the user-story review interrupt point.
        return state

    def revise_user_stories(self, state: SDLCState):
        # Placeholder node; currently returns None.
        pass

    def review_user_stories_router(self, state: SDLCState):
        """Route on 'user_stories_review_status' ("feedback" vs "approved")."""
        return state.get("user_stories_review_status", "approved") # default to "approved" if not present
src/dev_pilot/state/__init__.py ADDED
File without changes