diff --git a/app.py b/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..c36f4a42280ea34bb2a85ad1c9205bf7823c46f1
--- /dev/null
+++ b/app.py
@@ -0,0 +1,7 @@
+import argparse
+import subprocess
+import sys
+from src.dev_pilot.ui.streamlit_ui.streamlit_app import load_app
+
+if __name__ == "__main__":
+ load_app()
\ No newline at end of file
diff --git a/app_api.py b/app_api.py
new file mode 100644
index 0000000000000000000000000000000000000000..408228702f630d0d4c949c20cb9e3b63de83797e
--- /dev/null
+++ b/app_api.py
@@ -0,0 +1,4 @@
+from src.dev_pilot.api.fastapi_app import load_app
+
+if __name__ == "__main__":
+ load_app()
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..00b157391977022ab10de7f4fc407e401e27e3e0
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,14 @@
+langchain
+langgraph
+langchain_community
+langchain_core
+langchain_groq
+langchain_openai
+faiss_cpu
+streamlit
+langchain-google-genai
+redis
+upstash_redis
+fastapi
+uvicorn
+loguru
\ No newline at end of file
diff --git a/src/__init__.py b/src/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/__pycache__/__init__.cpython-312.pyc b/src/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0408c78773090d9c87d12794507dcc2f56741470
Binary files /dev/null and b/src/__pycache__/__init__.cpython-312.pyc differ
diff --git a/src/dev_pilot/LLMS/__init__.py b/src/dev_pilot/LLMS/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/dev_pilot/LLMS/__pycache__/__init__.cpython-312.pyc b/src/dev_pilot/LLMS/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1e4746f029956a9d387cfcc49bdd9cf7ad563548
Binary files /dev/null and b/src/dev_pilot/LLMS/__pycache__/__init__.cpython-312.pyc differ
diff --git a/src/dev_pilot/LLMS/__pycache__/geminillm.cpython-312.pyc b/src/dev_pilot/LLMS/__pycache__/geminillm.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..90e880f0d1760ab68479f0dc0e0a9ef2dc50c8c3
Binary files /dev/null and b/src/dev_pilot/LLMS/__pycache__/geminillm.cpython-312.pyc differ
diff --git a/src/dev_pilot/LLMS/__pycache__/groqllm.cpython-312.pyc b/src/dev_pilot/LLMS/__pycache__/groqllm.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8a11bb9eb79ba0ffb691aee2c56489e3efc37148
Binary files /dev/null and b/src/dev_pilot/LLMS/__pycache__/groqllm.cpython-312.pyc differ
diff --git a/src/dev_pilot/LLMS/__pycache__/openai_llm.cpython-312.pyc b/src/dev_pilot/LLMS/__pycache__/openai_llm.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c7b9b2115e24d745030f1155a75941e607247277
Binary files /dev/null and b/src/dev_pilot/LLMS/__pycache__/openai_llm.cpython-312.pyc differ
diff --git a/src/dev_pilot/LLMS/geminillm.py b/src/dev_pilot/LLMS/geminillm.py
new file mode 100644
index 0000000000000000000000000000000000000000..3e7613354989e73e7cc31918f3e941fcdab44fdd
--- /dev/null
+++ b/src/dev_pilot/LLMS/geminillm.py
@@ -0,0 +1,24 @@
+import os
+import streamlit as st
+from langchain_google_genai import ChatGoogleGenerativeAI
+
+
+class GeminiLLM:
+ def __init__(self, user_controls_input=None, model=None, api_key=None):
+ self.user_controls_input = user_controls_input
+ self.model = model
+ self.api_key = api_key
+
+ def get_llm_model(self):
+ try:
+ if self.user_controls_input:
+ gemini_api_key = self.user_controls_input['GEMINI_API_KEY']
+ selected_gemini_model = self.user_controls_input['selected_gemini_model']
+ llm = ChatGoogleGenerativeAI(api_key=gemini_api_key, model= selected_gemini_model)
+ else:
+ llm = ChatGoogleGenerativeAI(api_key=self.api_key,model=self.model)
+
+ except Exception as e:
+            raise ValueError(f"Error occurred with Exception : {e}")
+
+ return llm
\ No newline at end of file
diff --git a/src/dev_pilot/LLMS/groqllm.py b/src/dev_pilot/LLMS/groqllm.py
new file mode 100644
index 0000000000000000000000000000000000000000..7c5648da8ccec67018faf5a00035dc265dc0ca77
--- /dev/null
+++ b/src/dev_pilot/LLMS/groqllm.py
@@ -0,0 +1,25 @@
+import os
+import streamlit as st
+from langchain_groq import ChatGroq
+
+
+class GroqLLM:
+ def __init__(self, user_controls_input=None, model=None, api_key=None):
+ self.user_controls_input = user_controls_input
+ self.model = model
+ self.api_key = api_key
+
+
+ def get_llm_model(self):
+ try:
+ if self.user_controls_input:
+ groq_api_key = self.user_controls_input['GROQ_API_KEY']
+ selected_groq_model = self.user_controls_input['selected_groq_model']
+ llm = ChatGroq(api_key=groq_api_key, model= selected_groq_model)
+ else:
+ llm = ChatGroq(api_key=self.api_key,model=self.model)
+
+ except Exception as e:
+            raise ValueError(f"Error occurred with Exception : {e}")
+
+ return llm
\ No newline at end of file
diff --git a/src/dev_pilot/LLMS/openai_llm.py b/src/dev_pilot/LLMS/openai_llm.py
new file mode 100644
index 0000000000000000000000000000000000000000..e0811cb4ac58e7acd3bb8de16dd3607d459c234d
--- /dev/null
+++ b/src/dev_pilot/LLMS/openai_llm.py
@@ -0,0 +1,25 @@
+import os
+import streamlit as st
+from langchain_openai import ChatOpenAI
+
+
+class OpenAILLM:
+ def __init__(self, user_controls_input=None, model=None, api_key=None):
+ self.user_controls_input = user_controls_input
+ self.model = model
+ self.api_key = api_key
+
+
+ def get_llm_model(self):
+ try:
+ if self.user_controls_input:
+ openai_api_key = self.user_controls_input['OPENAI_API_KEY']
+ selected_openai_model = self.user_controls_input['selected_openai_model']
+ llm = ChatOpenAI(api_key=openai_api_key, model= selected_openai_model)
+ else:
+            llm = ChatOpenAI(api_key=self.api_key, model= self.model)
+
+ except Exception as e:
+            raise ValueError(f"Error occurred with Exception : {e}")
+
+ return llm
\ No newline at end of file
diff --git a/src/dev_pilot/__init__.py b/src/dev_pilot/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/dev_pilot/__pycache__/__init__.cpython-312.pyc b/src/dev_pilot/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1aec60b682aab0b4874498b6cfec133334c025d5
Binary files /dev/null and b/src/dev_pilot/__pycache__/__init__.cpython-312.pyc differ
diff --git a/src/dev_pilot/__pycache__/main.cpython-312.pyc b/src/dev_pilot/__pycache__/main.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e3bbee827c4e1cdad8e4569b031a53060402efa5
Binary files /dev/null and b/src/dev_pilot/__pycache__/main.cpython-312.pyc differ
diff --git a/src/dev_pilot/api/__init__.py b/src/dev_pilot/api/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/dev_pilot/api/__pycache__/__init__.cpython-312.pyc b/src/dev_pilot/api/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..05426bed8f253885553fe2e3bb436dbd78bde4c7
Binary files /dev/null and b/src/dev_pilot/api/__pycache__/__init__.cpython-312.pyc differ
diff --git a/src/dev_pilot/api/__pycache__/fastapi_app.cpython-312.pyc b/src/dev_pilot/api/__pycache__/fastapi_app.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3abeff64d44e6600ea594566941eb0aa331ed3a0
Binary files /dev/null and b/src/dev_pilot/api/__pycache__/fastapi_app.cpython-312.pyc differ
diff --git a/src/dev_pilot/api/fastapi_app.py b/src/dev_pilot/api/fastapi_app.py
new file mode 100644
index 0000000000000000000000000000000000000000..46867eabc5248497e1fe88870468097f37435a27
--- /dev/null
+++ b/src/dev_pilot/api/fastapi_app.py
@@ -0,0 +1,202 @@
+from fastapi import FastAPI, HTTPException, Depends, Request
+from fastapi.middleware.cors import CORSMiddleware
+from fastapi.responses import JSONResponse
+import os
+from dotenv import load_dotenv
+from functools import lru_cache
+from src.dev_pilot.LLMS.groqllm import GroqLLM
+from src.dev_pilot.LLMS.geminillm import GeminiLLM
+from src.dev_pilot.graph.graph_builder import GraphBuilder
+from src.dev_pilot.graph.graph_executor import GraphExecutor
+from src.dev_pilot.dto.sdlc_request import SDLCRequest
+from src.dev_pilot.dto.sdlc_response import SDLCResponse
+import uvicorn
+from contextlib import asynccontextmanager
+from src.dev_pilot.utils.logging_config import setup_logging
+from loguru import logger
+
+## Setup logging level
+setup_logging(log_level="DEBUG")
+
+gemini_models = [
+ "gemini-2.0-flash",
+ "gemini-2.0-flash-lite",
+ "gemini-2.5-pro-exp-03-25"
+]
+
+groq_models = [
+ "gemma2-9b-it",
+ "llama3-8b-8192",
+ "llama3-70b-8192"
+]
+
+def load_app():
+ uvicorn.run(app, host="0.0.0.0", port=8000)
+
+class Settings:
+ def __init__(self):
+ self.GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
+ self.GROQ_API_KEY = os.getenv("GROQ_API_KEY")
+
+@lru_cache()
+def get_settings():
+ return Settings()
+
+def validate_api_keys(settings: Settings = Depends(get_settings)):
+ required_keys = {
+ 'GEMINI_API_KEY': settings.GEMINI_API_KEY,
+ 'GROQ_API_KEY': settings.GROQ_API_KEY
+ }
+
+ missing_keys = [key for key, value in required_keys.items() if not value]
+ if missing_keys:
+ raise HTTPException(
+ status_code=500,
+ detail=f"Missing required API keys: {', '.join(missing_keys)}"
+ )
+ return settings
+
+
+# Initialize the LLM and GraphBuilder instances once and store them in the app state
+@asynccontextmanager
+async def lifespan(app: FastAPI):
+ settings = get_settings()
+ llm = GeminiLLM(model=gemini_models[0], api_key=settings.GEMINI_API_KEY).get_llm_model()
+ graph_builder = GraphBuilder(llm=llm)
+ graph = graph_builder.setup_graph()
+ graph_executor = GraphExecutor(graph)
+ app.state.llm = llm
+ app.state.graph = graph
+ app.state.graph_executor = graph_executor
+ yield
+ # Clean up resources if needed
+ app.state.llm = None
+ app.state.graph = None
+ app.state.graph_executor = None
+
+app = FastAPI(
+ title="DevPilot API",
+ description="AI-powered SDLC API using Langgraph",
+ version="1.0.0",
+ lifespan=lifespan
+)
+
+logger.info("Application starting up...")
+
+# Configure CORS
+app.add_middleware(
+ CORSMiddleware,
+ allow_origins=["*"], # In production, replace with specific origins
+ allow_credentials=True,
+ allow_methods=["*"],
+ allow_headers=["*"],
+)
+
+@app.get("/")
+async def root():
+ return {
+ "message": "Welcome to DevPilot API",
+ "docs_url": "/docs",
+ "redoc_url": "/redoc"
+ }
+
+@app.post("/api/v1/sdlc/start", response_model=SDLCResponse)
+async def start_sdlc(
+ sdlc_request: SDLCRequest,
+ settings: Settings = Depends(validate_api_keys)
+ ):
+
+ try:
+ graph_executor = app.state.graph_executor
+
+        if not isinstance(graph_executor, GraphExecutor):
+ raise Exception("Graph Executor not initialized")
+
+ graph_response = graph_executor.start_workflow(sdlc_request.project_name)
+
+ logger.debug(f"Start Workflow Response: {graph_response}")
+
+ return SDLCResponse(
+ status="success",
+ message="SDLC process started successfully",
+ task_id=graph_response["task_id"],
+ state=graph_response["state"]
+ )
+
+ except Exception as e:
+ error_response = SDLCResponse(
+ status="error",
+ message="Failed to start the process",
+ error=str(e)
+ )
+ return JSONResponse(status_code=500, content=error_response.model_dump())
+
+
+@app.post("/api/v1/sdlc/user_stories", response_model=SDLCResponse)
+async def generate_user_stories(
+ sdlc_request: SDLCRequest,
+ settings: Settings = Depends(validate_api_keys)
+ ):
+
+ try:
+ graph_executor = app.state.graph_executor
+
+        if not isinstance(graph_executor, GraphExecutor):
+ raise Exception("Graph Executor not initialized")
+
+ graph_response = graph_executor.generate_stories(sdlc_request.task_id, sdlc_request.requirements)
+
+ logger.debug(f"Generate Stories Response: {graph_response}")
+
+ return SDLCResponse(
+ status="success",
+ message="User Stories generated successfully",
+ task_id=graph_response["task_id"],
+ state=graph_response["state"]
+ )
+
+ except Exception as e:
+ error_response = SDLCResponse(
+ status="error",
+ message="Failed to generate user stories",
+ error=str(e)
+ )
+ return JSONResponse(status_code=500, content=error_response.model_dump())
+
+
+@app.post("/api/v1/sdlc/progress_flow", response_model=SDLCResponse)
+async def progress_sdlc(
+ sdlc_request: SDLCRequest,
+ settings: Settings = Depends(validate_api_keys)
+ ):
+
+ try:
+
+ graph_executor = app.state.graph_executor
+
+        if not isinstance(graph_executor, GraphExecutor):
+ raise Exception("Graph Executor not initialized")
+
+ graph_response = graph_executor.graph_review_flow(
+ sdlc_request.task_id,
+ sdlc_request.status,
+ sdlc_request.feedback,
+ sdlc_request.next_node)
+
+ logger.debug(f"Flow Node: {sdlc_request.next_node}")
+ logger.debug(f"Progress Flow Response: {graph_response}")
+
+ return SDLCResponse(
+ status="success",
+ message="Flow progressed successfully to next step",
+ task_id=graph_response["task_id"],
+ state=graph_response["state"]
+ )
+
+ except Exception as e:
+ error_response = SDLCResponse(
+ status="error",
+ message="Failed to progress the flow",
+ error=str(e)
+ )
+ return JSONResponse(status_code=500, content=error_response.model_dump())
\ No newline at end of file
diff --git a/src/dev_pilot/cache/__init__.py b/src/dev_pilot/cache/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/dev_pilot/cache/__pycache__/__init__.cpython-312.pyc b/src/dev_pilot/cache/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..14534735f4a4f6e38e51278ff99a830e16c05e08
Binary files /dev/null and b/src/dev_pilot/cache/__pycache__/__init__.cpython-312.pyc differ
diff --git a/src/dev_pilot/cache/__pycache__/redis_cache.cpython-312.pyc b/src/dev_pilot/cache/__pycache__/redis_cache.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5aafe4032c82f5142d95386b0e64f5eaa5daa80a
Binary files /dev/null and b/src/dev_pilot/cache/__pycache__/redis_cache.cpython-312.pyc differ
diff --git a/src/dev_pilot/cache/redis_cache.py b/src/dev_pilot/cache/redis_cache.py
new file mode 100644
index 0000000000000000000000000000000000000000..03e7559dbe5685bcccdd099f92d66eda7da99000
--- /dev/null
+++ b/src/dev_pilot/cache/redis_cache.py
@@ -0,0 +1,54 @@
+import redis
+import json
+from typing import Optional
+from src.dev_pilot.state.sdlc_state import CustomEncoder, SDLCState
+from upstash_redis import Redis
+import os
+from dotenv import load_dotenv
+from loguru import logger
+
+load_dotenv()
+
+
+# Initialize Redis client
+
+## Upstash Redis Client Configuration
+REDIS_URL = os.getenv("REDIS_URL")
+REDIS_TOKEN = os.getenv("REDIS_TOKEN")
+redis_client = Redis(url=REDIS_URL, token=REDIS_TOKEN)
+
+## For testing locally with docker
+# redis_client = redis.Redis(
+# host='localhost', # Replace with your Redis host
+# port=6379, # Replace with your Redis port
+# db=0 # Replace with your Redis database number
+# )
+
+def save_state_to_redis(task_id: str, state: SDLCState):
+ """Save the state to Redis."""
+ state = json.dumps(state, cls=CustomEncoder)
+ redis_client.set(task_id, state)
+
+ # Set expiration for 24 hours
+ redis_client.expire(task_id, 86400)
+
+def get_state_from_redis(task_id: str) -> Optional[SDLCState]:
+ """ Retrieves the state from redis """
+ state_json = redis_client.get(task_id)
+ if not state_json:
+ return None
+
+ state_dict = json.loads(state_json)[0]
+ return SDLCState(**state_dict)
+
+def delete_from_redis(task_id: str):
+ """ Delete from redis """
+ redis_client.delete(task_id)
+
+def flush_redis_cache():
+ """ Flushes the whole cache"""
+
+ # Clear all keys in all databases
+ redis_client.flushall()
+
+ logger.info("--- Redis cache cleared ---")
\ No newline at end of file
diff --git a/src/dev_pilot/dto/__init__.py b/src/dev_pilot/dto/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/dev_pilot/dto/__pycache__/__init__.cpython-312.pyc b/src/dev_pilot/dto/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8db7d65d6ecbaf24c81c357f77735aa43803146a
Binary files /dev/null and b/src/dev_pilot/dto/__pycache__/__init__.cpython-312.pyc differ
diff --git a/src/dev_pilot/dto/__pycache__/sdlc_request.cpython-312.pyc b/src/dev_pilot/dto/__pycache__/sdlc_request.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cc36ead630a17072b43dc1e09284589c90ed588e
Binary files /dev/null and b/src/dev_pilot/dto/__pycache__/sdlc_request.cpython-312.pyc differ
diff --git a/src/dev_pilot/dto/__pycache__/sdlc_response.cpython-312.pyc b/src/dev_pilot/dto/__pycache__/sdlc_response.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1d066b812c25dab4a567ff0c8ab8df5aa1d7a4a5
Binary files /dev/null and b/src/dev_pilot/dto/__pycache__/sdlc_response.cpython-312.pyc differ
diff --git a/src/dev_pilot/dto/sdlc_request.py b/src/dev_pilot/dto/sdlc_request.py
new file mode 100644
index 0000000000000000000000000000000000000000..65e7b1bf5762558795ccf3a01074cb3f769bf36c
--- /dev/null
+++ b/src/dev_pilot/dto/sdlc_request.py
@@ -0,0 +1,29 @@
+from pydantic import BaseModel, Field
+from typing import Optional
+
+class SDLCRequest(BaseModel):
+ project_name: str = Field(...,
+ example="Ecommerce Platform",
+ description="The name of the project")
+
+ requirements: Optional[list[str]] = Field(None,
+        example=["Users can browse the products",
+ "Users should be able to add the product in the cart",
+ "Users should be able to do the payment",
+ "Users should be able to see their order history"],
+ description="The list of requirements for the project")
+ task_id: Optional[str] = Field(None,
+ example="sdlc-session-5551defc",
+ description="The task id of the workflow session")
+
+ next_node: Optional[str] = Field(None,
+ example="review_user_stories",
+ description="The node to be executed in the workflow. Pass the node information returned from previous API")
+
+ status: Optional[str] = Field(None,
+ example="approved or feedback",
+ description="The status of the review")
+
+ feedback: Optional[str] = Field(None,
+ example="The user stories are good but need to be more specific",
+ description="The feedback for the review")
diff --git a/src/dev_pilot/dto/sdlc_response.py b/src/dev_pilot/dto/sdlc_response.py
new file mode 100644
index 0000000000000000000000000000000000000000..50f61f217c9be2b9766ed75b343a30c497165816
--- /dev/null
+++ b/src/dev_pilot/dto/sdlc_response.py
@@ -0,0 +1,11 @@
+from pydantic import BaseModel
+from typing import Optional
+from src.dev_pilot.state.sdlc_state import SDLCState
+from typing import Dict, Any
+
+class SDLCResponse(BaseModel):
+ status: str
+ message: str
+ task_id: Optional[str] = None
+ state: Optional[Dict[str, Any]] = None
+ error: Optional[str] = None
\ No newline at end of file
diff --git a/src/dev_pilot/graph/__init__.py b/src/dev_pilot/graph/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/dev_pilot/graph/__pycache__/__init__.cpython-312.pyc b/src/dev_pilot/graph/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..beff2788b8862d414dd932b7b75034b8c0c6fc54
Binary files /dev/null and b/src/dev_pilot/graph/__pycache__/__init__.cpython-312.pyc differ
diff --git a/src/dev_pilot/graph/__pycache__/graph_builder.cpython-312.pyc b/src/dev_pilot/graph/__pycache__/graph_builder.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1f4ba1968f47c04307de9e988a7fb8a69008414f
Binary files /dev/null and b/src/dev_pilot/graph/__pycache__/graph_builder.cpython-312.pyc differ
diff --git a/src/dev_pilot/graph/__pycache__/graph_executor.cpython-312.pyc b/src/dev_pilot/graph/__pycache__/graph_executor.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..812b8d5798d69754895ae6b76d07b032532192cb
Binary files /dev/null and b/src/dev_pilot/graph/__pycache__/graph_executor.cpython-312.pyc differ
diff --git a/src/dev_pilot/graph/graph_builder.py b/src/dev_pilot/graph/graph_builder.py
new file mode 100644
index 0000000000000000000000000000000000000000..e602f47d70627eed18527ea5b28a3b67d8ee6bd4
--- /dev/null
+++ b/src/dev_pilot/graph/graph_builder.py
@@ -0,0 +1,174 @@
+from langgraph.graph import StateGraph,START, END
+from src.dev_pilot.state.sdlc_state import SDLCState
+from src.dev_pilot.nodes.project_requirement_node import ProjectRequirementNode
+from src.dev_pilot.nodes.design_document_node import DesingDocumentNode
+from src.dev_pilot.nodes.coding_node import CodingNode
+from src.dev_pilot.nodes.markdown_node import MarkdownArtifactsNode
+from langgraph.checkpoint.memory import MemorySaver
+from langchain_core.runnables.graph import MermaidDrawMethod
+
+class GraphBuilder:
+
+ def __init__(self, llm):
+ self.llm = llm
+ self.graph_builder = StateGraph(SDLCState)
+ self.memory = MemorySaver()
+
+
+ def build_sdlc_graph(self):
+ """
+ Configure the graph by adding nodes, edges
+ """
+
+ self.project_requirement_node = ProjectRequirementNode(self.llm)
+ self.design_document_node = DesingDocumentNode(self.llm)
+ self.coding_node = CodingNode(self.llm)
+ self.markdown_node = MarkdownArtifactsNode()
+
+ ## Nodes
+ self.graph_builder.add_node("initialize_project", self.project_requirement_node.initialize_project)
+ self.graph_builder.add_node("get_user_requirements", self.project_requirement_node.get_user_requirements)
+
+ self.graph_builder.add_node("generate_user_stories", self.project_requirement_node.generate_user_stories)
+ self.graph_builder.add_node("review_user_stories", self.project_requirement_node.review_user_stories)
+ self.graph_builder.add_node("revise_user_stories", self.project_requirement_node.revise_user_stories)
+
+ self.graph_builder.add_node("create_design_documents", self.design_document_node.create_design_documents)
+ self.graph_builder.add_node("review_design_documents", self.design_document_node.review_design_documents)
+ self.graph_builder.add_node("revise_design_documents", self.design_document_node.revise_design_documents)
+
+ self.graph_builder.add_node("generate_code", self.coding_node.generate_code)
+ self.graph_builder.add_node("code_review", self.coding_node.code_review)
+ self.graph_builder.add_node("fix_code", self.coding_node.fix_code)
+
+ self.graph_builder.add_node("security_review_recommendations", self.coding_node.security_review_recommendations)
+ self.graph_builder.add_node("security_review", self.coding_node.security_review)
+ self.graph_builder.add_node("fix_code_after_security_review", self.coding_node.fix_code_after_security_review)
+
+ self.graph_builder.add_node("write_test_cases", self.coding_node.write_test_cases)
+ self.graph_builder.add_node("review_test_cases", self.coding_node.review_test_cases)
+ self.graph_builder.add_node("revise_test_cases", self.coding_node.revise_test_cases)
+
+ self.graph_builder.add_node("qa_testing", self.coding_node.qa_testing)
+ self.graph_builder.add_node("qa_review", self.coding_node.qa_review)
+ self.graph_builder.add_node("deployment", self.coding_node.deployment)
+ self.graph_builder.add_node("donwload_artifacts", self.markdown_node.generate_markdown_artifacts)
+
+
+ ## Edges
+ self.graph_builder.add_edge(START,"initialize_project")
+ self.graph_builder.add_edge("initialize_project","get_user_requirements")
+ self.graph_builder.add_edge("get_user_requirements","generate_user_stories")
+ self.graph_builder.add_edge("generate_user_stories","review_user_stories")
+ self.graph_builder.add_conditional_edges(
+ "review_user_stories",
+ self.project_requirement_node.review_user_stories_router,
+ {
+ "approved": "create_design_documents",
+ "feedback": "revise_user_stories"
+ }
+ )
+ self.graph_builder.add_edge("revise_user_stories","generate_user_stories")
+ self.graph_builder.add_edge("create_design_documents","review_design_documents")
+ self.graph_builder.add_conditional_edges(
+ "review_design_documents",
+ self.design_document_node.review_design_documents_router,
+ {
+ "approved": "generate_code",
+ "feedback": "revise_design_documents"
+ }
+ )
+ self.graph_builder.add_edge("revise_design_documents","create_design_documents")
+ self.graph_builder.add_edge("generate_code","code_review")
+ self.graph_builder.add_conditional_edges(
+ "code_review",
+ self.coding_node.code_review_router,
+ {
+ "approved": "security_review_recommendations",
+ "feedback": "fix_code"
+ }
+ )
+ self.graph_builder.add_edge("fix_code","generate_code")
+ self.graph_builder.add_edge("security_review_recommendations","security_review")
+ self.graph_builder.add_conditional_edges(
+ "security_review",
+ self.coding_node.security_review_router,
+ {
+ "approved": "write_test_cases",
+ "feedback": "fix_code_after_security_review"
+ }
+ )
+ self.graph_builder.add_edge("fix_code_after_security_review","generate_code")
+ self.graph_builder.add_edge("write_test_cases", "review_test_cases")
+ self.graph_builder.add_conditional_edges(
+ "review_test_cases",
+ self.coding_node.review_test_cases_router,
+ {
+ "approved": "qa_testing",
+ "feedback": "revise_test_cases"
+ }
+ )
+ self.graph_builder.add_edge("revise_test_cases", "write_test_cases")
+ self.graph_builder.add_edge("qa_testing", "qa_review")
+ self.graph_builder.add_conditional_edges(
+ "qa_review",
+ self.coding_node.review_test_cases_router,
+ {
+ "approved": "deployment",
+ "feedback": "generate_code"
+ }
+ )
+ self.graph_builder.add_edge("deployment", "donwload_artifacts")
+ self.graph_builder.add_edge("donwload_artifacts", END)
+
+
+ def setup_graph(self):
+ """
+ Sets up the graph
+ """
+ self.build_sdlc_graph()
+ return self.graph_builder.compile(
+ interrupt_before=[
+ 'get_user_requirements',
+ 'review_user_stories',
+ 'review_design_documents',
+ 'code_review',
+ 'security_review',
+ 'review_test_cases',
+ 'qa_review'
+ ],checkpointer=self.memory
+ )
+
+
+ # def setup_graph(self):
+ # """
+ # Sets up the graph
+ # """
+ # self.build_sdlc_graph()
+ # graph =self.graph_builder.compile(
+ # interrupt_before=[
+ # 'get_user_requirements',
+ # 'review_user_stories',
+ # 'review_design_documents',
+ # 'code_review',
+ # 'security_review',
+ # 'review_test_cases',
+ # 'qa_review'
+ # ],checkpointer=self.memory
+ # )
+ # self.save_graph_image(graph)
+ # return graph
+
+
+ def save_graph_image(self,graph):
+ # Generate the PNG image
+ img_data = graph.get_graph().draw_mermaid_png(
+ draw_method=MermaidDrawMethod.API
+ )
+
+ # Save the image to a file
+ graph_path = "workflow_graph.png"
+ with open(graph_path, "wb") as f:
+ f.write(img_data)
+
+
\ No newline at end of file
diff --git a/src/dev_pilot/graph/graph_executor.py b/src/dev_pilot/graph/graph_executor.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b7aa777377210fdc4ed1e246e3cc1688231be94
--- /dev/null
+++ b/src/dev_pilot/graph/graph_executor.py
@@ -0,0 +1,114 @@
+from src.dev_pilot.state.sdlc_state import SDLCState
+from src.dev_pilot.cache.redis_cache import flush_redis_cache, save_state_to_redis, get_state_from_redis
+import uuid
+import src.dev_pilot.utils.constants as const
+from loguru import logger
+
+class GraphExecutor:
+ def __init__(self, graph):
+ self.graph = graph
+
+ def get_thread(self, task_id):
+ return {"configurable": {"thread_id": task_id}}
+
+ ## ------- Start the Workflow ------- ##
+ def start_workflow(self, project_name: str):
+
+ graph = self.graph
+
+ flush_redis_cache()
+
+ # Generate a unique task id
+ task_id = f"sdlc-session-{uuid.uuid4().hex[:8]}"
+
+ thread = self.get_thread(task_id)
+
+ state = None
+ for event in graph.stream({"project_name": project_name},thread, stream_mode="values"):
+ state = event
+
+ current_state = graph.get_state(thread)
+ save_state_to_redis(task_id, current_state)
+
+ return {"task_id" : task_id, "state": state}
+
+ ## ------- User Story Generation ------- ##
+ def generate_stories(self, task_id:str, requirements: list[str]):
+ saved_state = get_state_from_redis(task_id)
+ if saved_state:
+ saved_state['requirements'] = requirements
+ saved_state['next_node'] = const.REVIEW_USER_STORIES
+
+ return self.update_and_resume_graph(saved_state,task_id,"get_user_requirements")
+
+
+ ## ------- Generic Review Flow for all the feedback stages ------- ##
+ def graph_review_flow(self, task_id, status, feedback, review_type):
+ saved_state = get_state_from_redis(task_id)
+
+ if saved_state:
+ if review_type == const.REVIEW_USER_STORIES:
+ saved_state['user_stories_review_status'] = status
+ saved_state['user_stories_feedback'] = feedback
+ node_name = "review_user_stories"
+ saved_state['next_node'] = const.REVIEW_USER_STORIES if status == "feedback" else const.REVIEW_DESIGN_DOCUMENTS
+
+ elif review_type == const.REVIEW_DESIGN_DOCUMENTS:
+ saved_state['design_documents_review_status'] = status
+ saved_state['design_documents_feedback'] = feedback
+ node_name = "review_design_documents"
+ saved_state['next_node'] = const.REVIEW_DESIGN_DOCUMENTS if status == "feedback" else const.REVIEW_CODE
+
+ elif review_type == const.REVIEW_CODE:
+ saved_state['code_review_status'] = status
+ saved_state['code_review_feedback'] = feedback
+ node_name = "code_review"
+ saved_state['next_node'] = const.REVIEW_CODE if status == "feedback" else const.REVIEW_SECURITY_RECOMMENDATIONS
+
+ elif review_type == const.REVIEW_SECURITY_RECOMMENDATIONS:
+ saved_state['security_review_status'] = status
+ saved_state['security_review_comments'] = feedback
+ node_name = "security_review"
+ saved_state['next_node'] = const.REVIEW_SECURITY_RECOMMENDATIONS if status == "feedback" else const.REVIEW_TEST_CASES
+
+ elif review_type == const.REVIEW_TEST_CASES:
+ saved_state['test_case_review_status'] = status
+ saved_state['test_case_review_feedback'] = feedback
+ node_name = "review_test_cases"
+ saved_state['next_node'] = const.REVIEW_TEST_CASES if status == "feedback" else const.REVIEW_QA_TESTING
+
+ elif review_type == const.REVIEW_QA_TESTING:
+ saved_state['qa_testing_status'] = status
+ saved_state['qa_testing_feedback'] = feedback
+ node_name = "qa_review"
+ saved_state['next_node'] = const.REVIEW_QA_TESTING if status == "feedback" else const.END_NODE
+
+ else:
+ raise ValueError(f"Unsupported review type: {review_type}")
+
+ return self.update_and_resume_graph(saved_state,task_id,node_name)
+
+ ## -------- Helper Method to handle the graph resume state ------- ##
+ def update_and_resume_graph(self, saved_state,task_id, as_node):
+ graph = self.graph
+ thread = self.get_thread(task_id)
+
+ graph.update_state(thread, saved_state, as_node=as_node)
+
+ # Resume the graph
+ state = None
+ for event in graph.stream(None, thread, stream_mode="values"):
+ logger.debug(f"Event Received: {event}")
+ state = event
+
+ # saving the state before asking the product owner for review
+ current_state = graph.get_state(thread)
+ save_state_to_redis(task_id, current_state)
+
+ return {"task_id" : task_id, "state": state}
+
+
+ def get_updated_state(self, task_id):
+ saved_state = get_state_from_redis(task_id)
+ return {"task_id" : task_id, "state": saved_state}
+
diff --git a/src/dev_pilot/nodes/__init__.py b/src/dev_pilot/nodes/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/dev_pilot/nodes/__pycache__/__init__.cpython-312.pyc b/src/dev_pilot/nodes/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d0079bb1bee7d51bfeac7957a022cb3a63fe8e17
Binary files /dev/null and b/src/dev_pilot/nodes/__pycache__/__init__.cpython-312.pyc differ
diff --git a/src/dev_pilot/nodes/__pycache__/basic_chatbot_node.cpython-312.pyc b/src/dev_pilot/nodes/__pycache__/basic_chatbot_node.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a6d94030d9f377b559236fada41bbfa1ec93777a
Binary files /dev/null and b/src/dev_pilot/nodes/__pycache__/basic_chatbot_node.cpython-312.pyc differ
diff --git a/src/dev_pilot/nodes/__pycache__/chatbot_ai_news_node.cpython-312.pyc b/src/dev_pilot/nodes/__pycache__/chatbot_ai_news_node.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c545e717488c0f3dc71129562079602ec547dbe4
Binary files /dev/null and b/src/dev_pilot/nodes/__pycache__/chatbot_ai_news_node.cpython-312.pyc differ
diff --git a/src/dev_pilot/nodes/__pycache__/chatbot_with_tools_node.cpython-312.pyc b/src/dev_pilot/nodes/__pycache__/chatbot_with_tools_node.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bc59535ddba33d9a097350afcd7244107db7f480
Binary files /dev/null and b/src/dev_pilot/nodes/__pycache__/chatbot_with_tools_node.cpython-312.pyc differ
diff --git a/src/dev_pilot/nodes/__pycache__/coding_node.cpython-312.pyc b/src/dev_pilot/nodes/__pycache__/coding_node.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a2f0d4a45daa5d248bcfeeb4f23509c4b6dc6ead
Binary files /dev/null and b/src/dev_pilot/nodes/__pycache__/coding_node.cpython-312.pyc differ
diff --git a/src/dev_pilot/nodes/__pycache__/design_document_node.cpython-312.pyc b/src/dev_pilot/nodes/__pycache__/design_document_node.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a13301b08cd4c388650320fa8af90c40e2f418ed
Binary files /dev/null and b/src/dev_pilot/nodes/__pycache__/design_document_node.cpython-312.pyc differ
diff --git a/src/dev_pilot/nodes/__pycache__/markdown_node.cpython-312.pyc b/src/dev_pilot/nodes/__pycache__/markdown_node.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..593a8f15141423c3fd96abfbf1c8c6bce3e0524a
Binary files /dev/null and b/src/dev_pilot/nodes/__pycache__/markdown_node.cpython-312.pyc differ
diff --git a/src/dev_pilot/nodes/__pycache__/project_requirement_node.cpython-312.pyc b/src/dev_pilot/nodes/__pycache__/project_requirement_node.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..aa82eb80dfac08c314b9e8479b04bc08e6dc2231
Binary files /dev/null and b/src/dev_pilot/nodes/__pycache__/project_requirement_node.cpython-312.pyc differ
diff --git a/src/dev_pilot/nodes/coding_node.py b/src/dev_pilot/nodes/coding_node.py
new file mode 100644
index 0000000000000000000000000000000000000000..96cc46ea08be6beb68ef4cfb045f091ec427eb46
--- /dev/null
+++ b/src/dev_pilot/nodes/coding_node.py
@@ -0,0 +1,280 @@
+from src.dev_pilot.state.sdlc_state import SDLCState, UserStoryList
+from src.dev_pilot.utils.Utility import Utility
+from loguru import logger
+
+class CodingNode:
+    """
+    Graph Node for the Coding phase of the SDLC workflow.
+
+    Bundles the LangGraph callbacks for code generation, code review,
+    security review, test-case authoring, QA testing and a simulated
+    deployment.  Each callback receives the shared ``SDLCState`` mapping
+    and returns either the (possibly mutated) state or a partial state
+    update, as the graph runtime expects.
+    """
+
+    def __init__(self, model):
+        # model: LangChain-style chat model exposing .invoke(prompt) whose
+        # return value carries the generated text in .content.
+        self.llm = model
+        self.utility = Utility()
+
+    ## ---- Code Generation ----- ##
+    def generate_code(self, state: SDLCState):
+        """
+        Generates the code for the given SDLC state as multiple Python files.
+
+        Folds prior code-review and security feedback (when present) into
+        the prompt so a regeneration pass can address it, then immediately
+        reviews the output.  Returns a partial state update with the keys
+        'code_generated' and 'code_review_comments'.
+        """
+        logger.info("----- Generating the code ----")
+
+        requirements = state.get('requirements', '')
+        user_stories = state.get('user_stories', '')
+        # NOTE(review): this guard tests 'code_generated' rather than
+        # 'code_review_feedback' -- presumably "only use feedback on a
+        # regeneration pass (code already exists)"; confirm the intent.
+        code_feedback = state.get('code_review_feedback', '') if 'code_generated' in state else ""
+        security_feedback = state.get('security_recommendations', '') if 'security_recommendations' in state else ""
+
+        # NOTE(review): the f-string below subscripts design_documents
+        # (state['design_documents']['functional']); DesignDocument is a
+        # pydantic model, so this assumes the state has been serialized to
+        # plain dicts by this point -- verify against the graph executor.
+        prompt = f"""
+        Generate a complete Python project organized as multiple code files.
+        Based on the following SDLC state, generate only the Python code files with their complete implementations.
+        Do NOT include any explanations, requirements text, or design document details in the output—only code files with proper names and code content.
+
+        SDLC State:
+        ---------------
+        Project Name: {state['project_name']}
+
+        Requirements:
+        {self.utility.format_list(requirements)}
+
+        User Stories:
+        {self.utility.format_user_stories(user_stories)}
+
+        Functional Design Document:
+        {state['design_documents']['functional']}
+
+        Technical Design Document:
+        {state['design_documents']['technical']}
+
+        {"Note: Incorporate the following code review feedback: " + code_feedback if code_feedback else ""}
+        {"Note: Apply the following security recommendations: " + security_feedback if security_feedback else ""}
+
+        Instructions:
+        - Structure the output as multiple code files (for example, "main.py", "module1.py", etc.), each separated clearly.
+        - Each file should contain only the code necessary for a modular, fully-functional project based on the input state.
+        - Do not output any additional text, explanations, or commentary outside the code files.
+        - Ensure the code follows Python best practices, is syntactically correct, and is ready for development.
+        """
+        response = self.llm.invoke(prompt)
+        # Review the freshly generated code right away; the comments are
+        # consumed later by write_test_cases.
+        code_review_comments = self.get_code_review_comments(code=response.content)
+        return {
+            'code_generated': response.content,
+            'code_review_comments': code_review_comments
+        }
+
+    ## This code review comments will be used while generating test cases
+    def get_code_review_comments(self, code: str):
+        """
+        Generate code review comments for the provided code.
+
+        Returns the raw LLM review text; per the prompt contract it ends
+        with an explicit APPROVED or NEEDS_FEEDBACK marker.
+        """
+        logger.info("----- Generating code review comments ----")
+
+        # Create a prompt for the LLM to review the code
+        prompt = f"""
+        You are a coding expert. Please review the following code and provide detailed feedback:
+        ```
+        {code}
+        ```
+        Focus on:
+        1. Code quality and best practices
+        2. Potential bugs or edge cases
+        3. Performance considerations
+        4. Security concerns
+
+        End your review with an explicit APPROVED or NEEDS_FEEDBACK status.
+        """
+
+        # Get the review from the LLM
+        response = self.llm.invoke(prompt)
+        review_comments = response.content
+        return review_comments
+
+    def code_review(self, state: SDLCState):
+        # Intentional no-op: serves as a human-in-the-loop checkpoint in the
+        # graph; the decision itself is read by code_review_router.
+        return state
+
+    def fix_code(self, state: SDLCState):
+        # Not implemented yet; returning None leaves the state untouched.
+        pass
+
+    def code_review_router(self, state: SDLCState):
+        """
+        Evaluates Code review is required or not.
+        """
+        return state.get("code_review_status", "approved")  # default to "approved" if not present
+
+    ## ---- Security Review ----- ##
+    def security_review_recommendations(self, state: SDLCState):
+        """
+        Performs security review of the code generated.
+
+        Stores the raw LLM review under 'security_recommendations' and
+        returns the mutated state.
+        """
+        logger.info("----- Generating security recommendations ----")
+
+        # Get the generated code from the state
+        code_generated = state.get('code_generated', '')
+
+        # Create a prompt for the LLM to review the code for security concerns
+        prompt = f"""
+        You are a security expert. Please review the following Python code for potential security vulnerabilities:
+        ```
+        {code_generated}
+        ```
+        Focus on:
+        1. Identifying potential security risks (e.g., SQL injection, XSS, insecure data handling).
+        2. Providing recommendations to mitigate these risks.
+        3. Highlighting any best practices that are missing.
+
+        End your review with an explicit APPROVED or NEEDS_FEEDBACK status.
+        """
+
+        # Invoke the LLM to perform the security review
+        response = self.llm.invoke(prompt)
+        state["security_recommendations"] = response.content
+        return state
+
+    def security_review(self, state: SDLCState):
+        # Human-in-the-loop checkpoint; see security_review_router.
+        return state
+
+    def fix_code_after_security_review(self, state: SDLCState):
+        # Not implemented yet; returning None leaves the state untouched.
+        pass
+
+    def security_review_router(self, state: SDLCState):
+        """
+        Security Code review is required or not.
+        """
+        return state.get("security_review_status", "approved")  # default to "approved" if not present
+
+    ## ---- Test Cases ----- ##
+    def write_test_cases(self, state: SDLCState):
+        """
+        Generates the test cases based on the generated code and code review comments.
+
+        Stores the LLM output under 'test_cases' and returns the state.
+        """
+        logger.info("----- Generating Test Cases ----")
+
+        # Get the generated code and code review comments from the state
+        code_generated = state.get('code_generated', '')
+        code_review_comments = state.get('code_review_comments', '')
+
+        # Create a prompt for the LLM to generate test cases
+        prompt = f"""
+        You are a software testing expert. Based on the following Python code and its review comments, generate comprehensive test cases:
+
+        ### Code:
+        ```
+        {code_generated}
+        ```
+
+        ### Code Review Comments:
+        {code_review_comments}
+
+        Focus on:
+        1. Covering all edge cases and boundary conditions.
+        2. Ensuring functional correctness of the code.
+        3. Including both positive and negative test cases.
+        4. Writing test cases in Python's `unittest` framework format.
+
+        Provide the test cases in Python code format, ready to be executed.
+        """
+
+        response = self.llm.invoke(prompt)
+        state["test_cases"] = response.content
+
+        return state
+
+    def review_test_cases(self, state: SDLCState):
+        # Human-in-the-loop checkpoint; see review_test_cases_router.
+        return state
+
+    def revise_test_cases(self, state: SDLCState):
+        # Not implemented yet; returning None leaves the state untouched.
+        pass
+
+    def review_test_cases_router(self, state: SDLCState):
+        """
+        Evaluates Test Cases review is required or not.
+        """
+        return state.get("test_case_review_status", "approved")  # default to "approved" if not present
+
+    ## ---- QA Testing ----- ##
+    def qa_testing(self, state: SDLCState):
+        """
+        Performs QA testing based on the generated code and test cases.
+
+        The "testing" is simulated by the LLM rather than executed; results
+        are stored under 'qa_testing_comments'.
+        """
+        logger.info("----- Performing QA Testing ----")
+        # Get the generated code and test cases from the state
+        code_generated = state.get('code_generated', '')
+        test_cases = state.get('test_cases', '')
+
+        # Create a prompt for the LLM to simulate running the test cases
+        prompt = f"""
+        You are a QA testing expert. Based on the following Python code and test cases, simulate running the test cases and provide feedback:
+
+        ### Code:
+        ```
+        {code_generated}
+        ```
+
+        ### Test Cases:
+        ```
+        {test_cases}
+        ```
+
+        Focus on:
+        1. Identifying which test cases pass and which fail.
+        2. Providing detailed feedback for any failed test cases, including the reason for failure.
+        3. Suggesting improvements to the code or test cases if necessary.
+
+        Provide the results in the following format:
+        - Test Case ID: [ID]
+          Status: [Pass/Fail]
+          Feedback: [Detailed feedback if failed]
+        """
+
+        # Invoke the LLM to simulate QA testing
+        response = self.llm.invoke(prompt)
+        qa_testing_comments = response.content
+
+        state["qa_testing_comments"] = qa_testing_comments
+        return state
+
+    def qa_review(self, state: SDLCState):
+        # Not implemented yet; returning None leaves the state untouched.
+        pass
+
+    def deployment(self, state: SDLCState):
+        """
+        Performs the deployment (simulated by the LLM).
+
+        Returns a new state dict extended with 'deployment_status'
+        ("success"/"failed") and the raw 'deployment_feedback'.
+        """
+        logger.info("----- Generating Deployment Simulation----")
+
+        code_generated = state.get('code_generated', '')
+
+        # Create a prompt for the LLM to simulate deployment
+        prompt = f"""
+        You are a DevOps expert. Based on the following Python code, simulate the deployment process and provide feedback:
+
+        ### Code:
+        ```
+        {code_generated}
+        ```
+
+        Focus on:
+        1. Identifying potential deployment issues (e.g., missing dependencies, configuration errors).
+        2. Providing recommendations to resolve any issues.
+        3. Confirming whether the deployment is successful or needs further action.
+
+        Provide the results in the following format:
+        - Deployment Status: [Success/Failed]
+        - Feedback: [Detailed feedback on the deployment process]
+        """
+
+        # Invoke the LLM to simulate deployment
+        response = self.llm.invoke(prompt)
+        deployment_feedback = response.content
+
+        # Determine the deployment status based on the feedback
+        # (case-insensitive substring match on the LLM's status line).
+        if "SUCCESS" in deployment_feedback.upper():
+            deployment_status = "success"
+        else:
+            deployment_status = "failed"
+
+        # Update the state with the deployment results
+        return {
+            **state,
+            "deployment_status": deployment_status,
+            "deployment_feedback": deployment_feedback
+        }
\ No newline at end of file
diff --git a/src/dev_pilot/nodes/design_document_node.py b/src/dev_pilot/nodes/design_document_node.py
new file mode 100644
index 0000000000000000000000000000000000000000..854740d9ea3cea49fa5aa71bdb4bb1160f227200
--- /dev/null
+++ b/src/dev_pilot/nodes/design_document_node.py
@@ -0,0 +1,145 @@
+from src.dev_pilot.state.sdlc_state import SDLCState, DesignDocument
+from src.dev_pilot.utils.Utility import Utility
+from loguru import logger
+
+class DesingDocumentNode:
+    """
+    Graph Node for the Design Documents phase.
+
+    Generates a functional and a technical design document from the
+    project's requirements and user stories.  NOTE: the class name carries
+    a historical typo ("Desing"); it is kept as-is since other modules
+    presumably import it under this name -- confirm before renaming.
+    """
+
+    def __init__(self, model):
+        # model: LangChain-style chat model exposing .invoke(prompt).
+        self.llm = model
+        self.utility = Utility()
+
+    def create_design_documents(self, state: SDLCState):
+        """
+        Generates the Design document functional and technical.
+
+        On a revision pass (design documents already present in the state)
+        any 'design_documents_feedback' is folded into both prompts.
+        """
+        logger.info("----- Creating Design Document ----")
+        requirements = state.get('requirements', '')
+        user_stories = state.get('user_stories', '')
+        project_name = state.get('project_name', '')
+        design_feedback = None
+
+        # Feedback only exists after an initial generation + review cycle.
+        if 'design_documents' in state:
+            design_feedback = state.get('design_documents_feedback', '')
+
+        functional_documents = self.generate_functional_design(
+            project_name=project_name,
+            requirements=requirements,
+            user_stories=user_stories,
+            design_feedback=design_feedback
+        )
+
+        technical_documents = self.generate_technical_design(
+            project_name=project_name,
+            requirements=requirements,
+            user_stories=user_stories,
+            design_feedback=design_feedback
+        )
+
+        design_documents = DesignDocument(
+            functional=functional_documents,
+            technical=technical_documents
+        )
+
+        # NOTE(review): 'technical_documents' is not declared in the
+        # SDLCState schema and duplicates design_documents.technical --
+        # confirm whether any consumer actually reads this key.
+        return {
+            **state,
+            "design_documents": design_documents,
+            "technical_documents": technical_documents
+        }
+
+    def generate_functional_design(self, project_name, requirements, user_stories, design_feedback):
+        """
+        Helper method to generate functional design document.
+
+        Returns the LLM's Markdown output as a plain string.
+        """
+        logger.info("----- Creating Functional Design Document ----")
+        prompt = f"""
+        Create a comprehensive functional design document for {project_name} in Markdown format.
+
+        The document should use proper Markdown syntax with headers (# for main titles, ## for sections, etc.),
+        bullet points, tables, and code blocks where appropriate.
+
+        Requirements:
+        {self.utility.format_list(requirements)}
+
+        User Stories:
+        {self.utility.format_user_stories(user_stories)}
+
+        {f"When creating this functional design document, please incorporate the following feedback about the requirements: {design_feedback}" if design_feedback else ""}
+
+        The functional design document should include the following sections, each with proper Markdown formatting:
+
+        # Functional Design Document: {project_name}
+
+        ## 1. Introduction and Purpose
+        ## 2. Project Scope
+        ## 3. User Roles and Permissions
+        ## 4. Functional Requirements Breakdown
+        ## 5. User Interface Design Guidelines
+        ## 6. Business Process Flows
+        ## 7. Data Entities and Relationships
+        ## 8. Validation Rules
+        ## 9. Reporting Requirements
+        ## 10. Integration Points
+
+        Make sure to maintain proper Markdown formatting throughout the document.
+        """
+        # invoke the llm
+        response = self.llm.invoke(prompt)
+        return response.content
+
+    def generate_technical_design(self, project_name, requirements, user_stories, design_feedback):
+        """
+        Helper method to generate technical design document in Markdown format.
+
+        Returns the LLM's Markdown output as a plain string.
+        """
+        logger.info("----- Creating Technical Design Document ----")
+        prompt = f"""
+        Create a comprehensive technical design document for {project_name} in Markdown format.
+
+        The document should use proper Markdown syntax with headers (# for main titles, ## for sections, etc.),
+        bullet points, tables, code blocks, and diagrams described in text form where appropriate.
+
+        Requirements:
+        {self.utility.format_list(requirements)}
+
+        User Stories:
+        {self.utility.format_user_stories(user_stories)}
+
+        {f"When creating this technical design document, please incorporate the following feedback about the requirements: {design_feedback}" if design_feedback else ""}
+
+        The technical design document should include the following sections, each with proper Markdown formatting:
+
+        # Technical Design Document: {project_name}
+
+        ## 1. System Architecture
+        ## 2. Technology Stack and Justification
+        ## 3. Database Schema
+        ## 4. API Specifications
+        ## 5. Security Considerations
+        ## 6. Performance Considerations
+        ## 7. Scalability Approach
+        ## 8. Deployment Strategy
+        ## 9. Third-party Integrations
+        ## 10. Development, Testing, and Deployment Environments
+
+        For any code examples, use ```language-name to specify the programming language.
+        For database schemas, represent tables and relationships using Markdown tables.
+        Make sure to maintain proper Markdown formatting throughout the document.
+        """
+        response = self.llm.invoke(prompt)
+        return response.content
+
+    def review_design_documents(self, state: SDLCState):
+        # Human-in-the-loop checkpoint; see review_design_documents_router.
+        return state
+
+    def revise_design_documents(self, state: SDLCState):
+        # Not implemented: revision happens in create_design_documents via
+        # the 'design_documents_feedback' state key.
+        pass
+
+    def review_design_documents_router(self, state: SDLCState):
+        """
+        Evaluates design review is required or not.
+        """
+        return state.get("design_documents_review_status", "approved")  # default to "approved" if not present
+
\ No newline at end of file
diff --git a/src/dev_pilot/nodes/markdown_node.py b/src/dev_pilot/nodes/markdown_node.py
new file mode 100644
index 0000000000000000000000000000000000000000..4601b592089d321cea23b96d56d9ad9b4d5abfc3
--- /dev/null
+++ b/src/dev_pilot/nodes/markdown_node.py
@@ -0,0 +1,124 @@
+import os
+from src.dev_pilot.state.sdlc_state import SDLCState
+from src.dev_pilot.utils.Utility import Utility
+from loguru import logger
+
+class MarkdownArtifactsNode:
+ """
+ Graph Node for generating Markdown artifacts for the SDLC process.
+ This node generates Markdown files for:
+ - Project Requirements
+ - User Stories
+ - Design Documents
+ - Generated Code
+ and saves them to the "artifacts" folder.
+ """
+
+ def __init__(self):
+ self.utility = Utility()
+
+ def generate_markdown_artifacts(self, state: SDLCState):
+ """
+ Generate Markdown files for each step in the SDLC state and save them to the artifacts folder.
+ Returns the updated state with a new key 'artifacts' that maps to a dictionary of file paths.
+ """
+ artifacts_dir = "artifacts"
+ os.makedirs(artifacts_dir, exist_ok=True)
+
+ project_name = state.get("project_name", "Project")
+
+ # -- Project Requirements Markdown --
+ requirements = state.get("requirements", [])
+ md_project = f"# Project Requirement for {project_name}\n\n"
+ md_project += "## Requirements\n"
+ md_project += self.utility.format_list(requirements)
+ file_project = os.path.join(artifacts_dir, "Project_Requirement.md")
+ with open(file_project, "w") as f:
+ f.write(md_project)
+
+ # -- User Stories Markdown --
+ user_stories = state.get("user_stories", None)
+ file_stories = None
+ if user_stories:
+ md_stories = f"# User Stories for {project_name}\n\n"
+ md_stories += self.utility.format_user_stories(user_stories)
+ file_stories = os.path.join(artifacts_dir, "User_Stories.md")
+ with open(file_stories, "w") as f:
+ f.write(md_stories)
+
+ # -- Design Documents Markdown --
+ design_docs = state.get("design_documents", None)
+ file_design = None
+ if design_docs:
+ md_design = f"# Design Documents for {project_name}\n\n"
+ md_design += "## Functional Design Document\n"
+ md_design += design_docs.get("functional", "No Functional Design Document available.")
+ md_design += "\n\n## Technical Design Document\n"
+ md_design += design_docs.get("technical", "No Technical Design Document available.")
+ file_design = os.path.join(artifacts_dir, "Design_Documents.md")
+ with open(file_design, "w") as f:
+ f.write(md_design)
+
+ # -- Generated Code Markdown --
+ code_generated = state.get("code_generated", None)
+ file_code = None
+ if code_generated:
+ md_code = f"# Generated Code for {project_name}\n\n"
+ md_code += "\n" + code_generated
+ file_code = os.path.join(artifacts_dir, "Generated_Code.md")
+ with open(file_code, "w") as f:
+ f.write(md_code)
+
+ # -- Security Recommendations Markdown --
+ security_recommendations = state.get("security_recommendations", None)
+ file_security = None
+ if security_recommendations:
+ md_security = f"# Security Recommendations for {project_name}\n\n"
+ md_security += security_recommendations
+ file_security = os.path.join(artifacts_dir, "Security_Recommendations.md")
+ with open(file_security, "w") as f:
+ f.write(md_security)
+
+ # -- Test Cases Markdown --
+ test_cases = state.get("test_cases", None)
+ file_tests = None
+ if test_cases:
+ md_tests = f"# Test Cases for {project_name}\n\n"
+ md_tests += "\n" + test_cases
+ file_tests = os.path.join(artifacts_dir, "Test_Cases.md")
+ with open(file_tests, "w") as f:
+ f.write(md_tests)
+
+ # -- QA Testing Comments Markdown --
+ qa_testing_comments = state.get("qa_testing_comments", None)
+ file_qa = None
+ if qa_testing_comments:
+ md_qa = f"# QA Testing Comments for {project_name}\n\n"
+ md_qa += qa_testing_comments
+ file_qa = os.path.join(artifacts_dir, "QA_Testing_Comments.md")
+ with open(file_qa, "w") as f:
+ f.write(md_qa)
+
+ # -- Deployment Feedback Markdown --
+ deployment_feedback = state.get("deployment_feedback", None)
+ file_deployment = None
+ if deployment_feedback:
+ md_deployment = f"# Deployment Feedback for {project_name}\n\n"
+ md_deployment += deployment_feedback
+ file_deployment = os.path.join(artifacts_dir, "Deployment_Feedback.md")
+ with open(file_deployment, "w") as f:
+ f.write(md_deployment)
+
+ # Update the state with the paths to the generated artifact files.
+ state["artifacts"] = {
+ "Project_Requirements": file_project,
+ "User_Stories": file_stories,
+ "Design_Documents": file_design,
+ "Generated_Code": file_code,
+ "Security_Recommendations": file_security,
+ "Test_Cases": file_tests,
+ "QA_Testing_Comments": file_qa,
+ "Deployment_Feedback": file_deployment
+ }
+ logger.info("Markdown artifacts generated in folder:", artifacts_dir)
+ return state
\ No newline at end of file
diff --git a/src/dev_pilot/nodes/project_requirement_node.py b/src/dev_pilot/nodes/project_requirement_node.py
new file mode 100644
index 0000000000000000000000000000000000000000..650bc0cc2b94afe7e62f6e98a1a743cb6bb1af20
--- /dev/null
+++ b/src/dev_pilot/nodes/project_requirement_node.py
@@ -0,0 +1,81 @@
+from src.dev_pilot.state.sdlc_state import SDLCState, UserStoryList
+from langchain_core.messages import SystemMessage
+
+class ProjectRequirementNode:
+ """
+ Graph Node for the project requirements
+
+ """
+
+ def __init__(self, model):
+ self.llm = model
+
+ def initialize_project(self, state: SDLCState):
+ """
+ Performs the project initilazation
+ """
+ return state
+
+ def get_user_requirements(self, state: SDLCState):
+ """
+ Gets the requirements from the user
+ """
+ pass
+
+ def generate_user_stories(self, state: SDLCState):
+ """
+ Auto-generate highly detailed and accurate user stories for each requirement.
+ """
+ project_name = state["project_name"]
+ requirements = state["requirements"]
+ feedback_reason = state.get("user_stories_feedback", None)
+
+ prompt = f"""
+ You are a senior software analyst specializing in Agile SDLC and user story generation.
+ Your task is to generate **a separate and detailed user story for EACH requirement** from the project details below.
+
+ ---
+ **Project Name:** "{project_name}"
+
+ **Requirements:** "{requirements}
+
+ ---
+ **Instructions for User Story Generation:**
+ - Create **one user story per requirement**.
+ - Assign a **unique identifier** (e.g., US-001, US-002, etc.).
+ - Provide a **clear and concise title** summarizing the user story.
+ - Write a **detailed description** using the "As a [user role], I want [goal] so that [benefit]" format.
+ - Assign a **priority level** (1 = Critical, 2 = High, 3 = Medium, 4 = Low).
+ - Define **acceptance criteria** with bullet points to ensure testability.
+ - Use **domain-specific terminology** for clarity.
+
+ {f"Additionally, consider the following feedback while refining the user stories: {feedback_reason}" if feedback_reason else ""}
+
+ ---
+ **Expected Output Format (for each user story):**
+ - Unique Identifier: US-XXX
+ - Title: [User Story Title]
+ - Description:
+ - As a [user role], I want [feature] so that [benefit].
+ - Priority: [1-4]
+ - Acceptance Criteria:
+ - [Criteria 1]
+ - [Criteria 2]
+ - [Criteria 3]
+
+ Ensure that the user stories are **specific, testable, and aligned with Agile principles**.
+ """
+
+ llm_with_structured = self.llm.with_structured_output(UserStoryList)
+ response = llm_with_structured.invoke(prompt)
+ state["user_stories"] = response
+ return state
+
+ def review_user_stories(self, state: SDLCState):
+ return state
+
+ def revise_user_stories(self, state: SDLCState):
+ pass
+
+ def review_user_stories_router(self, state: SDLCState):
+ return state.get("user_stories_review_status", "approved") # default to "approved" if not present
\ No newline at end of file
diff --git a/src/dev_pilot/state/__init__.py b/src/dev_pilot/state/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/dev_pilot/state/__pycache__/__init__.cpython-312.pyc b/src/dev_pilot/state/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b5b66ad233a5f8893ae03c547b7dfb1ed8c34843
Binary files /dev/null and b/src/dev_pilot/state/__pycache__/__init__.cpython-312.pyc differ
diff --git a/src/dev_pilot/state/__pycache__/sdlc_state.cpython-312.pyc b/src/dev_pilot/state/__pycache__/sdlc_state.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..67a4476976e85878950f15f595f88fb5e3cdd0d4
Binary files /dev/null and b/src/dev_pilot/state/__pycache__/sdlc_state.cpython-312.pyc differ
diff --git a/src/dev_pilot/state/__pycache__/state.cpython-312.pyc b/src/dev_pilot/state/__pycache__/state.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d309fbf72c3d6d46b52246178b24b391434c63f0
Binary files /dev/null and b/src/dev_pilot/state/__pycache__/state.cpython-312.pyc differ
diff --git a/src/dev_pilot/state/sdlc_state.py b/src/dev_pilot/state/sdlc_state.py
new file mode 100644
index 0000000000000000000000000000000000000000..72fa5750826c143072b1573befc1ef65013e56e9
--- /dev/null
+++ b/src/dev_pilot/state/sdlc_state.py
@@ -0,0 +1,65 @@
+from pydantic import BaseModel, Field
+from typing import TypedDict, Any, Dict, Literal, Optional
+import json
+import src.dev_pilot.utils.constants as const
+
+
+class UserStories(BaseModel):
+    """Structured representation of a single Agile user story."""
+    id: int = Field(..., description="The unique identifier of the user story")
+    title: str = Field(..., description="The title of the user story")
+    description: str = Field(..., description="The description of the user story")
+    priority: int = Field(..., description="The priority of the user story")
+    acceptance_criteria: str = Field(..., description="The acceptance criteria of the user story")
+
+class UserStoryList(BaseModel):
+    """Container used as the LLM structured-output schema for user stories."""
+    user_stories: list[UserStories]
+
+class DesignDocument(BaseModel):
+    """Pair of generated design documents (Markdown text) for a project."""
+    functional: str = Field(..., description="Holds the functional design Document")
+    technical: str = Field(..., description="Holds the technical design Document")
+
+class SDLCState(TypedDict):
+    """
+    Represents the structure of the state used in the SDLC graph.
+
+    Keys are populated progressively as the workflow advances, which is why
+    the node code reads them with .get() despite the schema being total.
+    NOTE(review): TypedDict does not apply runtime defaults -- the value
+    assigned to ``next_node`` below is only a class attribute and never
+    reaches instances; confirm callers seed it explicitly.
+    """
+    next_node: str = const.PROJECT_INITILIZATION
+    project_name: str
+    requirements: list[str]
+    # Structured LLM output (pydantic model); may be a plain dict after a
+    # JSON round-trip -- TODO confirm.
+    user_stories: UserStoryList
+    user_stories_feedback: str
+    user_stories_review_status: str
+    design_documents: DesignDocument
+    design_documents_feedback: str
+    design_documents_review_status: str
+    code_generated: str
+    code_review_comments: str
+    code_review_feedback: str
+    code_review_status: str
+    security_recommendations: str
+    security_review_comments: str
+    security_review_status: str
+    test_cases: str
+    test_case_review_status: str
+    test_case_review_feedback: str
+    qa_testing_comments: str
+    qa_testing_status: str
+    qa_testing_feedback: str
+    deployment_status: str
+    deployment_feedback: str
+    # Artifact name -> generated Markdown file path (see MarkdownArtifactsNode).
+    artifacts: dict[str, str]
+
+
+
+class CustomEncoder(json.JSONEncoder):
+ def default(self, obj):
+ # Check if the object is any kind of Pydantic model
+ if isinstance(obj, BaseModel):
+ return obj.model_dump()
+ # Or check for specific classes if needed
+ # if isinstance(obj, UserStories) or isinstance(obj, DesignDocument):
+ # return obj.model_dump()
+ return super().default(obj)
+
+
+
\ No newline at end of file
diff --git a/src/dev_pilot/ui/__init__.py b/src/dev_pilot/ui/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/dev_pilot/ui/__pycache__/__init__.cpython-312.pyc b/src/dev_pilot/ui/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..66ce807cb15884177c176d70ea79415c7a1290bd
Binary files /dev/null and b/src/dev_pilot/ui/__pycache__/__init__.cpython-312.pyc differ
diff --git a/src/dev_pilot/ui/__pycache__/uiconfigfile.cpython-312.pyc b/src/dev_pilot/ui/__pycache__/uiconfigfile.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5879ec1be731e1a532b01bdc73fd910829e42687
Binary files /dev/null and b/src/dev_pilot/ui/__pycache__/uiconfigfile.cpython-312.pyc differ
diff --git a/src/dev_pilot/ui/streamlit_ui/__pycache__/display_result.cpython-312.pyc b/src/dev_pilot/ui/streamlit_ui/__pycache__/display_result.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2a885cd1846c44f50cc3698e71790c4483cf5efb
Binary files /dev/null and b/src/dev_pilot/ui/streamlit_ui/__pycache__/display_result.cpython-312.pyc differ
diff --git a/src/dev_pilot/ui/streamlit_ui/__pycache__/loadui.cpython-312.pyc b/src/dev_pilot/ui/streamlit_ui/__pycache__/loadui.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4d4c1d0968374fef5abcae9caf664719278bb79f
Binary files /dev/null and b/src/dev_pilot/ui/streamlit_ui/__pycache__/loadui.cpython-312.pyc differ
diff --git a/src/dev_pilot/ui/streamlit_ui/__pycache__/streamlit_app.cpython-312.pyc b/src/dev_pilot/ui/streamlit_ui/__pycache__/streamlit_app.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3daab9434818975d4fbcfe0fbbc896971b699e90
Binary files /dev/null and b/src/dev_pilot/ui/streamlit_ui/__pycache__/streamlit_app.cpython-312.pyc differ
diff --git a/src/dev_pilot/ui/streamlit_ui/streamlit_app.py b/src/dev_pilot/ui/streamlit_ui/streamlit_app.py
new file mode 100644
index 0000000000000000000000000000000000000000..73f8b11fa26ff46f5d05486bcdb05ef3064aefb1
--- /dev/null
+++ b/src/dev_pilot/ui/streamlit_ui/streamlit_app.py
@@ -0,0 +1,458 @@
+import streamlit as st
+from src.dev_pilot.LLMS.groqllm import GroqLLM
+from src.dev_pilot.LLMS.geminillm import GeminiLLM
+from src.dev_pilot.LLMS.openai_llm import OpenAILLM
+from src.dev_pilot.graph.graph_builder import GraphBuilder
+from src.dev_pilot.ui.uiconfigfile import Config
+import src.dev_pilot.utils.constants as const
+from src.dev_pilot.graph.graph_executor import GraphExecutor
+from src.dev_pilot.state.sdlc_state import UserStoryList
+import os
+
+def initialize_session():
+ st.session_state.stage = const.PROJECT_INITILIZATION
+ st.session_state.project_name = ""
+ st.session_state.requirements = ""
+ st.session_state.task_id = ""
+ st.session_state.state = {}
+
+
+def load_sidebar_ui(config):
+ user_controls = {}
+
+ with st.sidebar:
+ # Get options from config
+ llm_options = config.get_llm_options()
+
+ # LLM selection
+ user_controls["selected_llm"] = st.selectbox("Select LLM", llm_options)
+
+ if user_controls["selected_llm"] == 'Groq':
+ # Model selection
+ model_options = config.get_groq_model_options()
+ user_controls["selected_groq_model"] = st.selectbox("Select Model", model_options)
+ # API key input
+ os.environ["GROQ_API_KEY"] = user_controls["GROQ_API_KEY"] = st.session_state["GROQ_API_KEY"] = st.text_input("API Key",
+ type="password",
+ value=os.getenv("GROQ_API_KEY", ""))
+ # Validate API key
+ if not user_controls["GROQ_API_KEY"]:
+ st.warning("⚠️ Please enter your GROQ API key to proceed. Don't have? refer : https://console.groq.com/keys ")
+
+ if user_controls["selected_llm"] == 'Gemini':
+ # Model selection
+ model_options = config.get_gemini_model_options()
+ user_controls["selected_gemini_model"] = st.selectbox("Select Model", model_options)
+ # API key input
+ os.environ["GEMINI_API_KEY"] = user_controls["GEMINI_API_KEY"] = st.session_state["GEMINI_API_KEY"] = st.text_input("API Key",
+ type="password",
+ value=os.getenv("GEMINI_API_KEY", ""))
+ # Validate API key
+ if not user_controls["GEMINI_API_KEY"]:
+ st.warning("⚠️ Please enter your GEMINI API key to proceed. Don't have? refer : https://ai.google.dev/gemini-api/docs/api-key ")
+
+
+ if user_controls["selected_llm"] == 'OpenAI':
+ # Model selection
+ model_options = config.get_openai_model_options()
+ user_controls["selected_openai_model"] = st.selectbox("Select Model", model_options)
+ # API key input
+ os.environ["OPENAI_API_KEY"] = user_controls["OPENAI_API_KEY"] = st.session_state["OPENAI_API_KEY"] = st.text_input("API Key",
+ type="password",
+ value=os.getenv("OPENAI_API_KEY", ""))
+ # Validate API key
+ if not user_controls["OPENAI_API_KEY"]:
+ st.warning("⚠️ Please enter your OPENAI API key to proceed. Don't have? refer : https://platform.openai.com/api-keys ")
+
+ if st.button("Reset Session"):
+ for key in list(st.session_state.keys()):
+ del st.session_state[key]
+
+ initialize_session()
+ st.rerun()
+
+ st.subheader("Workflow Overview")
+ st.image("workflow_graph.png")
+
+ return user_controls
+
+
+def load_streamlit_ui(config):
+ st.set_page_config(page_title=config.get_page_title(), layout="wide")
+ st.header(config.get_page_title())
+ st.subheader("Let AI agents plan your SDLC journey", divider="rainbow", anchor=False)
+ user_controls = load_sidebar_ui(config)
+ return user_controls
+
+
+## Main Entry Point
+def load_app():
+ """
+ Main entry point for the Streamlit app using tab-based UI.
+ """
+ config = Config()
+ if 'stage' not in st.session_state:
+ initialize_session()
+
+ user_input = load_streamlit_ui(config)
+ if not user_input:
+ st.error("Error: Failed to load user input from the UI.")
+ return
+
+ try:
+ # Configure LLM
+ selectedLLM = user_input.get("selected_llm")
+ model = None
+ if selectedLLM == "Gemini":
+ obj_llm_config = GeminiLLM(user_controls_input=user_input)
+ model = obj_llm_config.get_llm_model()
+ elif selectedLLM == "Groq":
+ obj_llm_config = GroqLLM(user_controls_input=user_input)
+ model = obj_llm_config.get_llm_model()
+ elif selectedLLM == "OpenAI":
+ obj_llm_config = OpenAILLM(user_controls_input=user_input)
+ model = obj_llm_config.get_llm_model()
+ if not model:
+ st.error("Error: LLM model could not be initialized.")
+ return
+
+ ## Graph Builder
+ graph_builder = GraphBuilder(model)
+ try:
+ graph = graph_builder.setup_graph()
+ graph_executor = GraphExecutor(graph)
+ except Exception as e:
+ st.error(f"Error: Graph setup failed - {e}")
+ return
+
+ # Create tabs for different stages
+ tabs = st.tabs(["Project Requirement", "User Stories", "Design Documents", "Code Generation", "Test Cases", "QA Testing", "Deployment", "Download Artifacts"])
+
+ # ---------------- Tab 1: Project Requirement ----------------
+ with tabs[0]:
+ st.header("Project Requirement")
+ project_name = st.text_input("Enter the project name:", value=st.session_state.get("project_name", ""))
+ st.session_state.project_name = project_name
+
+ if st.session_state.stage == const.PROJECT_INITILIZATION:
+ if st.button("🚀 Let's Start"):
+ if not project_name:
+ st.error("Please enter a project name.")
+ st.stop()
+ graph_response = graph_executor.start_workflow(project_name)
+ st.session_state.task_id = graph_response["task_id"]
+ st.session_state.state = graph_response["state"]
+ st.session_state.project_name = project_name
+ st.session_state.stage = const.REQUIREMENT_COLLECTION
+ st.rerun()
+
+ # If stage has progressed beyond initialization, show requirements input and details.
+ if st.session_state.stage in [const.REQUIREMENT_COLLECTION, const.GENERATE_USER_STORIES]:
+ requirements_input = st.text_area(
+ "Enter the requirements. Write each requirement on a new line:",
+ value="\n".join(st.session_state.get("requirements", []))
+ )
+ if st.button("Submit Requirements"):
+ requirements = [req.strip() for req in requirements_input.split("\n") if req.strip()]
+ st.session_state.requirements = requirements
+ if not requirements:
+ st.error("Please enter at least one requirement.")
+ else:
+ st.success("Project details saved successfully!")
+ st.subheader("Project Details:")
+ st.write(f"**Project Name:** {st.session_state.project_name}")
+ st.subheader("Requirements:")
+ for req in requirements:
+ st.write(req)
+ graph_response = graph_executor.generate_stories(st.session_state.task_id, requirements)
+ st.session_state.state = graph_response["state"]
+ st.session_state.stage = const.GENERATE_USER_STORIES
+ st.rerun()
+
+ # ---------------- Tab 2: User Stories ----------------
+ with tabs[1]:
+ st.header("User Stories")
+ if "user_stories" in st.session_state.state:
+ user_story_list = st.session_state.state["user_stories"]
+ st.divider()
+ st.subheader("Generated User Stories")
+ if isinstance(user_story_list, UserStoryList):
+ for story in user_story_list.user_stories:
+ unique_id = f"US-{story.id:03}"
+ with st.container():
+ st.markdown(f"#### {story.title} ({unique_id})")
+ st.write(f"**Priority:** {story.priority}")
+ st.write(f"**Description:** {story.description}")
+ st.write(f"**Acceptance Criteria:**")
+ st.markdown(story.acceptance_criteria.replace("\n", "
"), unsafe_allow_html=True)
+ st.divider()
+
+ # User Story Review Stage.
+ if st.session_state.stage == const.GENERATE_USER_STORIES:
+ st.subheader("Review User Stories")
+ feedback_text = st.text_area("Provide feedback for improving the user stories (optional):")
+ col1, col2 = st.columns(2)
+ with col1:
+ if st.button("✅ Approve User Stories"):
+ st.success("✅ User stories approved.")
+ graph_response = graph_executor.graph_review_flow(
+ st.session_state.task_id, status="approved", feedback=None, review_type=const.REVIEW_USER_STORIES
+ )
+ st.session_state.state = graph_response["state"]
+ st.session_state.stage = const.CREATE_DESIGN_DOC
+
+ ## For Testing
+ # st.session_state.stage = const.CODE_GENERATION
+
+
+ with col2:
+ if st.button("✍️ Give User Stories Feedback"):
+ if not feedback_text.strip():
+ st.warning("⚠️ Please enter feedback before submitting.")
+ else:
+ st.info("🔄 Sending feedback to revise user stories.")
+ graph_response = graph_executor.graph_review_flow(
+ st.session_state.task_id, status="feedback", feedback=feedback_text.strip(),review_type=const.REVIEW_USER_STORIES
+ )
+ st.session_state.state = graph_response["state"]
+ st.session_state.stage = const.GENERATE_USER_STORIES
+ st.rerun()
+ else:
+ st.info("User stories generation pending or not reached yet.")
+
+ # ---------------- Tab 3: Design Documents ----------------
+ with tabs[2]:
+ st.header("Design Documents")
+ if st.session_state.stage == const.CREATE_DESIGN_DOC:
+
+ graph_response = graph_executor.get_updated_state(st.session_state.task_id)
+ st.session_state.state = graph_response["state"]
+
+ if "design_documents" in st.session_state.state:
+ design_doc = st.session_state.state["design_documents"]
+ st.subheader("Functional Design Document")
+ st.markdown(design_doc.get("functional", "No functional design document available."))
+ st.subheader("Technical Design Document")
+ st.markdown(design_doc.get("technical", "No technical design document available."))
+
+ # Design Document Review Stage.
+ st.divider()
+ st.subheader("Review Design Documents")
+ feedback_text = st.text_area("Provide feedback for improving the design documents (optional):")
+ col1, col2 = st.columns(2)
+ with col1:
+ if st.button("✅ Approve Design Documents"):
+ st.success("✅ Design documents approved.")
+ graph_response = graph_executor.graph_review_flow(
+ st.session_state.task_id, status="approved", feedback=None, review_type=const.REVIEW_DESIGN_DOCUMENTS
+ )
+ st.session_state.state = graph_response["state"]
+ st.session_state.stage = const.CODE_GENERATION
+
+ with col2:
+ if st.button("✍️ Give Design Documents Feedback"):
+ if not feedback_text.strip():
+ st.warning("⚠️ Please enter feedback before submitting.")
+ else:
+ st.info("🔄 Sending feedback to revise design documents.")
+ graph_response = graph_executor.graph_review_flow(
+ st.session_state.task_id, status="feedback", feedback=feedback_text.strip(),review_type=const.REVIEW_DESIGN_DOCUMENTS
+ )
+ st.session_state.state = graph_response["state"]
+ st.session_state.stage = const.CREATE_DESIGN_DOC
+ st.rerun()
+
+ else:
+ st.info("Design document generation pending or not reached yet.")
+
+ # ---------------- Tab 4: Coding ----------------
+ with tabs[3]:
+ st.header("Code Genearation")
+ if st.session_state.stage in [const.CODE_GENERATION, const.SECURITY_REVIEW]:
+
+ graph_response = graph_executor.get_updated_state(st.session_state.task_id)
+ st.session_state.state = graph_response["state"]
+
+ if "code_generated" in st.session_state.state:
+ code_generated = st.session_state.state["code_generated"]
+ st.subheader("Code Files")
+ st.markdown(code_generated)
+ st.divider()
+
+ if st.session_state.stage == const.CODE_GENERATION:
+ review_type = const.REVIEW_CODE
+ elif st.session_state.stage == const.SECURITY_REVIEW:
+ if "security_recommendations" in st.session_state.state:
+ security_recommendations = st.session_state.state["security_recommendations"]
+ st.subheader("Security Recommendations")
+ st.markdown(security_recommendations)
+ review_type = const.REVIEW_SECURITY_RECOMMENDATIONS
+
+ # Code Review Stage.
+ st.divider()
+ st.subheader("Review Details")
+
+ if st.session_state.stage == const.CODE_GENERATION:
+ feedback_text = st.text_area("Provide feedback (optional):")
+
+ col1, col2 = st.columns(2)
+ with col1:
+ if st.button("✅ Approve Code"):
+ graph_response = graph_executor.graph_review_flow(
+ st.session_state.task_id, status="approved", feedback=None, review_type=review_type
+ )
+ st.session_state.state = graph_response["state"]
+ if st.session_state.stage == const.CODE_GENERATION:
+ st.session_state.stage = const.SECURITY_REVIEW
+ st.rerun()
+ elif st.session_state.stage == const.SECURITY_REVIEW:
+ st.session_state.stage = const.WRITE_TEST_CASES
+
+ with col2:
+ if st.session_state.stage == const.SECURITY_REVIEW:
+ if st.button("✍️ Implment Security Recommendations"):
+ st.info("🔄 Sending feedback to revise code generation.")
+ graph_response = graph_executor.graph_review_flow(
+ st.session_state.task_id, status="feedback", feedback=None, review_type=review_type
+ )
+ st.session_state.state = graph_response["state"]
+ st.session_state.stage = const.CODE_GENERATION
+ st.rerun()
+ else:
+ if st.button("✍️ Give Feedback"):
+ if not feedback_text.strip():
+ st.warning("⚠️ Please enter feedback before submitting.")
+ else:
+ st.info("🔄 Sending feedback to revise code generation.")
+ graph_response = graph_executor.graph_review_flow(
+ st.session_state.task_id, status="feedback", feedback=feedback_text.strip(),review_type=review_type
+ )
+ st.session_state.state = graph_response["state"]
+ st.session_state.stage = const.CODE_GENERATION
+ st.rerun()
+
+ else:
+ st.info("Code generation pending or not reached yet.")
+
+ # ---------------- Tab 5: Test Cases ----------------
+ with tabs[4]:
+ st.header("Test Cases")
+ if st.session_state.stage == const.WRITE_TEST_CASES:
+
+ graph_response = graph_executor.get_updated_state(st.session_state.task_id)
+ st.session_state.state = graph_response["state"]
+
+ if "test_cases" in st.session_state.state:
+ test_cases = st.session_state.state["test_cases"]
+ st.markdown(test_cases)
+
+ # Test Cases Review Stage.
+ st.divider()
+ st.subheader("Review Test Cases")
+ feedback_text = st.text_area("Provide feedback for improving the test cases (optional):")
+ col1, col2 = st.columns(2)
+ with col1:
+ if st.button("✅ Approve Test Cases"):
+ st.success("✅ Test cases approved.")
+ graph_response = graph_executor.graph_review_flow(
+ st.session_state.task_id, status="approved", feedback=None, review_type=const.REVIEW_TEST_CASES
+ )
+ st.session_state.state = graph_response["state"]
+ st.session_state.stage = const.QA_TESTING
+
+ with col2:
+ if st.button("✍️ Give Test Cases Feedback"):
+ if not feedback_text.strip():
+ st.warning("⚠️ Please enter feedback before submitting.")
+ else:
+ st.info("🔄 Sending feedback to revise test cases.")
+ graph_response = graph_executor.graph_review_flow(
+ st.session_state.task_id, status="feedback", feedback=feedback_text.strip(),review_type=const.REVIEW_TEST_CASES
+ )
+ st.session_state.state = graph_response["state"]
+ st.session_state.stage = const.WRITE_TEST_CASES
+ st.rerun()
+
+ else:
+ st.info("Test Cases generation pending or not reached yet.")
+
+ # ---------------- Tab 6: QA Testing ----------------
+ with tabs[5]:
+ st.header("QA Testing")
+ if st.session_state.stage == const.QA_TESTING:
+
+ graph_response = graph_executor.get_updated_state(st.session_state.task_id)
+ st.session_state.state = graph_response["state"]
+
+ if "qa_testing_comments" in st.session_state.state:
+ qa_testing = st.session_state.state["qa_testing_comments"]
+ st.markdown(qa_testing)
+
+ # QA Testing Review Stage.
+ st.divider()
+ st.subheader("Review QA Testing Comments")
+ col1, col2 = st.columns(2)
+ with col1:
+ if st.button("✅ Approve Testing"):
+ st.success("✅ QA Testing approved.")
+ graph_response = graph_executor.graph_review_flow(
+ st.session_state.task_id, status="approved", feedback=None, review_type=const.REVIEW_QA_TESTING
+ )
+ st.session_state.state = graph_response["state"]
+ st.session_state.stage = const.DEPLOYMENT
+
+ with col2:
+ if st.button("✍️ Fix testing issues"):
+ st.info("🔄 Sending feedback to revise code.")
+ graph_response = graph_executor.graph_review_flow(
+ st.session_state.task_id, status="feedback", feedback=feedback_text.strip(),review_type=const.REVIEW_QA_TESTING
+ )
+ st.session_state.state = graph_response["state"]
+ st.session_state.stage = const.CODE_GENERATION
+ st.rerun()
+
+ else:
+ st.info("QA Testing Report generation pending or not reached yet.")
+
+ # ---------------- Tab 7: Deployment ----------------
+ with tabs[6]:
+ st.header("Deployment")
+ if st.session_state.stage == const.DEPLOYMENT:
+
+ graph_response = graph_executor.get_updated_state(st.session_state.task_id)
+ st.session_state.state = graph_response["state"]
+
+ if "deployment_feedback" in st.session_state.state:
+ deployment_feedback = st.session_state.state["deployment_feedback"]
+ st.markdown(deployment_feedback)
+ st.session_state.stage = const.ARTIFACTS
+
+ else:
+ st.info("Deplopment verification pending or not reached yet.")
+
+ # ---------------- Tab 8: Artifacts ----------------
+ with tabs[7]:
+ st.header("Artifacts")
+ if "artifacts" in st.session_state.state and st.session_state.state["artifacts"]:
+ st.subheader("Download Artifacts")
+ for artifact_name, artifact_path in st.session_state.state["artifacts"].items():
+ if artifact_path:
+ try:
+ with open(artifact_path, "rb") as f:
+ file_bytes = f.read()
+ st.download_button(
+ label=f"Download {artifact_name}",
+ data=file_bytes,
+ file_name=os.path.basename(artifact_path),
+ mime="application/octet-stream"
+ )
+ except Exception as e:
+ st.error(f"Error reading {artifact_name}: {e}")
+ else:
+ st.info(f"{artifact_name} not available.")
+ else:
+ st.info("No artifacts generated yet.")
+
+ except Exception as e:
+ raise ValueError(f"Error occured with Exception : {e}")
+
\ No newline at end of file
diff --git a/src/dev_pilot/ui/uiconfigfile.ini b/src/dev_pilot/ui/uiconfigfile.ini
new file mode 100644
index 0000000000000000000000000000000000000000..c6895935f2054bb5ddb62bfb6504406c7fe9d431
--- /dev/null
+++ b/src/dev_pilot/ui/uiconfigfile.ini
@@ -0,0 +1,6 @@
+[DEFAULT]
+PAGE_TITLE = Dev Pilot
+LLM_OPTIONS = Gemini, Groq, OpenAI
+GROQ_MODEL_OPTIONS = gemma2-9b-it, llama3-8b-8192, llama3-70b-8192
+GEMINI_MODEL_OPTIONS = gemini-2.0-flash, gemini-2.0-flash-lite, gemini-2.5-pro-exp-03-25
+OPENAI_MODEL_OPTIONS = gpt-4o, gpt-4, gpt-3.5-turbo
\ No newline at end of file
diff --git a/src/dev_pilot/ui/uiconfigfile.py b/src/dev_pilot/ui/uiconfigfile.py
new file mode 100644
index 0000000000000000000000000000000000000000..b139c44041f38e167d11fbe30d3cfbed13ad1a1a
--- /dev/null
+++ b/src/dev_pilot/ui/uiconfigfile.py
@@ -0,0 +1,21 @@
+from configparser import ConfigParser
+
+class Config:
+ def __init__(self,config_file="src/dev_pilot/ui/uiconfigfile.ini"):
+ self.config=ConfigParser()
+ self.config.read(config_file)
+
+ def get_llm_options(self):
+ return self.config["DEFAULT"].get("LLM_OPTIONS").split(", ")
+
+ def get_groq_model_options(self):
+ return self.config["DEFAULT"].get("GROQ_MODEL_OPTIONS").split(", ")
+
+ def get_gemini_model_options(self):
+ return self.config["DEFAULT"].get("GEMINI_MODEL_OPTIONS").split(", ")
+
+ def get_openai_model_options(self):
+ return self.config["DEFAULT"].get("OPENAI_MODEL_OPTIONS").split(", ")
+
+ def get_page_title(self):
+ return self.config["DEFAULT"].get("PAGE_TITLE")
\ No newline at end of file
diff --git a/src/dev_pilot/utils/Utility.py b/src/dev_pilot/utils/Utility.py
new file mode 100644
index 0000000000000000000000000000000000000000..2f5d4b4d95fb9530ca830e36b9907f8162862da0
--- /dev/null
+++ b/src/dev_pilot/utils/Utility.py
@@ -0,0 +1,24 @@
+from src.dev_pilot.state.sdlc_state import SDLCState
+
+class Utility:
+
+ def __init__(self):
+ pass
+
+ def format_list(self, items):
+ """Format list items nicely for prompt"""
+ return '\n'.join([f"- {item}" for item in items])
+
+ def format_user_stories(self, stories):
+ """Format user stories nicely for prompt"""
+ formatted_stories = []
+ for story in stories:
+ if hasattr(story, 'id') and hasattr(story, 'title') and hasattr(story, 'description'):
+ # Handle class instance
+ formatted_stories.append(f"- ID: {story.id}\n Title: {story.title}\n Description: {story.description}")
+ elif isinstance(story, dict):
+ # Handle dictionary
+ formatted_stories.append(f"- ID: {story.get('id', 'N/A')}\n Title: {story.get('title', 'N/A')}\n Description: {story.get('description', 'N/A')}")
+ return '\n'.join(formatted_stories)
+
+
\ No newline at end of file
diff --git a/src/dev_pilot/utils/__init__.py b/src/dev_pilot/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/dev_pilot/utils/__pycache__/Utility.cpython-312.pyc b/src/dev_pilot/utils/__pycache__/Utility.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0ece618183411d4e82874a8a922d60e97c5c018e
Binary files /dev/null and b/src/dev_pilot/utils/__pycache__/Utility.cpython-312.pyc differ
diff --git a/src/dev_pilot/utils/__pycache__/__init__.cpython-312.pyc b/src/dev_pilot/utils/__pycache__/__init__.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..32a51a862c88665c49691a1b074a374c2c4fad63
Binary files /dev/null and b/src/dev_pilot/utils/__pycache__/__init__.cpython-312.pyc differ
diff --git a/src/dev_pilot/utils/__pycache__/constants.cpython-312.pyc b/src/dev_pilot/utils/__pycache__/constants.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fa1a6825acca42d0eca6663a095785bee59960c3
Binary files /dev/null and b/src/dev_pilot/utils/__pycache__/constants.cpython-312.pyc differ
diff --git a/src/dev_pilot/utils/__pycache__/logging_config.cpython-312.pyc b/src/dev_pilot/utils/__pycache__/logging_config.cpython-312.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f5fb0070d8c7baf28387187a684cb903b5a4a854
Binary files /dev/null and b/src/dev_pilot/utils/__pycache__/logging_config.cpython-312.pyc differ
diff --git a/src/dev_pilot/utils/constants.py b/src/dev_pilot/utils/constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..689adeb88c6a20eaf199c2e5d19aa00802200d2e
--- /dev/null
+++ b/src/dev_pilot/utils/constants.py
@@ -0,0 +1,19 @@
+## Graph States
+# Stage identifiers stored in st.session_state.stage and in the workflow state;
+# the Streamlit tabs and the graph executor branch on these values.
+# NOTE(review): "INITILIZATION"/"initilization" are misspelled, but the name is
+# referenced from other modules (const.PROJECT_INITILIZATION), so both the name
+# and the value are kept as-is for compatibility.
+PROJECT_INITILIZATION = "project_initilization"
+REQUIREMENT_COLLECTION = "requirement_collection"
+GENERATE_USER_STORIES = "generate_user_stories"
+CREATE_DESIGN_DOC = "create_design_document"
+CODE_GENERATION = "code_generation"
+SECURITY_REVIEW = "security_review"
+WRITE_TEST_CASES = "write_test_cases"
+QA_TESTING = "qa_testing"
+DEPLOYMENT = "deployment"
+ARTIFACTS = "artifacts"
+
+# Review checkpoints passed as review_type to graph_review_flow.
+REVIEW_USER_STORIES = "review_user_stories"
+REVIEW_DESIGN_DOCUMENTS = "review_design_documents"
+REVIEW_CODE = "review_code"
+REVIEW_SECURITY_RECOMMENDATIONS = "review_security_recommendations"
+REVIEW_TEST_CASES = "review_test_cases"
+REVIEW_QA_TESTING = "review_qa_testing"
+END_NODE = "end_node"
diff --git a/src/dev_pilot/utils/logging_config.py b/src/dev_pilot/utils/logging_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..46249e3c9c8c5600b463c804663e70ffab927cf3
--- /dev/null
+++ b/src/dev_pilot/utils/logging_config.py
@@ -0,0 +1,29 @@
+import sys
+import os
+from loguru import logger
+
+def setup_logging(log_level: str = "INFO"):
+ # Create logs directory if it doesn't exist
+ log_dir = "logs"
+ os.makedirs(log_dir, exist_ok=True)
+
+ # Clear any default logger configurations
+ logger.remove()
+
+ # Console handler: colorized output with a nice format
+ logger.add(
+ sys.stdout,
+ colorize=True,
+ format="{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {name}:{line} - {message}",
+ level=log_level,
+ )
+
+ # File handler: logs to file with rotation, retention, and compression for production
+ logger.add(
+ os.path.join(log_dir, "devpilot.log"),
+ rotation="10 MB", # Rotate after 10 MB
+ retention="10 days", # Keep logs for 10 days
+ compression="zip", # Compress archived logs
+ format="{time:YYYY-MM-DD HH:mm:ss} | {level: <8} | {name}:{line} - {message}",
+ level=log_level,
+ )
\ No newline at end of file