# NOTE: stray page-scrape artifacts removed here (Hugging Face Spaces status
# lines, file size, commit hashes, and a line-number gutter) — they were not
# part of the Python source and made the file unparseable.
# sdlc_node.py
import streamlit as st
import logging
from typing import Dict, Any
from src.langgraphagenticai.ui.streamlitui.sdlcfeedback import SDLCUI
# Configure logging for production (adjust level and handlers as necessary)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class SDLCNode:
    """
    LangGraph-style node collection for an LLM-assisted SDLC workflow.

    Pipeline (tracked via the ``current_step`` state key): user stories ->
    PO review -> design docs -> story revision -> design review -> code
    generation -> code review -> security review -> fixes -> test cases ->
    test review -> test fixes.

    Every public node method receives the current graph ``state`` dict,
    calls the LLM, mirrors the result into ``st.session_state['state']``
    (so the Streamlit UI can render it), and returns a partial state
    update for the graph. On failure it logs, shows a Streamlit error
    banner, and returns ``{"error": <message>}``.
    """

    def __init__(self, llm):
        # ``llm`` must expose ``invoke(prompt)`` returning an object with a
        # ``.content`` attribute (e.g. a LangChain chat model).
        self.llm = llm
        self.logger = logging.getLogger(self.__class__.__name__)

    # ------------------------------------------------------------------ #
    # Internal helpers
    # ------------------------------------------------------------------ #

    def _update_state(self, updates: Dict[str, Any]) -> None:
        """Safely merge ``updates`` into ``st.session_state['state']``."""
        try:
            if 'state' not in st.session_state:
                st.session_state['state'] = {}
            st.session_state['state'].update(updates)
        except Exception as e:
            self.logger.exception("Failed to update session state.")
            st.error(f"Error updating session state: {e}")

    def _cached(self, key: str) -> Any:
        """Return a previously stored truthy value for ``key``, else None.

        Replaces the original ``st.session_state.state[key] is not ''``
        guards, which (a) raised ``KeyError`` when the key was missing —
        the guard sat outside the method's try/except — and (b) used
        identity comparison against a string literal, a SyntaxWarning
        since Python 3.8 with interning-dependent semantics.
        """
        try:
            return st.session_state['state'].get(key) or None
        except (KeyError, TypeError, AttributeError):
            return None

    def _invoke_llm(self, prompt: str) -> str:
        """Invoke the LLM with ``prompt`` and return its text content.

        Raises:
            ValueError: if the response has no ``content`` attribute.
        """
        response = self.llm.invoke(prompt)
        content = getattr(response, 'content', None)
        if content is None:
            raise ValueError("LLM response does not contain content.")
        return content

    def _run_step(
        self,
        *,
        prompt: str,
        result_key: str,
        next_step: str,
        log_name: str,
        error_msg: str,
        alias_keys: tuple = (),
        set_current_result: bool = True,
        clear_decision: bool = False,
        extra_return: Dict[str, Any] = None,
    ) -> Dict[str, Any]:
        """Shared node body: invoke the LLM, persist results, advance step.

        Args:
            prompt: text sent to the LLM.
            result_key: session/graph state key receiving the LLM output
                (also the key of the returned partial-state dict).
            next_step: value written to ``current_step``.
            log_name: method name used in the failure log record.
            error_msg: user-facing prefix for the Streamlit error banner.
            alias_keys: additional state keys that receive the same content.
            set_current_result: also mirror content into ``current_result``.
            clear_decision: reset the pending human-in-the-loop decision
                (only on success, matching the original flow).
            extra_return: extra key/values merged into the returned dict.

        Returns:
            ``{result_key: content, **extra_return}`` on success or
            ``{"error": str(e)}`` on failure.
        """
        try:
            content = self._invoke_llm(prompt)
            updates: Dict[str, Any] = {result_key: content, "current_step": next_step}
            if set_current_result:
                updates["current_result"] = content
            for key in alias_keys:
                updates[key] = content
            self._update_state(updates)
            if clear_decision:
                st.session_state.user_decision = None
            result = {result_key: content}
            if extra_return:
                result.update(extra_return)
            return result
        except Exception as e:
            self.logger.exception("Error in %s.", log_name)
            st.error(f"{error_msg}: {e}")
            return {"error": str(e)}

    # ------------------------------------------------------------------ #
    # UI
    # ------------------------------------------------------------------ #

    def refresh_ui(self) -> None:
        """Re-render the SDLC feedback UI, surfacing any render failure."""
        try:
            ui = SDLCUI()
            ui.render()
        except Exception as e:
            self.logger.exception("Error refreshing UI.")
            st.error(f"UI Refresh failed: {e}")

    # ------------------------------------------------------------------ #
    # Workflow nodes
    # ------------------------------------------------------------------ #

    def generate_user_stories(self, state: Dict[str, Any]) -> Dict[str, Any]:
        """Generate Markdown user stories from the captured requirements."""
        cached = self._cached('user_stories')
        if cached:
            return {"user_stories": cached}
        po_feedback = state.get('po_feedback')
        prompt = f"""
        Generate comprehensive user stories based on these requirements:
        {state.get('requirements', 'No requirements provided')}
        {f"PO Feedback to incorporate: {po_feedback}" if po_feedback else ""}
        Format as Markdown with clear acceptance criteria.
        """
        return self._run_step(
            prompt=prompt,
            result_key="user_stories",
            next_step="product_owner_review",
            log_name="generate_user_stories",
            error_msg="Failed to generate user stories",
        )

    def product_owner_review(self, state: Dict[str, Any]) -> Dict[str, Any]:
        """Have the LLM act as Product Owner and review the user stories."""
        cached = self._cached('po_feedback')
        if cached:
            return {"po_feedback": cached}
        prompt = f"""
        Please review the following user stories and provide your feedback.
        User Stories:
        {state.get('user_stories', 'No user stories available')}
        If you approve, simply type "approve". Otherwise, provide detailed feedback.
        """
        return self._run_step(
            prompt=prompt,
            result_key="po_feedback",
            next_step="create_design_docs",
            log_name="product_owner_review",
            error_msg="Product owner review failed",
            clear_decision=True,
        )

    def create_design_docs(self, state: Dict[str, Any]) -> Dict[str, Any]:
        """Produce functional/technical design docs (with Mermaid diagrams)."""
        cached = self._cached('design_docs')
        if cached:
            return {"design_docs": cached}
        prompt = f"""
        Create comprehensive design documents for the following user stories:
        {state.get('user_stories', 'No user stories available')}
        Include both functional and technical specifications.
        Provide architecture diagrams in Mermaid format.
        """
        return self._run_step(
            prompt=prompt,
            result_key="design_docs",
            next_step="revise_user_stories",
            log_name="create_design_docs",
            error_msg="Failed to create design documents",
        )

    def revise_user_stories(self, state: Dict[str, Any]) -> Dict[str, Any]:
        """Revise user stories using the PO feedback.

        NOTE(review): guards on the same key it writes ('user_stories'),
        so once stories exist this node short-circuits and never revises —
        preserved from the original; confirm this is the intended caching
        behavior for the graph.
        """
        cached = self._cached('user_stories')
        if cached:
            return {"user_stories": cached}
        prompt = f"""
        Revise the user stories based on the following Product Owner feedback:
        Original Stories: {state.get('user_stories', 'No stories generated')}
        Feedback: {state.get('po_feedback', 'No feedback provided')}
        Please maintain Markdown format and include clear acceptance criteria.
        """
        return self._run_step(
            prompt=prompt,
            result_key="user_stories",
            next_step="design_review",
            log_name="revise_user_stories",
            error_msg="Failed to revise user stories",
        )

    def design_review(self, state: Dict[str, Any]) -> Dict[str, Any]:
        """Review the design documents and collect design feedback."""
        cached = self._cached('design_feedback')
        if cached:
            return {"design_feedback": cached}
        prompt = f"""
        Please review the following design documents and provide your feedback.
        Design Documents:
        {state.get('design_docs', 'No design documents available')}
        If you approve, type "approve". Otherwise, provide detailed design feedback.
        """
        return self._run_step(
            prompt=prompt,
            result_key="design_feedback",
            next_step="generate_code",
            log_name="design_review",
            error_msg="Design review failed",
            clear_decision=True,
        )

    def generate_code(self, state: Dict[str, Any]) -> Dict[str, Any]:
        """Generate production-quality code from the design documents."""
        cached = self._cached('generate_code')
        if cached:
            return {"generate_code": cached}
        review_feedback = state.get('review_feedback')
        prompt = f"""
        Generate production-quality code for the following design:
        {state.get('design_docs', 'No design documents available')}
        {f"Code Review Feedback: {review_feedback}" if review_feedback else ""}
        Include error handling, clear comments, and follow best coding practices.
        """
        # BUG FIX: downstream nodes (code_review, security_review, the fix_*
        # nodes, write_test_cases) all read 'generated_code', which the
        # original never populated — mirror the content under that key too.
        return self._run_step(
            prompt=prompt,
            result_key="generate_code",
            next_step="code_review",
            log_name="generate_code",
            error_msg="Failed to generate code",
            alias_keys=("generated_code",),
        )

    def code_review(self, state: Dict[str, Any]) -> Dict[str, Any]:
        """Review the generated code and collect code-review feedback."""
        cached = self._cached('review_feedback')
        if cached:
            return {"review_feedback": cached}
        prompt = f"""
        Please review the following generated code and provide your feedback.
        Code:
        {state.get('generated_code', 'No code generated')}
        If the code is acceptable, type "approve". Otherwise, provide specific code review feedback.
        """
        # BUG FIX: the original stored the feedback under "code_review" while
        # both the cache guard and the return value used "review_feedback",
        # so the session-state cache never matched. Store under the key the
        # rest of the class actually reads.
        return self._run_step(
            prompt=prompt,
            result_key="review_feedback",
            next_step="security_review",
            log_name="code_review",
            error_msg="Code review failed",
            clear_decision=True,
        )

    def security_review(self, state: Dict[str, Any]) -> Dict[str, Any]:
        """Scan the generated code for security vulnerabilities."""
        cached = self._cached('security_feedback')
        if cached:
            return {"security_feedback": cached}
        prompt = f"""
        Please review the following code for potential security vulnerabilities:
        Code:
        {state.get('generated_code', 'No code available')}
        Provide any security-related feedback.
        """
        return self._run_step(
            prompt=prompt,
            result_key="security_feedback",
            next_step="fix_code_after_code_review",
            log_name="security_review",
            error_msg="Security review failed",
            clear_decision=True,
            # The original also returned current_step to the graph here.
            extra_return={"current_step": "fix_code_after_code_review"},
        )

    def fix_code_after_code_review(self, state: Dict[str, Any]) -> Dict[str, Any]:
        """Rewrite the code to address the code-review feedback."""
        prompt = f"""
        Fix the following code based on the code review feedback provided.
        Original Code: {state.get('generated_code', 'No code available')}
        Feedback: {state.get('review_feedback', 'No feedback provided')}
        Ensure that functionality is preserved while addressing all feedback.
        """
        return self._run_step(
            prompt=prompt,
            result_key="generated_code",
            next_step="fix_code_after_security",
            log_name="fix_code_after_code_review",
            error_msg="Failed to fix code after review",
            clear_decision=True,
        )

    def fix_code_after_security(self, state: Dict[str, Any]) -> Dict[str, Any]:
        """Harden the code to address the security-review feedback."""
        prompt = f"""
        Improve the security of the following code based on the security review feedback:
        Code: {state.get('generated_code', 'No code available')}
        Security Feedback: {state.get('security_feedback', 'No security feedback provided')}
        Apply best practices for security and fix any vulnerabilities.
        """
        return self._run_step(
            prompt=prompt,
            result_key="generated_code",
            next_step="write_test_cases",
            log_name="fix_code_after_security",
            error_msg="Failed to fix code for security",
        )

    def write_test_cases(self, state: Dict[str, Any]) -> Dict[str, Any]:
        """Generate test cases (Markdown table) for the final code."""
        prompt = f"""
        Create test cases for the following code in brief :
        {state.get('generated_code', 'No code available')}
        Include positive, negative, edge, and security test cases.
        Format the tests as a Markdown table with test steps and expected results.
        """
        return self._run_step(
            prompt=prompt,
            result_key="test_cases",
            next_step="test_cases_review",
            log_name="write_test_cases",
            error_msg="Failed to write test cases",
        )

    def test_cases_review(self, state: Dict[str, Any]) -> Dict[str, Any]:
        """Review the generated test cases and collect feedback."""
        prompt = f"""
        Please review the following test cases and provide your feedback.
        Test Cases:
        {state.get('test_cases', 'No test cases generated')}
        If the test cases are acceptable, type "approve". Otherwise, provide detailed feedback.
        """
        # CONSISTENCY FIX: the original had the st.error banner commented
        # out here (alone among the review nodes); failures now surface the
        # same way as everywhere else. Note: unlike the other review nodes,
        # this one does not mirror into ``current_result`` — preserved.
        return self._run_step(
            prompt=prompt,
            result_key="test_feedback",
            next_step="fix_test_cases",
            log_name="test_cases_review",
            error_msg="Test cases review failed",
            set_current_result=False,
        )

    def decision_test_cases_review(self, state: Dict[str, Any]) -> Dict[str, Any]:
        """Decide whether the test cases are approved or need changes."""
        prompt = f"""
        Based on the following test cases and the review feedback, please decide whether the test cases are acceptable or if further modifications are required.
        Test Cases:
        {state.get('test_cases', 'No test cases available')}
        Review Feedback:
        {state.get('test_feedback', 'No feedback provided')}
        If the test cases are acceptable, output "approved". Otherwise, provide the modifications required.
        """
        return self._run_step(
            prompt=prompt,
            result_key="decision_test_cases",
            next_step="fix_test_cases",
            log_name="decision_test_cases_review",
            error_msg="Decision on test cases review failed",
        )

    def fix_test_cases(self, state: Dict[str, Any]) -> Dict[str, Any]:
        """Apply review feedback to the test cases; final workflow step."""
        prompt = f"""
        Improve the test cases based on the following feedback:
        Original Test Cases: {state.get('test_cases', 'No test cases available')}
        Feedback: {state.get('test_feedback', 'No feedback provided')}
        Ensure that all feedback points are addressed and that the Markdown table format is maintained.
        """
        # CONSISTENCY FIX: re-enabled the commented-out error banner.
        return self._run_step(
            prompt=prompt,
            result_key="test_cases",
            next_step="completed",
            log_name="fix_test_cases",
            error_msg="Failed to fix test cases",
            set_current_result=False,
        )