# QA_VAM / optimized_app.py
# Uploaded by JanviMl ("Create optimized_app.py", commit de29208, verified)
import gradio as gr
import json
import time
import random
from datetime import datetime
from functools import lru_cache
# Optimized version with rate limiting protection
class RateLimitedDemo:
    """Throttle demo operations with a minimum delay between requests.

    A single module-level instance is shared by all requests so the
    hosting service is not overwhelmed by rapid repeated clicks.
    """

    def __init__(self):
        self.request_count = 0  # total requests processed this session
        self.min_delay = 2      # minimum seconds between requests
        # Seed the timestamp "in the past" so the very first request is
        # never delayed. time.monotonic() is immune to system clock
        # adjustments, unlike time.time(), so the delay cannot be skipped
        # (or inflated) by a wall-clock change.
        self.last_request_time = time.monotonic() - self.min_delay

    def check_rate_limit(self):
        """Sleep until ``min_delay`` seconds have elapsed since the
        previous request, then record this request."""
        elapsed = time.monotonic() - self.last_request_time
        if elapsed < self.min_delay:
            time.sleep(self.min_delay - elapsed)
        self.last_request_time = time.monotonic()
        self.request_count += 1
# Initialize demo controller (module-level singleton shared by every request)
demo_controller = RateLimitedDemo()
# Load data with error handling
def load_json_safe(filename, default=None):
    """Load JSON from *filename*, falling back to *default* on failure.

    Args:
        filename: Path of the JSON file to read.
        default: Value returned when the file is missing or unparsable.
            Defaults to a fresh empty list each call, avoiding the
            shared-mutable-default pitfall of ``default=[]``.

    Returns:
        The parsed JSON value, or *default* on error.
    """
    if default is None:
        default = []
    try:
        with open(filename, 'r') as f:
            return json.load(f)
    except (FileNotFoundError, json.JSONDecodeError):
        # A malformed file is as unrecoverable here as a missing one;
        # fall back to the caller-supplied default in both cases.
        return default
# Pre-load data once at import time to avoid repeated file access.
# The inline literal below is the fallback used when
# 'dummy_user_stories.json' is absent from the working directory.
DUMMY_STORIES = load_json_safe('dummy_user_stories.json', [
    {
        "id": 1,
        "title": "Invoice Generation",
        "description": "As a policyholder, I want an invoice generated within 24 hours after policy issuance so I can view my premium charges."
    },
    {
        "id": 2,
        "title": "Credit Card Payment",
        "description": "As a policyholder, I want to pay my premium using a credit card with proper CVV validation so my payment is secure."
    }
])
# Cached responses to reduce computation
@lru_cache(maxsize=128)
def get_cached_bdd(user_story_hash):
    """Return the canned BDD (Gherkin) scenario for a story hash.

    Results are memoized per hash, so re-running the same story skips
    regeneration entirely.
    """
    scenario = """
Feature: Insurance System Functionality
As a user of the insurance system
I want reliable and secure operations
So that I can manage my insurance needs effectively

Scenario: Successful operation completion
Given I am a registered user with valid credentials
And I have access to the insurance system
When I perform the required insurance operation
Then the system should process my request successfully
And I should receive appropriate confirmation
And the system should maintain data integrity
And all security protocols should be followed

Scenario: Error handling and user feedback
Given I am using the insurance system
When an error occurs during processing
Then the system should handle the error gracefully
And I should receive clear error messages
And the system should suggest corrective actions
And my data should remain secure and intact
"""
    return scenario
@lru_cache(maxsize=128)
def get_cached_test_script(bdd_hash):
"""Generate test script with caching"""
return '''
import pytest
from unittest.mock import Mock, patch
from datetime import datetime
class TestInsuranceSystemFunctionality:
"""Comprehensive test suite for insurance system operations"""
def setup_method(self):
"""Initialize test environment before each test"""
self.system_service = Mock()
self.test_user = {
"id": "USER-001",
"name": "Test User",
"email": "test@example.com",
"policy_id": "POL-12345"
}
def test_successful_operation_completion(self):
"""Test successful completion of insurance operations"""
# Given - User with valid credentials and system access
user_credentials = {"username": "testuser", "password": "secure123"}
self.system_service.authenticate.return_value = {"success": True, "user_id": "USER-001"}
# When - User performs required insurance operation
auth_result = self.system_service.authenticate(user_credentials)
operation_result = self.system_service.process_request(self.test_user)
# Then - System processes request successfully
assert auth_result["success"] is True
assert operation_result["status"] == "completed"
assert operation_result["confirmation_id"] is not None
# And - Appropriate confirmation is provided
self.system_service.send_confirmation.assert_called_once()
def test_error_handling_and_user_feedback(self):
"""Test system error handling and user feedback mechanisms"""
# Given - System experiencing an error condition
self.system_service.process_request.side_effect = Exception("System temporarily unavailable")
# When - Error occurs during processing
result = self.system_service.handle_error(self.test_user)
# Then - System handles error gracefully
assert result["status"] == "error"
assert "error_message" in result
assert result["user_notified"] is True
# And - Clear error messages and suggestions provided
assert "temporarily unavailable" in result["error_message"].lower()
assert "corrective_actions" in result
@patch('insurance.security.SecurityManager')
def test_security_protocols_maintained(self, mock_security):
"""Test that security protocols are properly maintained"""
# Given - Security manager is active
mock_security.return_value.validate_session.return_value = True
mock_security.return_value.encrypt_data.return_value = "encrypted_data"
# When - Processing user request
result = self.system_service.secure_process(self.test_user)
# Then - Security protocols are followed
mock_security.return_value.validate_session.assert_called_once()
mock_security.return_value.encrypt_data.assert_called()
assert result["security_validated"] is True
def test_data_integrity_maintenance(self):
"""Test that data integrity is maintained throughout operations"""
# Given - Initial data state
initial_data = {"policy_id": "POL-12345", "premium": 1200.00}
self.system_service.get_policy_data.return_value = initial_data
# When - Data operations are performed
self.system_service.update_policy(initial_data["policy_id"], {"status": "active"})
final_data = self.system_service.get_policy_data()
# Then - Data integrity is maintained
assert final_data["policy_id"] == initial_data["policy_id"]
assert final_data["premium"] == initial_data["premium"]
assert final_data["status"] == "active"
def test_system_performance_requirements(self):
"""Test that system meets performance requirements"""
# Given - Performance monitoring is active
start_time = time.time()
# When - System operation is performed
result = self.system_service.process_request(self.test_user)
end_time = time.time()
# Then - Performance requirements are met
response_time = end_time - start_time
assert response_time < 5.0 # Response within 5 seconds
assert result["performance_metrics"]["response_time"] < 5000 # milliseconds
'''
def simple_test_execution():
    """Simulate a test run, randomly picking a PASS or a FAIL outcome.

    Returns:
        A dict with ``status`` ("PASS" or "FAIL") and a human-readable
        ``log`` string summarizing the simulated run.
    """
    passed = {
        "status": "PASS",
        "log": "βœ… All tests passed successfully!\n\nTests executed: 5\nPassed: 5\nFailed: 0\nExecution time: 2.3 seconds"
    }
    failed = {
        "status": "FAIL",
        "log": "❌ Some tests failed\n\nTests executed: 5\nPassed: 3\nFailed: 2\n\nFailures:\n- Authentication timeout\n- Data validation error\n\nExecution time: 4.1 seconds"
    }
    return random.choice([passed, failed])
def get_simple_defect_summary():
    """Return a canned markdown defect analysis shown after a FAIL run."""
    summary = """
πŸ› **Test Failure Analysis**

**Issue:** System response time exceeded threshold
**Severity:** Medium
**Impact:** User experience degradation

**Root Cause:**
- Database query optimization needed
- Connection pool configuration issue

**Recommended Fix:**
1. Optimize slow database queries
2. Increase connection pool size
3. Implement query caching
4. Add performance monitoring

**Next Steps:**
- Create performance improvement ticket
- Schedule optimization sprint
- Implement monitoring dashboard
"""
    return summary
def process_test_lifecycle_optimized(selected_story_id, custom_story=""):
    """Run the simulated STLC pipeline, yielding UI updates after each stage.

    Args:
        selected_story_id: Dropdown value (stringified story id).
        custom_story: Optional free-text story; when non-blank it
            overrides the dropdown selection.

    Yields:
        6-tuples of markdown strings matching the Gradio outputs:
        (status, story, bdd, script, execution, analysis).
    """
    # Apply rate limiting before any work is done.
    demo_controller.check_rate_limit()
    try:
        # Step 1: Get user story
        if custom_story.strip():
            user_story = custom_story.strip()
            story_title = "Custom Story"
        else:
            story_data = next((s for s in DUMMY_STORIES if s["id"] == int(selected_story_id)), None)
            if not story_data:
                # BUG FIX: inside a generator, ``return value`` only sets
                # StopIteration.value -- Gradio never received the error
                # tuple, so the UI silently showed nothing. Yield it, then
                # stop.
                yield ("❌ Story not found", "", "", "", "", "")
                return
            user_story = story_data["description"]
            story_title = story_data["title"]
        # Step 2: Generate BDD (cached)
        yield (
            f"πŸ”„ Processing: {story_title}",
            f"**User Story:** {user_story}",
            "⏳ Generating BDD scenario...",
            "", "", ""
        )
        time.sleep(1)  # Simulate processing
        story_hash = hash(user_story)
        bdd_scenario = get_cached_bdd(story_hash)
        # Step 3: Generate Test Script (cached)
        yield (
            f"βœ… Story: {story_title}",
            f"**User Story:** {user_story}",
            f"**BDD Scenario:**\n```gherkin\n{bdd_scenario}\n```",
            "⏳ Generating test script...",
            "", ""
        )
        time.sleep(1)  # Simulate processing
        bdd_hash = hash(bdd_scenario)
        test_script = get_cached_test_script(bdd_hash)
        # Step 4: Execute Tests
        yield (
            f"βœ… Story: {story_title}",
            f"**User Story:** {user_story}",
            f"**BDD Scenario:**\n```gherkin\n{bdd_scenario}\n```",
            f"**Test Script:**\n```python\n{test_script}\n```",
            "⏳ Executing tests...",
            ""
        )
        time.sleep(1)  # Simulate execution
        execution_result = simple_test_execution()
        # Step 5: Generate defect summary only for failed runs.
        status_icon = "βœ…" if execution_result["status"] == "PASS" else "❌"
        if execution_result["status"] == "FAIL":
            yield (
                f"βœ… Story: {story_title}",
                f"**User Story:** {user_story}",
                f"**BDD Scenario:**\n```gherkin\n{bdd_scenario}\n```",
                f"**Test Script:**\n```python\n{test_script}\n```",
                f"**Execution Status:** {status_icon} {execution_result['status']}\n\n**Log:**\n```\n{execution_result['log']}\n```",
                "⏳ Analyzing failures..."
            )
            time.sleep(1)
            defect_summary = get_simple_defect_summary()
        else:
            defect_summary = "πŸŽ‰ All tests passed! No defects found."
        # Final result
        yield (
            f"βœ… Complete: {story_title}",
            f"**User Story:** {user_story}",
            f"**BDD Scenario:**\n```gherkin\n{bdd_scenario}\n```",
            f"**Test Script:**\n```python\n{test_script}\n```",
            f"**Execution Status:** {status_icon} {execution_result['status']}\n\n**Log:**\n```\n{execution_result['log']}\n```",
            f"**Analysis:**\n{defect_summary}"
        )
    except Exception as e:
        # Broad catch is deliberate: this is the top-level UI boundary and
        # any failure must surface as a friendly message, not a traceback.
        yield (
            "❌ Error occurred",
            f"Error: {str(e)}",
            "Please try again in a moment.",
            "", "", ""
        )
# Lightweight Gradio interface: two columns -- story selection on the left,
# staged pipeline output accordions on the right.
with gr.Blocks(
    theme=gr.themes.Default(),
    title="STLC-AI Demo (Optimized)",
    css=".gradio-container {max-width: 1000px !important;}"
) as demo:
    gr.Markdown("""
# πŸ€– STLC-AI: GenAI Test Automation Demo
**Optimized version** - Demonstrates AI-powered test lifecycle automation for insurance systems.
⚑ **Rate-limited for stability** - Please allow 2-3 seconds between operations.
""")
    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("### πŸ“ Select User Story")
            # Dropdown values are stringified story ids (matches the
            # int(selected_story_id) lookup in the processing function).
            story_dropdown = gr.Dropdown(
                choices=[(f"{s['title']}", str(s['id'])) for s in DUMMY_STORIES],
                label="Pre-loaded Stories",
                value="1"
            )
            custom_story_input = gr.Textbox(
                label="OR Enter Custom Story",
                placeholder="As a [role], I want [goal] so that [benefit]...",
                lines=2
            )
            process_btn = gr.Button("πŸš€ Start Test Lifecycle", variant="primary")
            # NOTE(review): request_count is read once at build time, so this
            # stat is frozen at its import-time value -- it will not update as
            # requests are processed.
            gr.Markdown(f"""
### πŸ“Š Demo Stats
- **Stories Available:** {len(DUMMY_STORIES)}
- **Requests Processed:** {demo_controller.request_count}
- **Status:** βœ… Optimized for rate limits
""")
        with gr.Column(scale=2):
            status_display = gr.Textbox(label="Status", interactive=False)
            with gr.Accordion("πŸ“– User Story", open=True):
                story_output = gr.Markdown()
            with gr.Accordion("🎯 BDD Scenario", open=False):
                bdd_output = gr.Markdown()
            with gr.Accordion("πŸ§ͺ Test Script", open=False):
                script_output = gr.Markdown()
            with gr.Accordion("⚑ Results", open=False):
                execution_output = gr.Markdown()
            with gr.Accordion("πŸ” Analysis", open=False):
                defect_output = gr.Markdown()
    # Event handler: the generator fn streams staged updates into the six
    # output components, one yield per pipeline step.
    process_btn.click(
        fn=process_test_lifecycle_optimized,
        inputs=[story_dropdown, custom_story_input],
        outputs=[status_display, story_output, bdd_output, script_output, execution_output, defect_output]
    )
    gr.Markdown("""
---
### πŸ’‘ Tips for Best Experience:
- Wait for each step to complete before starting a new test
- Try different user stories to see various outputs
- The demo uses cached responses for faster performance
**πŸ”§ Optimizations Applied:**
- Rate limiting protection
- Response caching
- Simplified processing
- Reduced API calls
""")
# Script entry point: launch the Gradio server (typical HF Spaces setup:
# bind all interfaces on port 7860).
if __name__ == "__main__":
    demo.launch(
        share=False,  # Disable sharing to reduce load
        server_name="0.0.0.0",
        server_port=7860,
        quiet=True,  # Reduce logging
        show_error=True
    )