# TestForgeAI — app.py
# Hugging Face Space by SalwaM (commit ecd11e4, "Create app.py").
import gradio as gr
from datetime import datetime
from groq import Groq
import traceback
import json
import os
# --- 1. API KEY ---
# Read the Groq API key from the environment and fail fast if it is missing.
api_key_coder = os.environ.get('Chat_with_Your_Context')
if not api_key_coder:
    # Fix: the old message told users to set "fristapi", but the code actually
    # reads the 'Chat_with_Your_Context' variable — name the real one.
    raise ValueError("Groq API key not found. Set the 'Chat_with_Your_Context' environment variable.")
# --- 2. LLM CLIENT ---
class GroqLLM:
    """Minimal wrapper around the Groq chat-completions client.

    Errors are returned as text (never raised) so UI handlers can display them.
    """

    def __init__(self, api_key, model="meta-llama/llama-4-scout-17b-16e-instruct", temperature=0.1):
        self.client = Groq(api_key=api_key)
        self.model = model
        self.temperature = temperature

    def invoke(self, prompt):
        """Send *prompt* as a single user message and return the reply text,
        or an "LLM Error: ..." string if the API call fails."""
        try:
            completion = self.client.chat.completions.create(
                model=self.model,
                temperature=self.temperature,
                max_tokens=2000,
                messages=[{"role": "user", "content": prompt}],
            )
            return completion.choices[0].message.content
        except Exception as exc:
            # Surface API failures to the caller as plain text.
            return f"LLM Error: {str(exc)}"
# Shared LLM client used by both the SmartQA and HealTest pipelines.
llm = GroqLLM(api_key=api_key_coder)
# ============================================
# PART 1: SmartQA System (Multi-Source Test Generation)
# ============================================
# --- Dummy Runner and DOM Matcher for SmartQA ---
class DummyRunner:
    """Stand-in test executor: any script containing a failure marker is
    reported as failed with a canned error/DOM; everything else passes."""

    def run(self, test_script):
        """Return a result dict with a "status" key ("failed" or "passed")."""
        lowered = test_script.lower()
        if any(marker in lowered for marker in ("fail", "assert false")):
            return {
                "status": "failed",
                "error": "AssertionError: Expected True but got False",
                "logs": "Stack trace: line 10 in test_function",
                "dom": "<button id='submit-btn' class='btn'>Submit</button>",
            }
        return {"status": "passed", "message": "All tests passed successfully"}
class DOMMatcher:
    """Stub locator matcher: always proposes the same selector with a fixed score.

    A real implementation would fuzzy-match *failed_locator* against *dom*;
    this stub keeps the demo deterministic.
    """

    def find_similar(self, dom, failed_locator):
        suggestion = "button#submit-btn"
        confidence = 0.92
        return suggestion, confidence
# Module-level singletons shared by the SmartQA pipeline functions below.
runner = DummyRunner()
dom_matcher = DOMMatcher()
# --- SmartQA Functions ---
def detect_failure(test_script):
    """Execute *test_script* with the shared runner and return its result dict."""
    outcome = runner.run(test_script)
    return outcome
def analyze_root_cause(failure_data):
    """Ask the LLM to diagnose a failure dict produced by the runner.

    Returns {"root_cause": <LLM text>, "confidence": "high"}.
    """
    err_text = failure_data.get("error", "Unknown")
    log_text = failure_data.get("logs", "")
    prompt = f"""
Analyze this test failure:
Error: {err_text}
Logs: {log_text}
Provide:
1. Root cause analysis
2. Suggested fix
"""
    return {"root_cause": llm.invoke(prompt), "confidence": "high"}
def heal_locator(failure_data):
    """Propose a replacement locator for a failed test via the DOM matcher."""
    locator, score = dom_matcher.find_similar(
        failure_data.get("dom", ""),
        failure_data.get("error", ""),
    )
    return {"suggested_locator": locator, "confidence": score}
def update_script(script_content, old_locator, new_locator):
    """Return *script_content* with every occurrence of *old_locator*
    replaced by *new_locator* (plain substring replacement)."""
    healed = script_content.replace(old_locator, new_locator)
    return healed
def reexecute_test(test_script):
    """Re-run an (updated) script through the shared runner."""
    result = runner.run(test_script)
    return result
def generate_report(data):
    """Have the LLM write a full QA report from the collected pipeline data."""
    payload = json.dumps(data, indent=2, ensure_ascii=False)
    prompt = f"""
Generate a comprehensive QA report based on this data:
{payload}
Include:
- Test Execution Summary
- Failures Detected
- Root Cause Analysis
- Healing Actions
- Final Results
- Recommendations
"""
    return llm.invoke(prompt)
# --- SmartQA Knowledge Classes ---
class KnowledgeInput:
    """Bundle of optional knowledge sources driving test generation.

    Each attribute is either raw user-supplied text or None.
    """

    def __init__(self, requirements=None, dom=None, api_spec=None, user_flows=None, source_code=None, recording=None):
        self.requirements, self.dom = requirements, dom
        self.api_spec, self.user_flows = api_spec, user_flows
        self.source_code, self.recording = source_code, recording
class KnowledgeProcessor:
    """Normalizes each knowledge source into the keyed dict TestGenerator expects."""

    def parse_requirements(self, text):
        return text.strip()

    def parse_dom(self, dom_text):
        # Truncate to keep the downstream LLM prompt small.
        return dom_text[:4000]

    def parse_api(self, api_text):
        return api_text[:4000]

    def parse_flows(self, flows_text):
        return flows_text.strip()

    def analyze_code(self, code_text):
        return code_text[:4000]

    def parse_recording(self, rec_text):
        return rec_text.strip()

    def process(self, knowledge):
        """Map every truthy field of *knowledge* to its processed short key
        ("req", "ui", "api", "flows", "code", "record")."""
        out = {}
        if knowledge.requirements:
            out["req"] = self.parse_requirements(knowledge.requirements)
        if knowledge.dom:
            out["ui"] = self.parse_dom(knowledge.dom)
        if knowledge.api_spec:
            out["api"] = self.parse_api(knowledge.api_spec)
        if knowledge.user_flows:
            out["flows"] = self.parse_flows(knowledge.user_flows)
        if knowledge.source_code:
            out["code"] = self.analyze_code(knowledge.source_code)
        if knowledge.recording:
            out["record"] = self.parse_recording(knowledge.recording)
        return out
class TestGenerator:
    """Turns processed knowledge into test scripts via the LLM, one source at a time."""

    def __init__(self, llm):
        self.llm = llm

    def generate_req_tests(self, data):
        return self.llm.invoke(f"Generate Python Selenium tests from requirements:\n{data['req']}")

    def generate_ui_tests(self, data):
        return self.llm.invoke(f"Generate Selenium UI tests from DOM:\n{data['ui']}")

    def generate_api_tests(self, data):
        return self.llm.invoke(f"Generate API tests from spec:\n{data['api']}")

    def generate_flow_tests(self, data):
        return self.llm.invoke(f"Generate E2E tests from flows:\n{data['flows']}")

    def generate_code_tests(self, data):
        return self.llm.invoke(f"Generate tests from code:\n{data['code']}")

    def generate_record_tests(self, data):
        return self.llm.invoke(f"Convert recording to test:\n{data['record']}")

    def generate(self, processed_data):
        """Dispatch to the first matching source.

        Precedence (same as the original if-chain): api > ui > flows > req > code > record.
        """
        dispatch = (
            ("api", self.generate_api_tests),
            ("ui", self.generate_ui_tests),
            ("flows", self.generate_flow_tests),
            ("req", self.generate_req_tests),
            ("code", self.generate_code_tests),
            ("record", self.generate_record_tests),
        )
        for key, handler in dispatch:
            if key in processed_data:
                return handler(processed_data)
        return "No valid input provided"
def run_complete_analysis(test_script):
    """Run the full detect → analyze → heal → re-run pipeline.

    Returns a report dict with "steps" (ordered records of each stage),
    "final_result", "healing_applied", and "full_report" (LLM-generated text).
    """
    report = {
        "original_script": test_script,
        "steps": [],
        "final_result": {},
        "healing_applied": False,
    }
    first_run = detect_failure(test_script)
    report["steps"].append({"step": "initial_execution", "result": first_run})
    if first_run["status"] != "failed":
        report["final_result"] = first_run
    else:
        report["healing_applied"] = True
        diagnosis = analyze_root_cause(first_run)
        report["steps"].append({"step": "root_cause_analysis", "analysis": diagnosis})
        healing = heal_locator(first_run)
        report["steps"].append({"step": "healing_attempt", "healing": healing})
        if "suggested_locator" in healing:
            # NOTE(review): the old locator is hard-coded to "button" — the dummy
            # matcher never reports which locator actually failed. Confirm before
            # wiring in a real runner.
            healed_script = update_script(test_script, "button", healing["suggested_locator"])
            report["steps"].append({"step": "script_updated", "new_script": healed_script})
            rerun = reexecute_test(healed_script)
            report["final_result"] = rerun
            report["steps"].append({"step": "re_execution", "result": rerun})
    # The report is generated before "full_report" is attached, so the LLM
    # sees only the step data (same ordering as the original).
    report["full_report"] = generate_report(report)
    return report
class SmartQASystem:
    """End-to-end pipeline: process knowledge → generate tests → self-healing analysis."""

    def __init__(self, llm):
        self.processor = KnowledgeProcessor()
        self.generator = TestGenerator(llm)

    def run(self, knowledge):
        """Generate tests from *knowledge*, run the healing pipeline, and return
        a dict with generated/healed scripts, both execution results, and the report.

        Bug fix: the healed script lives in the "script_updated" step, but the old
        code read `.get("new_script")` on the LAST step — always "re_execution",
        which has no such key — so the healed script was never returned. We now
        scan the steps for the most recent "new_script".
        """
        processed = self.processor.process(knowledge)
        generated = self.generator.generate(processed)
        analysis = run_complete_analysis(generated)

        healed = generated
        if analysis.get("healing_applied"):
            for step in analysis.get("steps", []):
                if "new_script" in step:
                    healed = step["new_script"]  # keep the latest update

        return {
            "generated_test": generated,
            "updated_test": healed,
            "initial_result": str(analysis["steps"][0]["result"]),
            "final_result": str(analysis["final_result"]),
            "report": analysis["full_report"],
        }
# Singleton pipeline instance used by the Gradio handlers below.
smartqa = SmartQASystem(llm)
# ============================================
# PART 2: HealTest AI System (Simple Test Healing)
# ============================================
def process_test_inputs(script_text, testcase_text):
    """Gradio handler for HealTest: run the healing pipeline on *script_text*
    and return a markdown-formatted analysis (errors are returned as text)."""
    try:
        if not script_text.strip():
            return "⛔ Please paste a test script."
        analysis = run_complete_analysis(script_text)

        # Truncate long scripts in the rendered preview.
        preview = script_text[:500]
        if len(script_text) > 500:
            preview += "..."

        parts = [
            "# 📊 HealTest AI Analysis Result\n\n",
            "## 🧾 Test Script:\n",
            "```python\n",
            preview,
            "\n```\n\n",
            "## 📋 Test Case:\n",
            testcase_text,
            "\n\n## 🔍 Analysis Steps:\n",
        ]
        for step in analysis["steps"]:
            parts.append(f"\n### ➡️ {step['step']}\n")
            parts.append(f"```\n{json.dumps(step, indent=2, ensure_ascii=False)}\n```\n")
        parts.append("\n## 📈 Final Result: " + json.dumps(analysis["final_result"], ensure_ascii=False) + "\n")
        parts.append("\n## 📝 Full Report:\n" + str(analysis["full_report"]) + "\n")
        return "".join(parts)
    except Exception as e:
        return f"❌ Error: {str(e)}\n\n{traceback.format_exc()}"
# ============================================
# PART 3: SmartQA Interface Functions
# ============================================
def run_smartqa(requirements, dom, api_spec, flows, code, recording):
    """Gradio handler for SmartQA: bundle the six inputs and run the pipeline.

    Returns a 5-tuple matching the five output widgets; on failure every slot
    carries the same error text so the UI always receives five values.
    """
    try:
        bundle = KnowledgeInput(
            requirements=requirements,
            dom=dom,
            api_spec=api_spec,
            user_flows=flows,
            source_code=code,
            recording=recording,
        )
        outcome = smartqa.run(bundle)
        keys = ("generated_test", "updated_test", "initial_result", "final_result", "report")
        return tuple(outcome[k] for k in keys)
    except Exception as e:
        error_msg = f"Error: {str(e)}\n{traceback.format_exc()}"
        return (error_msg,) * 5
def load_smartqa_examples():
    """Return demo inputs for all six SmartQA sources, in widget order:
    (requirements, dom, api_spec, flows, code, recording)."""
    requirements = """
User can login with email and password
User can search for a product
User can add product to cart
"""
    dom = """
<html>
<body>
<input id=\"email\" />
<input id=\"password\" />
<button id=\"login-btn\">Login</button>
<input id=\"search\" />
<button id=\"search-btn\">Search</button>
<button id=\"add-cart\">Add to Cart</button>
</body>
</html>
"""
    api_spec = """
POST /login
Body: { email, password }
GET /products
POST /cart
Body: { product_id }
"""
    flows = """
Open login page
Enter email and password
Click login
Search product
Add to cart
"""
    code = """
@app.route('/login', methods=['POST'])
def login():
email = request.json['email']
password = request.json['password']
if authenticate(email, password):
return {'status': 'ok'}
return {'status': 'fail'}, 401
"""
    recording = """
User navigates to /login
Types email test@mail.com
Types password 123456
Clicks Login button
Navigates to /products
Clicks Add to Cart
"""
    return (requirements, dom, api_spec, flows, code, recording)
# ============================================
# PART 4: HealTest Examples
# ============================================
# Canned failing test scripts for the HealTest demo buttons; each deliberately
# ends in `assert False` so DummyRunner flags it as failed and healing kicks in.
# NOTE(review): indentation inside the script strings was lost in this paste —
# confirm the embedded snippets against the deployed Space before reformatting.
heal_example_scripts = {
"example1": """def test_login_ui():
driver.get("https://example.com/login")
driver.find_element(By.ID, "submit-btn").click()
assert False""",
"example2": """def test_add_to_cart():
driver.get("https://shop.com/product/1")
driver.find_element(By.ID, "add-to-cart").click()
assert False""",
"example3": """def test_submit_button():
driver.get("https://example.com/form")
driver.find_element(By.TAG_NAME, "button").click()
assert False"""
}
# Human-readable test-case descriptions paired 1:1 (by key) with
# heal_example_scripts above; shown in the "Test Case Description" box.
heal_example_testcases = {
"example1": """Test: Login via UI
Steps:
- Open login page
- Click Login button
Expected:
- User redirected to dashboard""",
"example2": """Test: Add product to cart
Steps:
- Open product page
- Click Add to Cart
Expected:
- Product appears in cart""",
"example3": """Test: Submit form
Steps:
- Open form page
- Click Submit
Expected:
- Form submitted successfully"""
}
def load_heal_example1():
    """Fill the HealTest inputs with the login-button example."""
    key = "example1"
    return heal_example_scripts[key], heal_example_testcases[key]
def load_heal_example2():
    """Fill the HealTest inputs with the add-to-cart example."""
    key = "example2"
    return heal_example_scripts[key], heal_example_testcases[key]
def load_heal_example3():
    """Fill the HealTest inputs with the submit-form example."""
    key = "example3"
    return heal_example_scripts[key], heal_example_testcases[key]
# ============================================
# PART 5: Unified Gradio Interface
# ============================================
# Build the tabbed Gradio UI; `demo` is launched at the bottom of the file.
# NOTE(review): indentation was lost in this paste — nesting below is
# reconstructed from the widget/handler logic; confirm the exact layout
# (especially which column the result rows live in) against the deployed Space.
with gr.Blocks(title="SmartQA + HealTest AI", theme=gr.themes.Soft()) as demo:
    gr.Markdown("""
# 🧠 SmartQA + HealTest AI - Unified Testing Platform
### Multi-Source Test Generation + Self-Healing Test Automation
""")
    # Landing tab: static description only.
    with gr.Tab("🏠 Home"):
        gr.Markdown("""
## Welcome to the Unified Testing Platform!
This platform combines two powerful AI-powered testing tools:
### 1. 🎯 **SmartQA - Multi-Source Test Generation**
- Generate tests from multiple sources:
- Requirements
- UI/DOM
- API Specifications
- User Flows
- Source Code
- User Recordings
- AI-powered test generation
- Comprehensive test coverage
### 2. 🔧 **HealTest AI - Self-Healing Test Automation**
- Paste your test script
- Add test case description
- AI detects failures
- Automatically heals broken locators
- Generates detailed reports
### 🚀 How to Use:
1. Navigate to the desired tool using the tabs above
2. Input your data or load examples
3. Click the analyze button
4. View comprehensive results
### 💡 Benefits:
- Save hours of manual test writing
- Automatic test maintenance
- Reduced flaky tests
- Better test coverage
- AI-powered insights
""")
    # SmartQA tab: six input sources (sub-tabs), generate/heal outputs, report.
    with gr.Tab("🎯 SmartQA - Multi-Source Generator"):
        gr.Markdown("### Generate tests from multiple sources")
        with gr.Row():
            with gr.Column(scale=1):
                gr.Markdown("#### Input Sources")
                with gr.Tab("Requirements"):
                    req_input = gr.Textbox(lines=4, label="Requirements", placeholder="Enter functional requirements...")
                with gr.Tab("UI / DOM"):
                    dom_input = gr.Textbox(lines=4, label="HTML DOM", placeholder="Paste HTML DOM structure...")
                with gr.Tab("API Spec"):
                    api_input = gr.Textbox(lines=4, label="API Specification", placeholder="Enter API endpoints and specs...")
                with gr.Tab("User Flows"):
                    flow_input = gr.Textbox(lines=4, label="User Flows", placeholder="Describe user interaction flows...")
                with gr.Tab("Source Code"):
                    code_input = gr.Textbox(lines=4, label="Source Code", placeholder="Paste source code to analyze...")
                with gr.Tab("Recording"):
                    rec_input = gr.Textbox(lines=4, label="Recording", placeholder="Paste user interaction recording...")
            with gr.Column(scale=1):
                gr.Markdown("#### Actions & Results")
                with gr.Row():
                    smartqa_run_btn = gr.Button("🚀 Generate Tests", variant="primary", size="lg")
                    smartqa_example_btn = gr.Button("📂 Load Examples", size="lg")
        gr.Markdown("---")
        with gr.Row():
            with gr.Column():
                gr.Markdown("#### Generated Test")
                smartqa_gen_out = gr.Code(label="Generated Test", language="python")
            with gr.Column():
                gr.Markdown("#### Healed Test")
                smartqa_upd_out = gr.Code(label="Healed Test", language="python")
        with gr.Row():
            with gr.Column():
                smartqa_init_out = gr.Textbox(label="Initial Result", lines=3)
            with gr.Column():
                smartqa_final_out = gr.Textbox(label="Final Result", lines=3)
        smartqa_report_out = gr.Textbox(label="QA Report", lines=10)
        # SmartQA event handlers
        smartqa_example_btn.click(
            fn=load_smartqa_examples,
            inputs=[],
            outputs=[req_input, dom_input, api_input, flow_input, code_input, rec_input]
        )
        smartqa_run_btn.click(
            fn=run_smartqa,
            inputs=[req_input, dom_input, api_input, flow_input, code_input, rec_input],
            outputs=[smartqa_gen_out, smartqa_upd_out, smartqa_init_out, smartqa_final_out, smartqa_report_out]
        )
    # HealTest tab: script + description in, markdown analysis out.
    with gr.Tab("🔧 HealTest AI - Self-Healing"):
        gr.Markdown("### Self-Healing Test Automation")
        with gr.Row():
            with gr.Column():
                gr.Markdown("#### Input")
                heal_script_input = gr.Textbox(
                    label="📝 Paste Test Script",
                    lines=10,
                    placeholder="# Paste your Python test script here...\ndef test_example():\n driver.find_element(By.ID, 'submit-btn').click()\n assert True"
                )
                heal_testcase_input = gr.Textbox(
                    label="📋 Test Case Description",
                    lines=6,
                    placeholder="Describe what the test should do...\n\nSteps:\n1. Open page\n2. Click button\n3. Verify result"
                )
                with gr.Row():
                    heal_analyze_btn = gr.Button("🚀 Start Analysis", variant="primary", size="lg")
                with gr.Row():
                    heal_example1_btn = gr.Button("📂 Example 1 - Login", size="sm")
                    heal_example2_btn = gr.Button("📂 Example 2 - Add to Cart", size="sm")
                    heal_example3_btn = gr.Button("📂 Example 3 - Submit", size="sm")
            with gr.Column():
                gr.Markdown("#### Results")
                heal_output = gr.Markdown(label="Analysis Result", value="Click 'Start Analysis' to begin...")
        # HealTest event handlers
        heal_example1_btn.click(load_heal_example1, [], [heal_script_input, heal_testcase_input])
        heal_example2_btn.click(load_heal_example2, [], [heal_script_input, heal_testcase_input])
        heal_example3_btn.click(load_heal_example3, [], [heal_script_input, heal_testcase_input])
        heal_analyze_btn.click(
            fn=process_test_inputs,
            inputs=[heal_script_input, heal_testcase_input],
            outputs=heal_output
        )
    # About tab: static description only.
    with gr.Tab("📊 About"):
        gr.Markdown("""
## About This Platform
### SmartQA System
An AI-powered test generation system that creates automated tests from various input sources:
- **Requirements**: Converts functional requirements to test scripts
- **UI/DOM**: Generates Selenium tests from HTML structure
- **API Specs**: Creates API tests from OpenAPI/Swagger
- **User Flows**: Builds E2E tests from user scenarios
- **Source Code**: Analyzes code to generate relevant tests
- **Recordings**: Transforms user interactions into test scripts
### HealTest AI
A self-healing test automation system that:
1. **Detects** test failures automatically
2. **Analyzes** root causes using AI
3. **Heals** broken locators by finding alternatives
4. **Updates** test scripts automatically
5. **Re-executes** to verify the fix
6. **Generates** comprehensive reports
### Technology Stack
- **Frontend**: Gradio
- **AI Model**: Meta Llama-4 (via Groq API)
- **Backend**: Python
### Created by
A unified testing solution for modern QA teams
""")
# ============================================
# LAUNCH THE APPLICATION
# ============================================
if __name__ == "__main__":
    # share=True requests a public tunnel link; server_name="0.0.0.0" binds all
    # interfaces (needed for containerized hosting such as Hugging Face Spaces).
    demo.launch(share=True, debug=False, server_name="0.0.0.0")