Akshayram1 commited on
Commit
eda0722
Β·
verified Β·
1 Parent(s): 72f4299

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1011 -0
app.py CHANGED
@@ -0,0 +1,1011 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import base64
import io
import json
import os
import re
import tempfile
from datetime import datetime
from typing import Dict, List, Any

import gradio as gr
import openai
import pandas as pd
import requests
import yaml
from dotenv import load_dotenv
from PIL import Image
15
+
16
+ # Load environment variables
17
+ load_dotenv()
18
+
19
+ # Initialize OpenAI client
20
def init_openai():
    """Build an OpenAI client from the OPENAI_API_KEY environment variable.

    Returns:
        openai.OpenAI: a configured API client.

    Raises:
        ValueError: when the key is absent or empty.
    """
    key = os.getenv("OPENAI_API_KEY")
    if key:
        return openai.OpenAI(api_key=key)
    raise ValueError("❌ OpenAI API key not found in environment variables. Please set OPENAI_API_KEY.")

# Module-level client shared by every generation helper below.
client = init_openai()
27
+
28
+ # Enhanced utility functions
29
def encode_image(image):
    """Return the base64 encoding of *image* for the OpenAI Vision API.

    Accepts either a filesystem path (str) or a PIL-style image object
    exposing ``save()``; either way the raw bytes are base64-encoded.
    """
    if isinstance(image, str):
        # A string argument is treated as a path on disk.
        with open(image, "rb") as handle:
            raw = handle.read()
    else:
        # Serialize the in-memory image as PNG without touching disk.
        sink = io.BytesIO()
        image.save(sink, format="PNG")
        raw = sink.getvalue()
    return base64.b64encode(raw).decode()
38
+
39
def call_openai_chat(messages, model="gpt-4o-mini", max_tokens=3000):
    """Send a chat-completion request and return the first choice's text.

    Failures are reported by *returning* a string that contains "Error:"
    rather than raising -- callers probe the result with `"Error:" in ...`.
    """
    request = dict(
        model=model,
        messages=messages,
        max_tokens=max_tokens,
        # Low temperature keeps generated test artifacts reproducible.
        temperature=0.3,
    )
    try:
        completion = client.chat.completions.create(**request)
        return completion.choices[0].message.content
    except Exception as exc:
        return f"OpenAI API Error: {str(exc)}"
51
+
52
def parse_test_cases_to_dataframe(text_response):
    """Parse free-text test cases produced by the LLM into a DataFrame.

    The parser is heuristic: the response is split into per-test-case
    blocks, each field is pulled out with a labelled-line regex, and a
    line-by-line scan is used as a fallback when no structured block is
    found.  This function never raises; on any failure a one-row
    placeholder (or error) DataFrame is returned so downstream CSV
    export always has content.

    Args:
        text_response: raw LLM output text.

    Returns:
        pandas.DataFrame with one row per parsed test case.
    """

    def _grab(pattern, block, default, flags=re.IGNORECASE):
        # First captured group (stripped), or *default* when absent.
        m = re.search(pattern, block, flags)
        return m.group(1).strip() if m else default

    try:
        test_cases = []

        # Labelled-line patterns.  Plural labels ("Preconditions:",
        # "Expected Results:") -- the exact forms the generation prompt
        # asks for -- are accepted explicitly; without the optional "s"
        # the capture kept a stray "s: " prefix.
        test_case_pattern = r'(?:Test Case|TC)\s*(?:ID|#)?\s*:?\s*([^\n]+)'
        title_pattern = r'(?:Test Case Title|Title|Name)\s*:?\s*([^\n]+)'
        precondition_pattern = r'(?:Preconditions?|Pre-conditions?|Prerequisites?)\s*:?\s*([^\n]+)'
        steps_pattern = r'(?:Test Steps?|Steps|Procedure)\s*:?\s*((?:[^\n]*\n?)*?)(?=Expected|Priority|Test Case|$)'
        expected_pattern = r'(?:Expected Results?|Expected|Results?)\s*:?\s*([^\n]+)'
        priority_pattern = r'(?:Priority|Severity)\s*:?\s*([^\n]+)'
        test_data_pattern = r'(?:Test Data|Data)\s*:?\s*([^\n]+)'

        # One block per "Test Case ..." / "TC ..." heading.
        blocks = re.split(r'\n\s*(?=(?:Test Case|TC)\s*(?:ID|#|\d+))', text_response, flags=re.IGNORECASE)

        for block in blocks:
            if len(block.strip()) < 30:
                continue  # too short to be a real test case

            n = len(test_cases) + 1
            test_case = {
                'Test_Case_ID': _grab(test_case_pattern, block, f"TC_{n:03d}"),
                'Title': _grab(title_pattern, block, f"Test Case {n}"),
                'Preconditions': _grab(precondition_pattern, block, "N/A"),
                # Steps span multiple lines, so DOTALL is needed here.
                'Test_Steps': _grab(steps_pattern, block, "Steps not specified", re.IGNORECASE | re.DOTALL),
                'Expected_Results': _grab(expected_pattern, block, "Expected result not specified"),
                'Priority': _grab(priority_pattern, block, "Medium"),
                'Test_Data': _grab(test_data_pattern, block, "N/A"),
                'Created_Date': datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
                'Status': "New",
            }
            test_cases.append(test_case)

        # Fallback: no structured block matched -- scan line by line and
        # start a new case at every line mentioning a test-case keyword.
        if not test_cases:
            lines = [line.strip() for line in text_response.split('\n') if line.strip()]
            current_case = {}

            for line in lines:
                if any(keyword in line.lower() for keyword in ['test case', 'tc', 'scenario']):
                    if current_case:
                        test_cases.append(current_case)
                    current_case = {
                        'Test_Case_ID': f"TC_{len(test_cases)+1:03d}",
                        'Title': line[:100],
                        'Preconditions': "N/A",
                        'Test_Steps': "",
                        'Expected_Results': "",
                        'Priority': "Medium",
                        'Test_Data': "N/A",
                        'Created_Date': datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
                        'Status': "New"
                    }
                elif current_case:
                    # First free line becomes the steps, the next one the
                    # expected result; everything after is ignored.
                    if not current_case.get('Test_Steps'):
                        current_case['Test_Steps'] = line
                    elif not current_case.get('Expected_Results'):
                        current_case['Expected_Results'] = line

            if current_case:
                test_cases.append(current_case)

        return pd.DataFrame(test_cases) if test_cases else pd.DataFrame({
            'Test_Case_ID': ['TC_001'],
            'Title': ['Sample Test Case'],
            'Preconditions': ['N/A'],
            'Test_Steps': ['Parse failed - manual review needed'],
            'Expected_Results': ['Manual review needed'],
            'Priority': ['Medium'],
            'Test_Data': ['N/A'],
            'Created_Date': [datetime.now().strftime("%Y-%m-%d %H:%M:%S")],
            'Status': ['New']
        })

    except Exception as e:
        # Surface the parsing failure as an explicit error row instead of
        # raising into the Gradio callback.
        return pd.DataFrame({
            'Test_Case_ID': ['TC_001'],
            'Title': ['Parsing Error'],
            'Preconditions': ['N/A'],
            'Test_Steps': [f'Error: {str(e)}'],
            'Expected_Results': ['Manual review needed'],
            'Priority': ['High'],
            'Test_Data': ['N/A'],
            'Created_Date': [datetime.now().strftime("%Y-%m-%d %H:%M:%S")],
            'Status': ['Error']
        })
155
+
156
+ def parse_api_tests_to_dataframe(text_response):
157
+ """Enhanced API test parsing"""
158
+ try:
159
+ api_tests = []
160
+
161
+ # Enhanced patterns for API tests
162
+ test_id_pattern = r'(?:Test Case|API Test|Test)\s*(?:ID|#)?\s*:?\s*([^\n]+)'
163
+ method_pattern = r'(?:HTTP Method|Method)\s*:?\s*([^\n]+)'
164
+ endpoint_pattern = r'(?:Endpoint|URL|Path)\s*:?\s*([^\n]+)'
165
+ description_pattern = r'(?:Description|Test Description)\s*:?\s*([^\n]+)'
166
+ headers_pattern = r'(?:Request Headers?|Headers)\s*:?\s*((?:[^\n]*\n?)*?)(?=Request Body|Expected|Test Case|$)'
167
+ body_pattern = r'(?:Request Body|Body|Payload)\s*:?\s*((?:[^\n]*\n?)*?)(?=Expected|Response|Test Case|$)'
168
+ status_pattern = r'(?:Expected Status|Status Code|Response Code)\s*:?\s*([^\n]+)'
169
+ response_pattern = r'(?:Expected Response|Response)\s*:?\s*((?:[^\n]*\n?)*?)(?=Test Case|$)'
170
+ category_pattern = r'(?:Category|Type)\s*:?\s*([^\n]+)'
171
+
172
+ blocks = re.split(r'\n\s*(?=(?:Test Case|API Test))', text_response, flags=re.IGNORECASE)
173
+
174
+ for block in blocks:
175
+ if len(block.strip()) < 30:
176
+ continue
177
+
178
+ api_test = {}
179
+
180
+ id_match = re.search(test_id_pattern, block, re.IGNORECASE)
181
+ api_test['Test_Case_ID'] = id_match.group(1).strip() if id_match else f"API_TC_{len(api_tests)+1:03d}"
182
+
183
+ method_match = re.search(method_pattern, block, re.IGNORECASE)
184
+ api_test['HTTP_Method'] = method_match.group(1).strip() if method_match else "GET"
185
+
186
+ endpoint_match = re.search(endpoint_pattern, block, re.IGNORECASE)
187
+ api_test['Endpoint'] = endpoint_match.group(1).strip() if endpoint_match else "/api/endpoint"
188
+
189
+ desc_match = re.search(description_pattern, block, re.IGNORECASE)
190
+ api_test['Description'] = desc_match.group(1).strip() if desc_match else "API Test Description"
191
+
192
+ headers_match = re.search(headers_pattern, block, re.IGNORECASE | re.DOTALL)
193
+ api_test['Request_Headers'] = headers_match.group(1).strip() if headers_match else "Content-Type: application/json"
194
+
195
+ body_match = re.search(body_pattern, block, re.IGNORECASE | re.DOTALL)
196
+ api_test['Request_Body'] = body_match.group(1).strip() if body_match else "N/A"
197
+
198
+ status_match = re.search(status_pattern, block, re.IGNORECASE)
199
+ api_test['Expected_Status_Code'] = status_match.group(1).strip() if status_match else "200"
200
+
201
+ response_match = re.search(response_pattern, block, re.IGNORECASE | re.DOTALL)
202
+ api_test['Expected_Response'] = response_match.group(1).strip() if response_match else "Success response"
203
+
204
+ category_match = re.search(category_pattern, block, re.IGNORECASE)
205
+ api_test['Test_Category'] = category_match.group(1).strip() if category_match else "Functional"
206
+
207
+ api_test['Created_Date'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
208
+ api_test['Status'] = "New"
209
+
210
+ api_tests.append(api_test)
211
+
212
+ return pd.DataFrame(api_tests) if api_tests else pd.DataFrame({
213
+ 'Test_Case_ID': ['API_TC_001'],
214
+ 'HTTP_Method': ['GET'],
215
+ 'Endpoint': ['/api/test'],
216
+ 'Description': ['Sample API Test'],
217
+ 'Request_Headers': ['Content-Type: application/json'],
218
+ 'Request_Body': ['N/A'],
219
+ 'Expected_Status_Code': ['200'],
220
+ 'Expected_Response': ['Success'],
221
+ 'Test_Category': ['Functional'],
222
+ 'Created_Date': [datetime.now().strftime("%Y-%m-%d %H:%M:%S")],
223
+ 'Status': ['New']
224
+ })
225
+
226
+ except Exception as e:
227
+ return pd.DataFrame({
228
+ 'Test_Case_ID': ['API_TC_001'],
229
+ 'HTTP_Method': ['GET'],
230
+ 'Endpoint': ['/api/error'],
231
+ 'Description': [f'Parsing Error: {str(e)}'],
232
+ 'Request_Headers': ['Content-Type: application/json'],
233
+ 'Request_Body': ['N/A'],
234
+ 'Expected_Status_Code': ['500'],
235
+ 'Expected_Response': ['Error'],
236
+ 'Test_Category': ['Error'],
237
+ 'Created_Date': [datetime.now().strftime("%Y-%m-%d %H:%M:%S")],
238
+ 'Status': ['Error']
239
+ })
240
+
241
def create_download_csv(df, filename_prefix):
    """Write *df* to a timestamped CSV in the system temp dir and return its path.

    Args:
        df: DataFrame to serialize.
        filename_prefix: basename prefix, e.g. "generated_test_cases".

    Returns:
        str: path of the CSV file that was written.
    """
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    filename = f"{filename_prefix}_{timestamp}.csv"
    # Bug fix: the computed filename was previously ignored and every
    # export went to the same literal path, so concurrent downloads
    # overwrote each other.  tempfile.gettempdir() also works on
    # platforms without a /tmp directory.
    csv_path = os.path.join(tempfile.gettempdir(), filename)
    df.to_csv(csv_path, index=False)
    return csv_path
248
+
249
+ # Enhanced test case generation with better prompts
250
def generate_test_cases_from_text(requirements, test_types, priority_level):
    """Generate manual test cases from textual requirements via the LLM.

    Args:
        requirements: free-text requirements to derive test cases from.
        test_types: UI label (e.g. "Functional Tests") steering scenario mix.
        priority_level: UI label (e.g. "All Priorities") steering prioritization.

    Returns:
        (response, csv_path): the raw LLM text and the path of a CSV export,
        or (error_message, None) when the API call failed.
    """

    # The FORMAT section below must stay in sync with the regexes in
    # parse_test_cases_to_dataframe, which parse this exact labelling.
    enhanced_prompt = f"""
    As an expert QA engineer, create comprehensive and detailed test cases for the following requirements:

    REQUIREMENTS:
    {requirements}

    INSTRUCTIONS:
    - Generate {test_types} test scenarios
    - Focus on {priority_level} priority tests
    - Follow standard test case format exactly
    - Include both positive and negative scenarios
    - Consider edge cases and boundary conditions
    - Make test steps clear and actionable

    FORMAT EACH TEST CASE AS:
    Test Case ID: TC_XXX
    Test Case Title: [Clear, descriptive title]
    Preconditions: [What must be true before testing]
    Test Steps:
    1. [Clear step-by-step instructions]
    2. [Each step should be specific and actionable]
    3. [Include test data where applicable]
    Expected Results: [What should happen when test passes]
    Priority: [High/Medium/Low]
    Test Data: [Specific data needed for testing]

    Generate at least 5-8 comprehensive test cases covering different scenarios.
    """

    messages = [{"role": "user", "content": enhanced_prompt}]
    response = call_openai_chat(messages, max_tokens=4000)

    # call_openai_chat reports failure by returning a string containing
    # "Error:"; in that case skip parsing and surface the message.
    if "Error:" in response:
        return response, None

    df = parse_test_cases_to_dataframe(response)
    csv_path = create_download_csv(df, "generated_test_cases")

    return response, csv_path
292
+
293
def generate_test_cases_from_image(image, test_focus):
    """Generate test cases from a requirements image (mockup/wireframe).

    Args:
        image: PIL image or file path accepted by encode_image.
            NOTE(review): a None image (nothing uploaded) would fail inside
            encode_image -- confirm the Gradio component guarantees a value.
        test_focus: UI label (e.g. "UI/UX Testing") steering the analysis.

    Returns:
        (response, csv_path): raw LLM text and CSV export path, or
        (error_message, None) when the API call failed.
    """

    # Image goes to the Vision API as an inline base64 data URL.
    base64_image = encode_image(image)

    # FORMAT section must stay in sync with parse_test_cases_to_dataframe.
    enhanced_prompt = f"""
    As an expert QA engineer, analyze this requirements image/mockup/wireframe and create comprehensive test cases.

    FOCUS AREA: {test_focus}

    INSTRUCTIONS:
    - Examine all UI elements, workflows, and user interactions visible
    - Consider usability, functionality, and user experience aspects
    - Generate test cases for different user scenarios
    - Include accessibility and responsive design considerations
    - Cover both happy path and error scenarios

    FORMAT EACH TEST CASE AS:
    Test Case ID: TC_XXX
    Test Case Title: [Clear, descriptive title]
    Preconditions: [Setup requirements]
    Test Steps:
    1. [Detailed step-by-step instructions]
    2. [Include specific UI elements to interact with]
    3. [Specify expected user actions]
    Expected Results: [Expected behavior/outcome]
    Priority: [High/Medium/Low based on business impact]
    Test Data: [Required test data]

    Generate comprehensive test cases covering all visible functionality.
    """

    messages = [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": enhanced_prompt},
                {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{base64_image}"}}
            ]
        }
    ]

    response = call_openai_chat(messages, model="gpt-4o-mini", max_tokens=4000)

    # call_openai_chat signals failure via an "Error:" substring.
    if "Error:" in response:
        return response, None

    df = parse_test_cases_to_dataframe(response)
    csv_path = create_download_csv(df, "image_based_test_cases")

    return response, csv_path
344
+
345
def optimize_test_cases(existing_cases, focus_areas, optimization_goal):
    """Rewrite existing test cases with LLM-driven improvements.

    Args:
        existing_cases: the user's current test cases as free text.
        focus_areas: list of checkbox labels; empty list falls back to
            "overall quality".
        optimization_goal: dropdown label describing the optimization aim.

    Returns:
        (response, csv_path): optimized test cases text and a CSV export,
        or (error_message, None) when the API call failed.
    """

    # Empty selection degrades gracefully to a generic focus.
    focus_text = ", ".join(focus_areas) if focus_areas else "overall quality"

    # FORMAT section must stay in sync with parse_test_cases_to_dataframe.
    enhanced_prompt = f"""
    As a senior QA engineer, optimize the following test cases with focus on {focus_text}.

    OPTIMIZATION GOAL: {optimization_goal}

    EXISTING TEST CASES:
    {existing_cases}

    OPTIMIZATION REQUIREMENTS:
    1. Improve clarity and specificity of test steps
    2. Enhance test data specifications
    3. Optimize test coverage and reduce redundancy
    4. Ensure traceability to requirements
    5. Improve maintainability and reusability
    6. Add risk-based prioritization
    7. Include automation feasibility assessment

    PROVIDE:
    1. Optimized test cases in standard format
    2. Summary of improvements made
    3. Recommendations for test strategy
    4. Risk assessment and mitigation suggestions

    FORMAT OPTIMIZED TEST CASES AS:
    Test Case ID: TC_XXX
    Test Case Title: [Improved title]
    Preconditions: [Enhanced preconditions]
    Test Steps: [Optimized steps with better clarity]
    Expected Results: [More specific expected results]
    Priority: [Risk-based priority]
    Test Data: [Detailed test data specifications]
    Automation Feasibility: [High/Medium/Low]
    """

    messages = [{"role": "user", "content": enhanced_prompt}]
    response = call_openai_chat(messages, max_tokens=4000)

    # call_openai_chat signals failure via an "Error:" substring.
    if "Error:" in response:
        return response, None

    df = parse_test_cases_to_dataframe(response)
    csv_path = create_download_csv(df, "optimized_test_cases")

    return response, csv_path
394
+
395
def answer_qa_question(test_cases_content, question, analysis_type):
    """Answer a user question about a set of test cases via the LLM.

    Args:
        test_cases_content: the test cases (pasted text or file contents).
        question: the user's free-text question.
        analysis_type: dropdown label (e.g. "Coverage Analysis") steering
            the style of the answer.

    Returns:
        str: the LLM's answer.  Unlike the generators, no "Error:" check
        is performed here -- an API error string is returned to the UI
        verbatim, which is acceptable for a read-only answer box.
    """

    enhanced_prompt = f"""
    As a QA expert, analyze the provided test cases and answer the following question with {analysis_type} analysis.

    TEST CASES:
    {test_cases_content}

    QUESTION: {question}

    ANALYSIS TYPE: {analysis_type}

    INSTRUCTIONS:
    - Provide detailed, actionable insights
    - Reference specific test cases where relevant
    - Include quantitative analysis where possible
    - Suggest improvements or recommendations
    - Consider industry best practices

    If the question relates to:
    - Coverage: Analyze what's covered and gaps
    - Quality: Assess test case quality and completeness
    - Strategy: Provide strategic recommendations
    - Automation: Evaluate automation potential
    - Risk: Identify and assess testing risks
    """

    messages = [{"role": "user", "content": enhanced_prompt}]
    response = call_openai_chat(messages, max_tokens=3000)

    return response
427
+
428
def fetch_swagger_spec(url):
    """Download and parse an OpenAPI/Swagger document from *url*.

    Returns the parsed spec on success, or an {"error": message} dict on
    any failure -- callers probe for the "error" key instead of catching
    exceptions.
    """
    request_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36',
        'Accept': 'application/json, application/yaml, text/yaml, */*'
    }

    try:
        reply = requests.get(url, timeout=30, headers=request_headers)
        reply.raise_for_status()

        declared_type = reply.headers.get('content-type', '').lower()
        looks_like_yaml = 'yaml' in declared_type or url.endswith(('.yaml', '.yml'))

        # Choose the parser from the declared content type, falling back
        # to the URL's file extension.
        if looks_like_yaml:
            return yaml.safe_load(reply.text)
        return reply.json()

    except requests.exceptions.Timeout:
        return {"error": "Request timeout - URL took too long to respond"}
    except requests.exceptions.ConnectionError:
        return {"error": "Connection error - Unable to reach the URL"}
    except requests.exceptions.HTTPError as err:
        return {"error": f"HTTP error {err.response.status_code}: {err.response.reason}"}
    except yaml.YAMLError as err:
        return {"error": f"YAML parsing error: {str(err)}"}
    except json.JSONDecodeError as err:
        return {"error": f"JSON parsing error: {str(err)}"}
    except Exception as err:
        return {"error": f"Unexpected error: {str(err)}"}
458
+
459
def generate_api_test_cases(swagger_url, endpoints_filter, test_types, include_security):
    """Generate API test cases from a Swagger/OpenAPI specification URL.

    Args:
        swagger_url: URL of the swagger.json / openapi.yaml document.
        endpoints_filter: optional free-text endpoint filter; empty means all.
        test_types: list of checkbox labels included in the prompt.
        include_security: when True, security scenarios are requested too.

    Returns:
        (response, csv_path): raw LLM text and a CSV export path, or
        (error_message, None) on fetch/API failure.
    """

    spec = fetch_swagger_spec(swagger_url)

    # fetch_swagger_spec reports failure as an {"error": ...} dict.
    if "error" in spec:
        return f"Error fetching Swagger spec: {spec['error']}", None

    # Serialize once and truncate to keep the prompt within the context
    # window.  The original serialized the spec three times and measured
    # the length of a *different* (non-indented) dump than the one it
    # truncated; the limit now applies to the string actually sent.
    spec_json = json.dumps(spec, indent=2)
    spec_summary = spec_json[:8000] + "..." if len(spec_json) > 8000 else spec_json

    security_instruction = "\n- Include security testing scenarios (authentication, authorization, input validation)" if include_security else ""

    # FORMAT section must stay in sync with parse_api_tests_to_dataframe.
    enhanced_prompt = f"""
    As an API testing expert, generate comprehensive test cases for the following OpenAPI/Swagger specification:

    SWAGGER SPECIFICATION:
    {spec_summary}

    FILTER: {endpoints_filter if endpoints_filter else "All endpoints"}
    TEST TYPES: {", ".join(test_types)}

    INSTRUCTIONS:
    - Create detailed test cases for each endpoint
    - Include positive, negative, and boundary test scenarios
    - Cover different HTTP methods and status codes
    - Include request/response validation
    - Test error handling and edge cases{security_instruction}
    - Consider API rate limiting and performance

    FORMAT EACH API TEST CASE AS:
    Test Case ID: API_TC_XXX
    HTTP Method: [GET/POST/PUT/DELETE]
    Endpoint: [Full endpoint path]
    Test Description: [What this test validates]
    Request Headers: [Required headers with examples]
    Request Body: [JSON payload if applicable]
    Expected Status Code: [HTTP status code]
    Expected Response: [Expected response structure/content]
    Test Category: [Functional/Security/Performance/Negative]
    Test Data: [Specific test data requirements]

    Generate comprehensive test coverage for the API.
    """

    messages = [{"role": "user", "content": enhanced_prompt}]
    response = call_openai_chat(messages, max_tokens=5000)

    # call_openai_chat signals failure via an "Error:" substring.
    if "Error:" in response:
        return response, None

    df = parse_api_tests_to_dataframe(response)
    csv_path = create_download_csv(df, "api_test_cases")

    return response, csv_path
514
+
515
def generate_automation_code(manual_tests, framework, language, include_reporting):
    """Convert manual test cases into automation code via the LLM.

    Args:
        manual_tests: manual test cases as free text.
        framework: automation framework label (passed straight to the prompt).
        language: target programming language label.
        include_reporting: when True, reporting/logging is requested too.

    Returns:
        (response, csv_path): the generated code text and a CSV of run
        metadata.  NOTE(review): unlike the other generators there is no
        "Error:" check here, so on API failure the metadata CSV is still
        produced and the error string is returned as the "code".
    """

    reporting_instruction = "\n- Include test reporting and logging mechanisms" if include_reporting else ""

    enhanced_prompt = f"""
    As a test automation expert, convert the following manual test cases into production-ready automation code.

    MANUAL TEST CASES:
    {manual_tests}

    FRAMEWORK: {framework}
    LANGUAGE: {language}

    REQUIREMENTS:
    - Generate complete, executable automation code
    - Follow best practices and design patterns
    - Include proper error handling and assertions
    - Implement page object model (if applicable)
    - Add configuration management
    - Include setup and teardown methods
    - Use appropriate wait strategies
    - Implement data-driven testing approaches{reporting_instruction}
    - Add meaningful comments and documentation
    - Include dependency management (requirements/package files)

    DELIVERABLES:
    1. Main test file with complete implementation
    2. Configuration file (if applicable)
    3. Requirements/dependencies file
    4. README with setup instructions
    5. Best practices documentation

    Generate production-ready, maintainable automation code.
    """

    messages = [{"role": "user", "content": enhanced_prompt}]
    response = call_openai_chat(messages, max_tokens=5000)

    # Create metadata DataFrame describing the generated artifact; the
    # CSV is offered as a download next to the code itself.
    automation_df = pd.DataFrame({
        'Framework': [framework],
        'Language': [language],
        'Code_Lines': [len(response.split('\n'))],
        'Generated_Date': [datetime.now().strftime("%Y-%m-%d %H:%M:%S")],
        'Include_Reporting': [include_reporting],
        'Estimated_Setup_Time': ['30-60 minutes'],
        # Heuristic: more than 100 generated lines counts as "Medium".
        'Complexity': ['Medium' if len(response.split('\n')) > 100 else 'Low']
    })

    csv_path = create_download_csv(automation_df, "automation_metadata")

    return response, csv_path
568
+
569
def compare_images(expected_image, actual_image, comparison_type, sensitivity):
    """Ask the vision model to compare an expected vs. actual screenshot.

    Args:
        expected_image: baseline image (assumed PIL-style: has .save and
            .size -- TODO confirm against the upload component).
        actual_image: image under test, same assumption.
        comparison_type: label describing the kind of comparison.
        sensitivity: one of "High" / "Medium" / "Low".  Any other value
            raises KeyError below -- presumably the UI dropdown constrains
            it; verify against the (not shown here) compare tab.

    Returns:
        (response, csv_path): the model's analysis text and a CSV summary
        of the comparison run.  No "Error:" check is performed; an API
        error string is summarized like a normal response.
    """

    expected_b64 = encode_image(expected_image)
    actual_b64 = encode_image(actual_image)

    # Maps the sensitivity level to the instruction embedded in the prompt.
    sensitivity_instruction = {
        "High": "Detect even minor differences in pixels, colors, and spacing",
        "Medium": "Focus on noticeable differences that affect user experience",
        "Low": "Only report significant differences that impact functionality"
    }

    enhanced_prompt = f"""
    As a visual testing expert, perform a detailed {comparison_type} between these two images.

    COMPARISON TYPE: {comparison_type}
    SENSITIVITY: {sensitivity} - {sensitivity_instruction[sensitivity]}

    The first image is the expected result, the second is the actual result.

    ANALYSIS REQUIREMENTS:
    1. Overall Pass/Fail determination
    2. Specific differences with locations and descriptions
    3. Similarity percentage calculation
    4. Impact assessment (High/Medium/Low) for each difference
    5. Root cause analysis for major differences
    6. Recommendations for fixing issues
    7. Areas that match perfectly
    8. Suggestions for improving visual test stability

    FOCUS AREAS:
    - Layout and positioning accuracy
    - Color consistency and contrast
    - Text rendering and typography
    - Image quality and resolution
    - Responsive design elements
    - Cross-browser compatibility indicators

    Provide actionable insights for the development team.
    """

    # Both images are attached to a single user message; order matters
    # (the prompt tells the model the first one is the baseline).
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": enhanced_prompt},
                {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{expected_b64}"}},
                {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{actual_b64}"}}
            ]
        }
    ]

    response = call_openai_chat(messages, model="gpt-4o-mini", max_tokens=3000)

    # Create comparison summary DataFrame offered as a CSV download.
    comparison_df = pd.DataFrame({
        'Comparison_Type': [comparison_type],
        'Sensitivity': [sensitivity],
        'Timestamp': [datetime.now().strftime("%Y-%m-%d %H:%M:%S")],
        'Expected_Image_Size': [f"{expected_image.size[0]}x{expected_image.size[1]}"],
        'Actual_Image_Size': [f"{actual_image.size[0]}x{actual_image.size[1]}"],
        'Analysis_Length': [len(response)],
        'Status': ['Completed']
    })

    csv_path = create_download_csv(comparison_df, "visual_comparison_summary")

    return response, csv_path
637
+
638
+ # Create Gradio interface
639
+ def create_gradio_interface():
640
+ with gr.Blocks(title="πŸ§ͺ AI Testing Magic", theme=gr.themes.Soft()) as app:
641
+
642
+ gr.Markdown("""
643
+ # πŸ§ͺ AI Testing Magic
644
+ ### Bringing magic to every phase of software testing! 🌟
645
+
646
+ Choose a testing tool from the tabs below to get started.
647
+ """)
648
+
649
+ with gr.Tabs():
650
+
651
+ # Test Case Creation Tab
652
+ with gr.TabItem("πŸ“ Create Test Cases"):
653
+ gr.Markdown("### Create comprehensive test cases from requirements")
654
+
655
+ with gr.Tabs():
656
+ with gr.TabItem("Text Requirements"):
657
+ with gr.Row():
658
+ with gr.Column():
659
+ requirements_input = gr.Textbox(
660
+ label="Requirements",
661
+ placeholder="Enter your requirements here...",
662
+ lines=8
663
+ )
664
+ test_types = gr.Dropdown(
665
+ choices=["Functional Tests", "Integration Tests", "Regression Tests", "User Acceptance Tests", "All Types"],
666
+ value="Functional Tests",
667
+ label="Test Types"
668
+ )
669
+ priority_level = gr.Dropdown(
670
+ choices=["High Priority", "Medium Priority", "Low Priority", "All Priorities"],
671
+ value="All Priorities",
672
+ label="Priority Focus"
673
+ )
674
+ generate_btn = gr.Button("✨ Generate Test Cases", variant="primary")
675
+
676
+ with gr.Column():
677
+ test_cases_output = gr.Textbox(
678
+ label="Generated Test Cases",
679
+ lines=15,
680
+ max_lines=20
681
+ )
682
+ csv_download = gr.File(label="Download CSV")
683
+
684
+ generate_btn.click(
685
+ fn=generate_test_cases_from_text,
686
+ inputs=[requirements_input, test_types, priority_level],
687
+ outputs=[test_cases_output, csv_download]
688
+ )
689
+
690
+ with gr.TabItem("Image Requirements"):
691
+ with gr.Row():
692
+ with gr.Column():
693
+ image_input = gr.Image(
694
+ label="Upload Requirements Image",
695
+ type="pil"
696
+ )
697
+ test_focus = gr.Dropdown(
698
+ choices=["UI/UX Testing", "Functional Testing", "Usability Testing", "Accessibility Testing", "All Areas"],
699
+ value="All Areas",
700
+ label="Test Focus"
701
+ )
702
+ generate_img_btn = gr.Button("✨ Generate Test Cases from Image", variant="primary")
703
+
704
+ with gr.Column():
705
+ img_test_cases_output = gr.Textbox(
706
+ label="Generated Test Cases",
707
+ lines=15,
708
+ max_lines=20
709
+ )
710
+ img_csv_download = gr.File(label="Download CSV")
711
+
712
+ generate_img_btn.click(
713
+ fn=generate_test_cases_from_image,
714
+ inputs=[image_input, test_focus],
715
+ outputs=[img_test_cases_output, img_csv_download]
716
+ )
717
+
718
+ # Test Case Optimization Tab
719
+ with gr.TabItem("⚑ Optimize Test Cases"):
720
+ gr.Markdown("### Review and refine test cases for maximum effectiveness")
721
+
722
+ with gr.Row():
723
+ with gr.Column():
724
+ existing_cases_input = gr.Textbox(
725
+ label="Existing Test Cases",
726
+ placeholder="Paste your existing test cases here...",
727
+ lines=10
728
+ )
729
+ focus_areas = gr.CheckboxGroup(
730
+ choices=["Clarity", "Completeness", "Coverage", "Efficiency", "Maintainability", "Edge Cases", "Risk Assessment"],
731
+ value=["Clarity", "Coverage"],
732
+ label="Optimization Focus Areas"
733
+ )
734
+ optimization_goal = gr.Dropdown(
735
+ choices=["Improve Test Quality", "Reduce Test Execution Time", "Enhance Coverage", "Better Maintainability", "Risk-Based Optimization"],
736
+ value="Improve Test Quality",
737
+ label="Optimization Goal"
738
+ )
739
+ optimize_btn = gr.Button("πŸš€ Optimize Test Cases", variant="primary")
740
+
741
+ with gr.Column():
742
+ optimized_output = gr.Textbox(
743
+ label="Optimized Test Cases",
744
+ lines=15,
745
+ max_lines=20
746
+ )
747
+ opt_csv_download = gr.File(label="Download Optimized CSV")
748
+
749
+ optimize_btn.click(
750
+ fn=optimize_test_cases,
751
+ inputs=[existing_cases_input, focus_areas, optimization_goal],
752
+ outputs=[optimized_output, opt_csv_download]
753
+ )
754
+
755
+ # Q&A Assistant Tab
756
+ with gr.TabItem("❓ Q&A Assistant"):
757
+ gr.Markdown("### Ask questions about your test cases and get expert insights")
758
+
759
+ with gr.Row():
760
+ with gr.Column():
761
+ test_cases_file = gr.File(
762
+ label="Upload Test Cases File (TXT, CSV, JSON)",
763
+ file_types=[".txt", ".csv", ".json"]
764
+ )
765
+ test_cases_text = gr.Textbox(
766
+ label="Or Paste Test Cases Here",
767
+ placeholder="Paste your test cases...",
768
+ lines=8
769
+ )
770
+ question_input = gr.Textbox(
771
+ label="Your Question",
772
+ placeholder="e.g., What test cases cover the login functionality?",
773
+ lines=2
774
+ )
775
+ analysis_type = gr.Dropdown(
776
+ choices=["Coverage Analysis", "Quality Assessment", "Strategy Recommendations", "Automation Feasibility", "Risk Analysis"],
777
+ value="Coverage Analysis",
778
+ label="Analysis Type"
779
+ )
780
+ qa_btn = gr.Button("πŸ” Get Answer", variant="primary")
781
+
782
+ with gr.Column():
783
+ qa_output = gr.Textbox(
784
+ label="Expert Answer",
785
+ lines=15,
786
+ max_lines=20
787
+ )
788
+
789
def process_qa(file, text, question, analysis):
    """Resolve test-case content from an uploaded file or pasted text, then delegate to the Q&A engine.

    Args:
        file: Gradio file object (or None). CSV uploads are parsed with pandas;
            any other upload is read as plain text.
        text: Pasted test-case text, used when no file is supplied.
        question: The user's question about the test cases.
        analysis: Selected analysis type (e.g. "Coverage Analysis").

    Returns:
        The result of answer_qa_question (the expert answer string).
    """
    content = text
    if file:
        try:
            if file.name.endswith('.csv'):
                df = pd.read_csv(file.name)
                content = df.to_string()
            else:
                # Explicit UTF-8 avoids platform-dependent default encodings
                # (e.g. cp1252 on Windows); errors="replace" keeps the read
                # best-effort instead of raising on stray bytes.
                with open(file.name, 'r', encoding='utf-8', errors='replace') as f:
                    content = f.read()
        except Exception as e:
            # Best-effort by design: surface the read failure as content so the
            # UI still responds instead of crashing the event handler.
            content = f"Error reading file: {str(e)}"

    return answer_qa_question(content, question, analysis)
803
+
804
+ qa_btn.click(
805
+ fn=process_qa,
806
+ inputs=[test_cases_file, test_cases_text, question_input, analysis_type],
807
+ outputs=[qa_output]
808
+ )
809
+
810
+ # API Test Cases Tab
811
+ with gr.TabItem("πŸ”— API Test Cases"):
812
+ gr.Markdown("### Generate comprehensive API test cases from Swagger/OpenAPI specifications")
813
+
814
+ with gr.Row():
815
+ with gr.Column():
816
+ swagger_url = gr.Textbox(
817
+ label="Swagger/OpenAPI URL",
818
+ placeholder="https://petstore.swagger.io/v2/swagger.json",
819
+ lines=1
820
+ )
821
+ endpoints_filter = gr.Textbox(
822
+ label="Filter Endpoints (Optional)",
823
+ placeholder="e.g., /users, /pets, /orders",
824
+ lines=1
825
+ )
826
+ api_test_types = gr.CheckboxGroup(
827
+ choices=["Positive Tests", "Negative Tests", "Boundary Tests", "Security Tests", "Performance Tests"],
828
+ value=["Positive Tests", "Negative Tests"],
829
+ label="Test Types"
830
+ )
831
+ include_security = gr.Checkbox(
832
+ label="Include Security Testing",
833
+ value=True
834
+ )
835
+ api_generate_btn = gr.Button("πŸš€ Generate API Test Cases", variant="primary")
836
+
837
+ with gr.Column():
838
+ api_output = gr.Textbox(
839
+ label="Generated API Test Cases",
840
+ lines=15,
841
+ max_lines=20
842
+ )
843
+ api_csv_download = gr.File(label="Download API Tests CSV")
844
+
845
+ api_generate_btn.click(
846
+ fn=generate_api_test_cases,
847
+ inputs=[swagger_url, endpoints_filter, api_test_types, include_security],
848
+ outputs=[api_output, api_csv_download]
849
+ )
850
+
851
+ # Automation Code Tab
852
+ with gr.TabItem("πŸ€– Automate Manual Tests"):
853
+ gr.Markdown("### Convert manual test cases into production-ready automation code")
854
+
855
+ with gr.Row():
856
+ with gr.Column():
857
+ manual_tests = gr.Textbox(
858
+ label="Manual Test Cases",
859
+ placeholder="Paste your manual test cases here...",
860
+ lines=10
861
+ )
862
+ automation_framework = gr.Dropdown(
863
+ choices=[
864
+ "Selenium WebDriver (Python)",
865
+ "Playwright (Python)",
866
+ "Cypress (JavaScript)",
867
+ "Selenium WebDriver (Java)",
868
+ "RestAssured (Java)",
869
+ "TestNG (Java)",
870
+ "PyTest (Python)",
871
+ "Robot Framework"
872
+ ],
873
+ value="Selenium WebDriver (Python)",
874
+ label="Automation Framework"
875
+ )
876
+ programming_language = gr.Dropdown(
877
+ choices=["Python", "JavaScript", "Java", "C#", "TypeScript"],
878
+ value="Python",
879
+ label="Programming Language"
880
+ )
881
+ include_reporting = gr.Checkbox(
882
+ label="Include Test Reporting",
883
+ value=True
884
+ )
885
+ automation_btn = gr.Button("πŸ”§ Generate Automation Code", variant="primary")
886
+
887
+ with gr.Column():
888
+ automation_output = gr.Code(
889
+ label="Generated Automation Code",
890
+ language="python",
891
+ lines=15
892
+ )
893
+ automation_csv_download = gr.File(label="Download Metadata CSV")
894
+
895
def update_code_language(lang):
    """Return an updated Code component whose syntax highlighting matches
    the selected programming language (defaults to python)."""
    highlight = {
        "Python": "python",
        "JavaScript": "javascript",
        "Java": "java",
        "C#": "csharp",
        "TypeScript": "typescript",
    }.get(lang, "python")
    return gr.Code(language=highlight)
904
+
905
+ programming_language.change(
906
+ fn=update_code_language,
907
+ inputs=[programming_language],
908
+ outputs=[automation_output]
909
+ )
910
+
911
+ automation_btn.click(
912
+ fn=generate_automation_code,
913
+ inputs=[manual_tests, automation_framework, programming_language, include_reporting],
914
+ outputs=[automation_output, automation_csv_download]
915
+ )
916
+
917
+ # Visual Validation Tab
918
+ with gr.TabItem("πŸ‘οΈ Visual Validation"):
919
+ gr.Markdown("### Compare expected and actual images with AI-powered analysis")
920
+
921
+ with gr.Row():
922
+ with gr.Column():
923
+ expected_image = gr.Image(
924
+ label="Expected Image",
925
+ type="pil"
926
+ )
927
+ actual_image = gr.Image(
928
+ label="Actual Image",
929
+ type="pil"
930
+ )
931
+ comparison_type = gr.Dropdown(
932
+ choices=["Layout Comparison", "Color Comparison", "Text Comparison", "Complete UI Comparison", "Responsive Design Check"],
933
+ value="Complete UI Comparison",
934
+ label="Comparison Type"
935
+ )
936
+ sensitivity = gr.Dropdown(
937
+ choices=["High", "Medium", "Low"],
938
+ value="Medium",
939
+ label="Detection Sensitivity"
940
+ )
941
+ visual_btn = gr.Button("πŸ” Compare Images", variant="primary")
942
+
943
+ with gr.Column():
944
+ visual_output = gr.Textbox(
945
+ label="Comparison Results",
946
+ lines=15,
947
+ max_lines=20
948
+ )
949
+ visual_csv_download = gr.File(label="Download Comparison Summary CSV")
950
+
951
+ visual_btn.click(
952
+ fn=compare_images,
953
+ inputs=[expected_image, actual_image, comparison_type, sensitivity],
954
+ outputs=[visual_output, visual_csv_download]
955
+ )
956
+
957
+ # Footer with enhanced information
958
+ gr.Markdown("""
959
+ ---
960
+ ## 🌟 Enhanced Features
961
+
962
+ ### βœ… **Streamlined Test Case Creation**
963
+ - Create test cases from text requirements with priority and type selection
964
+ - Generate test cases from UI mockups and wireframes using AI vision
965
+ - Enhanced parsing with better accuracy and structured output
966
+
967
+ ### ⚑ **Advanced Test Case Optimization**
968
+ - Multi-dimensional optimization focusing on specific quality areas
969
+ - Risk-based prioritization and automation feasibility assessment
970
+ - Detailed improvement recommendations and best practices
971
+
972
+ ### ❓ **Intelligent Q&A Assistant**
973
+ - Multiple analysis types: coverage, quality, strategy, automation, risk
974
+ - Support for various file formats and intelligent content parsing
975
+ - Expert-level insights with actionable recommendations
976
+
977
+ ### πŸ”— **Comprehensive API Test Generation**
978
+ - Enhanced Swagger/OpenAPI parsing with better error handling
979
+ - Security testing scenarios and performance considerations
980
+ - Multiple test types with detailed request/response validation
981
+
982
+ ### πŸ€– **Production-Ready Automation Code**
983
+ - Support for modern frameworks and best practices
984
+ - Complete project structure with configuration and dependencies
985
+ - Test reporting integration and maintainable code patterns
986
+
987
+ ### πŸ‘οΈ **Advanced Visual Validation**
988
+ - Multiple comparison types with configurable sensitivity
989
+ - Detailed difference analysis with impact assessment
990
+ - Cross-browser and responsive design considerations
991
+
992
+ ### πŸ“Š **Enhanced Data Export**
993
+ - Structured CSV exports with timestamps for all features
994
+ - Comprehensive metadata tracking and version control
995
+ - Professional reporting formats for stakeholder communication
996
+
997
+ ---
998
+ *Made with ❀️ using Gradio and OpenAI GPT-4 | Enhanced with better prompts and accuracy*
999
+ """)
1000
+
1001
+ return app
1002
+
1003
# Launch the application
if __name__ == "__main__":
    # Build the full tabbed Gradio UI and start the web server.
    app = create_gradio_interface()
    app.launch(
        server_name="0.0.0.0",  # bind all interfaces — required for containerized hosting
        server_port=7860,  # conventional Gradio port
        share=True,  # NOTE(review): public share tunnel — likely unnecessary on hosted deployments; confirm
        debug=True  # NOTE(review): verbose errors exposed to users — consider disabling in production
    )