ivmpfa committed on
Commit
6818f4b
·
verified ·
1 Parent(s): 1b6bd65

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -17
app.py CHANGED
@@ -6,36 +6,34 @@ import torch
6
# Text-generation pipeline backed by GPT-2; float32 keeps it CPU-compatible.
model = pipeline(
    "text-generation",
    model="gpt2",
    max_length=200,             # total token budget (prompt + generation)
    temperature=0.7,
    early_stopping=True,
    torch_dtype=torch.float32,  # use float32 for CPU compatibility
)
14
 
15
def generate_test_cases(requirement):
    """Ask the GPT-2 pipeline for JSON test cases covering *requirement*.

    Returns the raw generated text (stripped); on any failure, returns an
    "Error: ..." string so the caller always receives plain text.
    """
    prompt = f"""
    Generate test cases for '{requirement}' in JSON format. Output only the array, take the below as example but return testcases for the specified requirement.

    Example format:
    [
    {{
    "id": 1,
    "title": "Valid Login",
    "steps": ["Enter valid email", "Enter valid password", "Click login"],
    "expected_result": "Redirect to dashboard"
    }},
    {{
    "id": 2,
    "title": "Invalid Password",
    "steps": ["Enter valid email", "Enter wrong password", "Click login"],
    "expected_result": "Error message displayed"
    }}
    ]
    """
    try:
        # Inference only — skip autograd bookkeeping.
        with torch.no_grad():
            outputs = model(prompt, max_time=10)
        return outputs[0]["generated_text"].strip()
    except Exception as e:
        # Best-effort: surface the failure as text instead of crashing the UI.
        return f"Error: {str(e)}"
 
6
# Text-generation pipeline backed by GPT-2; float32 keeps it CPU-compatible.
model = pipeline(
    "text-generation",
    model="gpt2",
    max_length=300,             # increased budget (prompt + generation) for detailed output
    do_sample=True,             # required: temperature is ignored under greedy decoding
    temperature=0.7,            # balance between creativity and stability
    early_stopping=True,        # NOTE(review): only affects beam search; harmless here
    torch_dtype=torch.float32,  # CPU compatibility
)
14
 
15
def generate_test_cases(requirement):
    """Ask the GPT-2 pipeline for a JSON array of test cases for *requirement*.

    Returns only the newly generated text (stripped); on any failure, returns
    an "Error: ..." string so the caller always receives plain text.
    """
    # Revised prompt to focus on the input requirement and avoid copying the example
    prompt = f"""
    Generate test cases for the requirement: '{requirement}' in JSON array format.
    Each test case must include "id", "title", "steps" (array of strings), and "expected_result".

    DO NOT use the example below in your response. Output only the JSON array, no explanations.

    Example format (IGNORE this when generating):
    [
    {{
    "id": 1,
    "title": "Example Test",
    "steps": ["Step 1", "Step 2"],
    "expected_result": "Expected outcome"
    }}
    ]
    """
    try:
        # Inference only — skip autograd bookkeeping.
        with torch.no_grad():
            # return_full_text=False: by default the pipeline echoes the whole
            # prompt (including the example JSON) back in generated_text,
            # which is exactly the output this prompt tries to forbid.
            result = model(prompt, max_time=15, return_full_text=False)[0]["generated_text"]
        return result.strip()
    except Exception as e:
        # Best-effort: surface the failure as text instead of crashing the UI.
        return f"Error: {str(e)}"