ivmpfa commited on
Commit
55c6272
·
verified ·
1 Parent(s): 582e2f6
Files changed (1) hide show
  1. app.py +10 -3
app.py CHANGED
@@ -1,13 +1,20 @@
# --- Previous revision of app.py (pre-image of this diff) ---
from transformers import pipeline
import torch  # provides the float16 dtype passed to the pipeline

# Text-generation pipeline backed by Llama-2 7B, loaded in half precision.
model = pipeline(
    "text-generation",
    model="meta-llama/llama-2-7b-hf",
    torch_dtype=torch.float16  # Reduce memory usage
)

def generate_test_cases(requirement):
    """Ask the model for JSON-formatted test cases covering *requirement*."""
    prompt = f"Generate test cases for '{requirement}' in JSON format. Output only the array."
    result = model(prompt, max_length=300)[0]["generated_text"]
    return result.strip()
 
 
 
 
 
 
from transformers import pipeline
import torch  # supplies the float16 dtype used when loading the model

# Use the GPT-Neo model, a larger and more capable alternative to GPT-2.
# Loading weights as float16 roughly halves memory use versus float32.
model = pipeline(
    "text-generation",
    model="EleutherAI/gpt-neo-1.3B",  # GPT-Neo 1.3B model
    torch_dtype=torch.float16,
)
10
 
11
def generate_test_cases(requirement):
    """Generate JSON-formatted test cases for a software requirement.

    Parameters
    ----------
    requirement : str
        Plain-text description of the requirement to cover.

    Returns
    -------
    str
        The model's generated continuation, stripped of surrounding
        whitespace. Intended to be a JSON array of test cases, though a
        free-form language model is not guaranteed to emit valid JSON.
    """
    # Prompt steering the model toward a bare JSON array of test cases.
    prompt = f"Generate test cases for the following software requirement in JSON format: '{requirement}'. Only provide the JSON array of test cases."
    result = model(
        prompt,
        # max_new_tokens bounds only the generated continuation; the old
        # max_length=300 also counted the prompt's tokens, which shrank
        # the space left for the actual answer.
        max_new_tokens=300,
        num_return_sequences=1,
        # Without this the pipeline echoes the prompt at the start of
        # "generated_text", so callers could never receive "only the
        # JSON array" as the prompt demands.
        return_full_text=False,
    )[0]["generated_text"]
    return result.strip()
16
+
17
# Example usage: run this file directly to see a sample generation.
if __name__ == "__main__":
    sample_requirement = "User login functionality"
    print(generate_test_cases(sample_requirement))