ivmpfa committed on
Commit
4d8c4fd
·
verified ·
1 Parent(s): 6818f4b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -27
app.py CHANGED
@@ -1,50 +1,33 @@
1
  import gradio as gr
2
  from transformers import pipeline
3
- import torch
4
 
5
  # Load GPT-2 with optimized parameters
6
  model = pipeline(
7
  "text-generation",
8
  model="gpt2",
9
- max_length=300, # Increased length to allow more detailed output
10
- temperature=0.7, # Balance between creativity and stability
11
- early_stopping=True, # Stop generation early if completed
12
  torch_dtype=torch.float32 # CPU compatibility
13
  )
14
 
15
- def generate_test_cases(requirement):
16
- # Revised prompt to focus on the input requirement and avoid copying the example
17
- prompt = f"""
18
- Generate test cases for the requirement: '{requirement}' in JSON array format.
19
- Each test case must include "id", "title", "steps" (array of strings), and "expected_result".
20
-
21
- DO NOT use the example below in your response. Output only the JSON array, no explanations.
22
-
23
- Example format (IGNORE this when generating):
24
- [
25
- {{
26
- "id": 1,
27
- "title": "Example Test",
28
- "steps": ["Step 1", "Step 2"],
29
- "expected_result": "Expected outcome"
30
- }}
31
- ]
32
- """
33
-
34
  try:
35
  with torch.no_grad():
36
- result = model(prompt, max_time=15)[0]["generated_text"]
37
  return result.strip()
38
  except Exception as e:
39
  return f"Error: {str(e)}"
40
 
41
  # Create Gradio interface
42
  demo = gr.Interface(
43
- fn=generate_test_cases,
44
  inputs="text",
45
  outputs="text",
46
- title="Test Case Generator",
47
- description="Enter a requirement to generate test cases.",
48
  flagging_mode="never"
49
  )
50
 
 
1
import gradio as gr
import torch  # re-added: torch.float32 / torch.no_grad() below need it
from transformers import pipeline
 
3
 
4
# GPT-2 text-generation pipeline, configured for CPU-only inference.
# Generation behaviour is collected in one place so it is easy to tune.
_GENERATION_OPTS = {
    "max_length": 200,       # cap on total response length (tokens)
    "temperature": 0.7,      # sampling creativity (0.0 = deterministic, 1.0 = random)
    "early_stopping": True,  # let generation finish before the length cap
}

model = pipeline(
    "text-generation",
    model="gpt2",
    torch_dtype=torch.float32,  # float32 keeps the model CPU-compatible
    **_GENERATION_OPTS,
)
13
 
14
def chat_with_gpt2(user_input):
    """Generate a GPT-2 continuation of the user's message.

    Args:
        user_input: Free-form text typed into the Gradio textbox.

    Returns:
        The generated text with surrounding whitespace stripped, or an
        ``"Error: ..."`` string if generation fails, so the UI always
        receives something displayable instead of a traceback.
    """
    # The user text is the prompt as-is; the original f"{user_input}"
    # interpolation was redundant, str() keeps the same coercion.
    prompt = str(user_input)
    try:
        # Inference only — disabling autograd saves memory/time on CPU.
        with torch.no_grad():
            # max_time caps each generation request at 10 seconds.
            result = model(prompt, max_time=10)[0]["generated_text"]
        return result.strip()
    except Exception as e:
        # Best-effort boundary: surface the failure in the UI rather
        # than crashing the Gradio worker.
        return f"Error: {str(e)}"
23
 
24
# Create Gradio interface wiring the generator function to a simple
# text-in / text-out UI.
# NOTE(review): no `demo.launch()` is visible in this chunk — confirm the
# file launches the app further down.
demo = gr.Interface(
    fn=chat_with_gpt2,        # handler defined above
    inputs="text",            # single free-text input box
    outputs="text",           # plain-text output panel
    title="GPT-2 Chat Interface",
    description="Type any message to chat with GPT-2.",
    flagging_mode="never"     # disable Gradio's response-flagging control
)
33