mfrng committed on
Commit
4db9bec
·
verified ·
1 Parent(s): f561fe7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +28 -23
app.py CHANGED
@@ -1,11 +1,8 @@
1
- from fastapi import FastAPI, HTTPException
2
- from pydantic import BaseModel
3
- import openai # Can be replaced with another AI API
4
  import os
5
  import subprocess
6
 
7
- app = FastAPI()
8
-
9
  # AI Model Selection
10
  MODEL_MAP = {
11
  "gpt": "gpt-4",
@@ -13,40 +10,48 @@ MODEL_MAP = {
13
  "custom": "http://custom-llm-api"
14
  }
15
 
16
- class TestRequest(BaseModel):
17
- model: str
18
- prompt: str
19
-
20
- @app.post("/generate_test")
21
- def generate_test(request: TestRequest):
22
  """Generate test script using the selected AI model."""
23
- model_choice = request.model
24
- prompt = request.prompt
25
-
26
- model_api = MODEL_MAP.get(model_choice, "gpt-4")
27
  try:
28
  response = openai.ChatCompletion.create(
29
  model=model_api,
30
  messages=[{"role": "system", "content": prompt}]
31
  )
32
  test_script = response["choices"][0]["message"]["content"]
33
-
34
  # Save the generated test script
35
  with open("generated_test.py", "w") as f:
36
  f.write(test_script)
37
 
38
- return {"message": "Test generated", "script": test_script}
39
-
40
  except Exception as e:
41
- raise HTTPException(status_code=500, detail=str(e))
42
 
43
- @app.post("/run_tests")
44
  def run_tests():
45
  """Execute AI-generated test scripts and return results."""
46
  try:
47
  result = subprocess.run(["pytest", "generated_test.py", "--json-report"], capture_output=True, text=True)
48
- return {"output": result.stdout, "error": result.stderr}
49
  except Exception as e:
50
- raise HTTPException(status_code=500, detail=str(e))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
51
 
52
- # Run with: uvicorn app:app --host 0.0.0.0 --port 7860
 
1
+ import gradio as gr
2
+ import openai # Can be replaced with another AI model API
 
3
  import os
4
  import subprocess
5
 
 
 
6
  # AI Model Selection
7
  MODEL_MAP = {
8
  "gpt": "gpt-4",
 
10
  "custom": "http://custom-llm-api"
11
  }
12
 
13
def generate_test(model, prompt):
    """Generate a pytest test script with the selected AI model.

    Args:
        model: Key into MODEL_MAP ("gpt", "gemini", or "custom");
            unknown keys fall back to "gpt-4".
        prompt: Natural-language description of the test case, sent as
            the system message to the model.

    Returns:
        The generated script text, or an "Error: ..." string on failure
        (Gradio renders return values, so errors are shown inline in the
        UI instead of being raised).

    Side effects:
        Overwrites generated_test.py with the generated script so that
        run_tests() can execute it.
    """
    # NOTE(review): "custom" maps to a URL in MODEL_MAP but is passed
    # here as an OpenAI model name — that almost certainly needs a
    # different client/endpoint; confirm intended behavior.
    model_api = MODEL_MAP.get(model, "gpt-4")
    try:
        response = openai.ChatCompletion.create(
            model=model_api,
            messages=[{"role": "system", "content": prompt}],
        )
        test_script = response["choices"][0]["message"]["content"]

        # Persist the script for run_tests(). Explicit encoding avoids
        # the platform-dependent default mangling non-ASCII output.
        with open("generated_test.py", "w", encoding="utf-8") as f:
            f.write(test_script)

        return test_script
    except Exception as e:
        # Surface the failure in the UI rather than crashing the app.
        return f"Error: {str(e)}"
30
 
 
31
def run_tests():
    """Run the previously generated test script with pytest.

    Returns:
        Combined stdout and stderr from the pytest run, or an
        "Error: ..." string if the script is missing or pytest could
        not be launched.
    """
    # Guard: generate_test() must have produced the script first;
    # without this, pytest is launched against a nonexistent file.
    if not os.path.exists("generated_test.py"):
        return "Error: generated_test.py not found - generate a test first."
    try:
        # --json-report requires the pytest-json-report plugin; if it is
        # absent, pytest's own usage error appears in the output below.
        result = subprocess.run(
            ["pytest", "generated_test.py", "--json-report"],
            capture_output=True,
            text=True,
        )
        return result.stdout + "\n" + result.stderr
    except Exception as e:
        return f"Error: {str(e)}"
38
+
39
# Gradio UI: model/prompt inputs feed generate_test; a second button
# runs the saved script via run_tests. Same layout, same event wiring.
with gr.Blocks() as app:
    gr.Markdown("# AI Test Framework")

    with gr.Row():
        model_choice = gr.Dropdown(
            ["gpt", "gemini", "custom"], value="gpt", label="Choose AI Model"
        )
        prompt_box = gr.Textbox(
            label="Test Prompt", placeholder="Describe the test case..."
        )

    generate_btn = gr.Button("Generate Test Script")
    script_box = gr.Textbox(label="Generated Test Script", interactive=False)

    # Generated script goes straight into the read-only textbox.
    generate_btn.click(
        generate_test, inputs=[model_choice, prompt_box], outputs=script_box
    )

    run_btn = gr.Button("Run Tests")
    results_box = gr.Textbox(label="Test Execution Results", interactive=False)

    # run_tests takes no inputs; it reads generated_test.py from disk.
    run_btn.click(run_tests, inputs=[], outputs=results_box)

app.launch()