mfrng commited on
Commit
816e26f
·
verified ·
1 Parent(s): e0d7124

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +52 -0
app.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import FastAPI, HTTPException
2
+ from pydantic import BaseModel
3
+ import openai # Can be replaced with another AI API
4
+ import os
5
+ import subprocess
6
+
7
app = FastAPI()

# AI Model Selection
# Maps a short, client-facing model key to the identifier handed to the
# completion API. Unknown keys fall back to "gpt-4" at the call site
# (see generate_test).
MODEL_MAP = {
    "gpt": "gpt-4",
    "gemini": "gemini-1",
    # NOTE(review): this value is a URL, not a model name — passing it as
    # `model=` to openai.ChatCompletion.create will fail. Presumably a
    # custom inference endpoint was intended; TODO confirm intended routing.
    "custom": "http://custom-llm-api"
}
15
+
16
class TestRequest(BaseModel):
    """Request body for the /generate_test endpoint."""
    # Key into MODEL_MAP selecting the AI backend ("gpt", "gemini", "custom");
    # any other value falls back to "gpt-4" in generate_test.
    model: str
    # Free-form prompt describing the test script to generate.
    prompt: str
19
+
20
@app.post("/generate_test")
def generate_test(request: TestRequest):
    """Generate a test script using the selected AI model.

    Args:
        request: carries `model` (a MODEL_MAP key) and the generation `prompt`.

    Returns:
        dict: {"message": status text, "script": the generated test source}.

    Raises:
        HTTPException: 500 with the underlying error text if the model call
            or the file write fails.
    """
    model_choice = request.model
    prompt = request.prompt

    # Unknown model keys silently fall back to "gpt-4" — kept for backward
    # compatibility with existing callers.
    model_api = MODEL_MAP.get(model_choice, "gpt-4")
    try:
        # NOTE(review): openai.ChatCompletion is the legacy pre-1.0 API and is
        # removed in openai>=1.0 — migrate to client.chat.completions.create
        # when the dependency is upgraded.
        response = openai.ChatCompletion.create(
            model=model_api,
            messages=[{"role": "system", "content": prompt}]
        )
        test_script = response["choices"][0]["message"]["content"]

        # Save the generated test script. Explicit UTF-8: the model output may
        # contain non-ASCII characters, and the platform default encoding
        # (e.g. cp1252 on Windows) would raise UnicodeEncodeError.
        with open("generated_test.py", "w", encoding="utf-8") as f:
            f.write(test_script)

        return {"message": "Test generated", "script": test_script}

    except Exception as e:
        # Boundary handler: surface any failure as a 500 with its message.
        raise HTTPException(status_code=500, detail=str(e))
42
+
43
@app.post("/run_tests")
def run_tests():
    """Execute the AI-generated test script and return pytest's output.

    Returns:
        dict: pytest stdout, stderr, and process exit code.

    Raises:
        HTTPException: 404 if no test script has been generated yet;
            500 on unexpected failures (pytest missing, timeout, ...).
    """
    # Fail fast with a clear error instead of letting pytest emit a
    # confusing missing-file collection error. Raised outside the try so it
    # is not re-wrapped as a 500.
    if not os.path.exists("generated_test.py"):
        raise HTTPException(
            status_code=404,
            detail="generated_test.py not found; call /generate_test first",
        )
    try:
        # NOTE(review): --json-report requires the pytest-json-report plugin;
        # pytest exits with a usage error if it is absent — confirm it is a
        # declared dependency.
        # timeout bounds the run so a hanging test cannot wedge the handler.
        result = subprocess.run(
            ["pytest", "generated_test.py", "--json-report"],
            capture_output=True,
            text=True,
            timeout=300,
        )
        # returncode lets callers distinguish pass (0) from failures without
        # parsing stdout; extra key is backward-compatible.
        return {
            "output": result.stdout,
            "error": result.stderr,
            "returncode": result.returncode,
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
51
+
52
+ # Run with: uvicorn app:app --host 0.0.0.0 --port 7860