hadokenvskikoken commited on
Commit
b26fccc
Β·
verified Β·
1 Parent(s): 4b8959a

Create main.py

Browse files
Files changed (1) hide show
  1. main.py +156 -0
main.py ADDED
@@ -0,0 +1,156 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import re
import subprocess
import time

import autopep8
import torch
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from transformers import AutoModelForCausalLM, AutoTokenizer
9
+
10
# FastAPI application instance; all routes below are registered on it.
app = FastAPI(title="Code Evaluation & Optimization API")

# --- Load AI Model ---
# NOTE(review): the model loads eagerly at import time; CodeLlama-7B in fp16
# needs several GB of accelerator/host memory — confirm the deployment host
# can hold it before serving traffic.
MODEL_NAME = "codellama/CodeLlama-7b-hf"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    device_map="auto",           # let accelerate place layers across devices
    torch_dtype=torch.float16    # half precision to reduce memory footprint
)
20
+
21
+ # --- Request Models ---
22
class CodeRequest(BaseModel):
    """Request payload shared by /evaluate and /optimize: raw source code plus its language."""
    # code: raw source text submitted by the client
    code: str
    language: str = "python"  # Default to Python
25
+
26
+ # --- Helper Functions ---
27
def detect_language(user_code: str) -> str:
    """Guess the programming language of *user_code* from telltale keywords.

    Languages are tried in insertion order (python first); the first language
    with any matching marker wins. Returns "unknown" when nothing matches.
    """
    signature_markers = {
        "python": ["def ", "print(", "import "],
        "java": ["public static void main", "System.out.println"],
        "cpp": ["#include <iostream>", "cout <<"],
        "javascript": ["function ", "console.log"]
    }

    for language, markers in signature_markers.items():
        for marker in markers:
            if marker in user_code:
                return language
    return "unknown"
40
+
41
def evaluate_code(user_code: str, lang: str) -> dict:
    """Evaluate code for correctness, performance, and security.

    Writes *user_code* to a temporary script, executes it with the
    interpreter/compiler for *lang* (python/java/cpp/javascript), and scores
    the run out of 100: correctness 50, readability 20, efficiency 30,
    naive security check 20 (capped at 100).

    Returns:
        dict: success/failed-run shape — status, execution_time, score,
        feedback, error_details; unsupported language or subprocess failure —
        status, message, score.
    """
    start_time = time.time()
    file_ext = {"python": "py", "java": "java", "cpp": "cpp", "javascript": "js"}.get(lang, "txt")
    filename = f"temp_script.{file_ext}"

    # Save user code to a temporary file for the external runner.
    with open(filename, "w") as f:
        f.write(user_code)

    # The "&&" compile-then-run chains require shell=True below; the command
    # text is built only from the fixed filename, so no user-controlled text
    # reaches the shell command line itself (the code runs sandboxed only by
    # the 5s timeout — NOTE(review): consider a real sandbox for production).
    commands = {
        "python": ["python3", filename],
        "java": ["javac", filename, "&&", "java", filename.replace(".java", "")],
        "cpp": ["g++", filename, "-o", "temp_script.out", "&&", "./temp_script.out"],
        "javascript": ["node", filename]
    }

    try:
        # Guard first so unsupported languages never reach the subprocess.
        if lang not in commands:
            return {"status": "error", "message": "Unsupported language", "score": 0}
        result = subprocess.run(" ".join(commands[lang]),
                                capture_output=True,
                                text=True,
                                timeout=5,
                                shell=True)
        exec_time = time.time() - start_time

        correctness = 1 if result.returncode == 0 else 0
        error_message = None if correctness else result.stderr.strip()
    except Exception as e:
        # Timeout / missing toolchain etc. — report instead of crashing.
        return {"status": "error", "message": str(e), "score": 0}
    finally:
        # Bug fix: the original leaked temp_script.* on every call. Remove the
        # script and any compiled artifacts regardless of outcome.
        for leftover in (filename, "temp_script.out", "temp_script.class"):
            try:
                os.remove(leftover)
            except OSError:
                pass

    # Scoring heuristics (weights documented in the docstring).
    readability_score = 20 if len(user_code) < 200 else 10
    efficiency_score = 30 if exec_time < 1 else 10
    security_score = 20 if "eval(" not in user_code and "exec(" not in user_code else 0

    total_score = (correctness * 50) + readability_score + efficiency_score + security_score

    feedback = []
    if correctness == 0:
        feedback.append("❌ Error in Code Execution! Check syntax or logic errors.")
        feedback.append(f"📌 Error Details: {error_message}")
    else:
        feedback.append("✅ Code executed successfully!")

    if efficiency_score < 30:
        feedback.append("⚡ Performance Issue: Code took longer to execute. Optimize loops or calculations.")

    if readability_score < 20:
        feedback.append("📖 Readability Issue: Code is lengthy. Break into smaller functions.")

    if security_score == 0:
        feedback.append("🔒 Security Risk: Avoid using eval() or exec().")

    return {
        "status": "success" if correctness else "error",
        "execution_time": round(exec_time, 3) if correctness else None,
        "score": max(0, min(100, total_score)),
        "feedback": "\n".join(feedback),
        "error_details": error_message if not correctness else None
    }
105
+
106
def optimize_code_ai(user_code: str, lang: str) -> str:
    """Produce an optimized version of *user_code* via formatting plus the LLM.

    Python input first gets a deterministic cleanup pass (autopep8 plus two
    regex rewrites); then the model is prompted for an optimized rewrite and
    any fenced code block in its answer is extracted.
    """
    # Deterministic pre-pass for Python only.
    if lang == "python":
        user_code = autopep8.fix_code(user_code)
        user_code = re.sub(r"eval\((.*)\)", r"int(\1) # Removed eval for security", user_code)
        user_code = re.sub(r"/ 0", "/ 1 # Fixed division by zero", user_code)

    # Ask the model for an optimized rewrite.
    prompt = (
        f"Optimize this {lang} code for efficiency and security:"
        f"\n```{lang}\n{user_code}\n```\nOptimized version:"
    )
    encoded = tokenizer(prompt, return_tensors="pt").to(model.device)

    with torch.no_grad():
        generated = model.generate(**encoded, max_length=1024)

    candidate = tokenizer.decode(generated[0], skip_special_tokens=True)

    # If the LLM wrapped its answer in a fenced block, keep only the code.
    fenced = re.search(r'```(?:python)?\n(.*?)\n```', candidate, re.DOTALL)
    if fenced:
        candidate = fenced.group(1)

    # Fall back to the (formatted) input if the model returned nothing.
    return candidate if candidate else user_code
129
+
130
+ # --- API Endpoints ---
131
@app.post("/evaluate")
async def evaluate_endpoint(request: CodeRequest):
    """Run the evaluation pipeline on the submitted code and wrap the result."""
    try:
        outcome = evaluate_code(request.code, request.language)
        return {"status": "success", "result": outcome}
    except Exception as e:
        # Surface any pipeline failure as a 400 with the error text.
        raise HTTPException(status_code=400, detail=str(e))
139
+
140
@app.post("/optimize")
async def optimize_endpoint(request: CodeRequest):
    """Return an AI-optimized rewrite of the submitted code."""
    try:
        rewritten = optimize_code_ai(request.code, request.language)
        return {"status": "success", "optimized_code": rewritten}
    except Exception as e:
        # Surface any optimization failure as a 400 with the error text.
        raise HTTPException(status_code=400, detail=str(e))
148
+
149
@app.get("/")
def health_check():
    """Liveness probe: confirms the API process is up and serving."""
    return {"status": "Code Evaluation API is running!"}
152
+
153
# For local testing
if __name__ == "__main__":
    # Dev entry point: serve on all interfaces, port 8000 (single worker,
    # no auto-reload). Import is local so uvicorn is only needed when run
    # directly, not when the module is mounted by an external server.
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)