Spaces: Build error
Update main.py
main.py CHANGED
@@ -9,6 +9,7 @@ import re
 import os
 from pathlib import Path
 from fastapi.middleware.cors import CORSMiddleware
+import tempfile
 
 app = FastAPI(title="Code Evaluation & Optimization API")
 
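The new `import tempfile` backs the temp-file helper introduced in a later hunk. For reference, a minimal sketch of the `mkstemp` + `fdopen` idiom that helper relies on (standard library only; the literal suffix, directory, and code string here are illustrative):

```python
import os
import tempfile

# mkstemp creates the file atomically and returns an open descriptor plus
# its path, avoiding the race of "pick a name, then open it".
fd, path = tempfile.mkstemp(suffix=".py", dir="/tmp")
with os.fdopen(fd, "w") as tmp:  # wrap the fd so it is closed exactly once
    tmp.write("print('hello')")
print(path)      # e.g. /tmp/tmpab12cd.py
os.unlink(path)  # the caller owns cleanup
```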
@@ -21,14 +22,14 @@ app.add_middleware(
     allow_headers=["*"],
 )
 
-# Environment Setup
-CACHE_DIR = Path("/
+# Environment Setup - Modified for Hugging Face Spaces
+CACHE_DIR = Path(os.getenv("HF_HOME", "/tmp/huggingface"))
 CACHE_DIR.mkdir(parents=True, exist_ok=True)
 os.environ["TRANSFORMERS_CACHE"] = str(CACHE_DIR)
 os.environ["HF_HOME"] = str(CACHE_DIR)
 
-# Load AI Model
-MODEL_NAME = "codellama/CodeLlama-7b-hf"
+# Load AI Model - Using smaller model for Spaces compatibility
+MODEL_NAME = "codellama/CodeLlama-7b-instruct-hf"  # More suitable for API use
 
 try:
     tokenizer = AutoTokenizer.from_pretrained(
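The cache relocation matters because a Space container typically only guarantees write access under /tmp (and /data when persistent storage is enabled). A minimal sketch of how the new fallback resolves, assuming the platform has not exported HF_HOME itself:

```python
import os
from pathlib import Path

# Reuse the platform-provided HF_HOME when present; otherwise fall back
# to a directory the container can actually write to.
CACHE_DIR = Path(os.getenv("HF_HOME", "/tmp/huggingface"))
CACHE_DIR.mkdir(parents=True, exist_ok=True)
print(CACHE_DIR)  # /tmp/huggingface when HF_HOME is unset
```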
@@ -40,94 +41,130 @@ try:
         torch_dtype=torch.float16,
         cache_dir=str(CACHE_DIR))
 except Exception as e:
-
+    print(f"Model loading warning: {str(e)}")
+    model = None
+    tokenizer = None
 
 # Request Model
 class CodeRequest(BaseModel):
     code: str
     language: str = "python"
 
+def create_temp_file(code: str, extension: str) -> str:
+    """Create temporary file in writable directory with proper permissions"""
+    temp_dir = "/tmp/code_files"
+    os.makedirs(temp_dir, exist_ok=True)
+    fd, path = tempfile.mkstemp(suffix=f".{extension}", dir=temp_dir)
+    with os.fdopen(fd, 'w') as tmp:
+        tmp.write(code)
+    os.chmod(path, 0o777)  # Ensure executable permissions
+    return path
+
+def cleanup_temp_files():
+    """Clean up temporary files"""
+    temp_dir = "/tmp/code_files"
+    if os.path.exists(temp_dir):
+        for filename in os.listdir(temp_dir):
+            file_path = os.path.join(temp_dir, filename)
+            try:
+                if os.path.isfile(file_path):
+                    os.unlink(file_path)
+            except Exception as e:
+                print(f"Error deleting {file_path}: {e}")
+
 # Helper Functions
 def evaluate_code(user_code: str, lang: str) -> dict:
     """Evaluate code for correctness, performance, and security"""
     start_time = time.time()
     file_ext = {"python": "py", "java": "java", "cpp": "cpp", "javascript": "js"}.get(lang, "txt")
-
-
-
-
+
+    try:
+        filename = create_temp_file(user_code, file_ext)
+
+        commands = {
+            "python": ["python3", filename],
+            "java": ["javac", filename, "&&", "java", filename.replace(".java", "")],
+            "cpp": ["g++", filename, "-o", f"{filename}.out", "&&", f"./{filename}.out"],
+            "javascript": ["node", filename]
+        }
 
-
-
-        "java": ["javac", filename, "&&", "java", filename.replace(".java", "")],
-        "cpp": ["g++", filename, "-o", "temp_script.out", "&&", "./temp_script.out"],
-        "javascript": ["node", filename]
-    }
+        if lang not in commands:
+            return {"status": "error", "message": "Unsupported language", "score": 0}
 
-
-
-
-
-
-
-
-
-
-
+        result = subprocess.run(
+            " ".join(commands[lang]),
+            capture_output=True,
+            text=True,
+            timeout=5,
+            shell=True
+        )
+        exec_time = time.time() - start_time
+        correctness = 1 if result.returncode == 0 else 0
+        error_message = None if correctness else result.stderr.strip()
+
+        # Scoring logic
+        readability_score = 20 if len(user_code) < 200 else 10
+        efficiency_score = 30 if exec_time < 1 else 10
+        security_score = 20 if "eval(" not in user_code and "exec(" not in user_code else 0
+        total_score = (correctness * 50) + readability_score + efficiency_score + security_score
+
+        feedback = []
+        if correctness == 0:
+            feedback.append("❌ Error in Code Execution! Check syntax or logic errors.")
+            feedback.append(f"🐞 Error Details: {error_message}")
         else:
-
+            feedback.append("✅ Code executed successfully!")
+
+        if efficiency_score < 30:
+            feedback.append("⚡ Performance Issue: Code took longer to execute. Optimize loops or calculations.")
+        if readability_score < 20:
+            feedback.append("📖 Readability Issue: Code is lengthy. Break into smaller functions.")
+        if security_score == 0:
+            feedback.append("🔒 Security Risk: Avoid using eval() or exec().")
+
+        return {
+            "status": "success" if correctness else "error",
+            "execution_time": round(exec_time, 3) if correctness else None,
+            "score": max(0, min(100, total_score)),
+            "feedback": "\n".join(feedback),
+            "error_details": error_message if not correctness else None
+        }
+
+    except subprocess.TimeoutExpired:
+        return {"status": "error", "message": "Execution timed out", "score": 0}
     except Exception as e:
         return {"status": "error", "message": str(e), "score": 0}
-
-
-    readability_score = 20 if len(user_code) < 200 else 10
-    efficiency_score = 30 if exec_time < 1 else 10
-    security_score = 20 if "eval(" not in user_code and "exec(" not in user_code else 0
-    total_score = (correctness * 50) + readability_score + efficiency_score + security_score
-
-    feedback = []
-    if correctness == 0:
-        feedback.append("❌ Error in Code Execution! Check syntax or logic errors.")
-        feedback.append(f"🐞 Error Details: {error_message}")
-    else:
-        feedback.append("✅ Code executed successfully!")
-
-    if efficiency_score < 30:
-        feedback.append("⚡ Performance Issue: Code took longer to execute. Optimize loops or calculations.")
-    if readability_score < 20:
-        feedback.append("📖 Readability Issue: Code is lengthy. Break into smaller functions.")
-    if security_score == 0:
-        feedback.append("🔒 Security Risk: Avoid using eval() or exec().")
-
-    return {
-        "status": "success" if correctness else "error",
-        "execution_time": round(exec_time, 3) if correctness else None,
-        "score": max(0, min(100, total_score)),
-        "feedback": "\n".join(feedback),
-        "error_details": error_message if not correctness else None
-    }
+    finally:
+        cleanup_temp_files()
 
 def optimize_code_ai(user_code: str, lang: str) -> str:
     """Generate optimized code using AI"""
+    if model is None or tokenizer is None:
+        raise HTTPException(status_code=503, detail="AI service temporarily unavailable")
+
     try:
         if lang == "python":
             user_code = autopep8.fix_code(user_code)
             user_code = re.sub(r"eval\((.*)\)", r"int(\1) # Removed eval for security", user_code)
             user_code = re.sub(r"/ 0", "/ 1 # Fixed division by zero", user_code)
 
-        prompt = f"Optimize this {lang} code
+        prompt = f"""Optimize this {lang} code for better performance and readability:
+```{lang}
+{user_code}
+"""
         inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
 
         with torch.no_grad():
-            outputs = model.generate(**inputs, max_length=1024)
+            outputs = model.generate(**inputs, max_length=1024, temperature=0.7)
 
         optimized_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
 
-
-
-
+        # Extract code between the last code block markers
+        code_blocks = re.findall(r'```(?:python)?\n(.*?)\n```', optimized_code, re.DOTALL)
+        if code_blocks:
+            optimized_code = code_blocks[-1]  # Get the last code block
 
-        return optimized_code if optimized_code else user_code
+        return optimized_code.strip() if optimized_code else user_code
     except Exception as e:
         raise HTTPException(status_code=500, detail=f"AI optimization failed: {str(e)}")
 
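For orientation, a hedged sketch of calling the rewritten `evaluate_code` directly; the keys follow the return statements in the hunk above, while the concrete numbers depend on the machine:

```python
result = evaluate_code("print(2 + 2)", "python")
# Expected shape on success, per the return dict above:
# {
#     "status": "success",
#     "execution_time": 0.03,   # rounded to 3 decimals
#     "score": 100,             # 50 + 20 + 30 + 20 = 120, capped at 100
#     "feedback": "✅ Code executed successfully!",
#     "error_details": None,
# }
print(result["status"], result["score"])
```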
@@ -152,13 +189,17 @@ async def optimize_endpoint(request: CodeRequest):
 def health_check():
     return {
         "status": "API is running",
-        "model": MODEL_NAME,
+        "model": MODEL_NAME if model else "Not loaded",
         "endpoints": {
             "evaluate": "POST /evaluate",
             "optimize": "POST /optimize"
         }
     }
 
+@app.on_event("shutdown")
+def shutdown_event():
+    cleanup_temp_files()
+
 if __name__ == "__main__":
     import uvicorn
     uvicorn.run("main:app", host="0.0.0.0", port=7860)
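Once the Space builds, the service can be exercised like any FastAPI app. A minimal client sketch against the two endpoints `health_check` advertises; the base URL is a placeholder for the deployed Space:

```python
import requests

BASE = "http://localhost:7860"  # placeholder; substitute the Space URL

# The body mirrors the CodeRequest model: required code, optional language.
payload = {"code": "print('hello')", "language": "python"}

r = requests.post(f"{BASE}/evaluate", json=payload, timeout=30)
print(r.json())  # status / execution_time / score / feedback / error_details

r = requests.post(f"{BASE}/optimize", json=payload, timeout=120)
print(r.status_code)  # 503 if the model never loaded, per the new guard
```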