hadokenvskikoken committed on
Commit
b3fb970
·
verified ·
1 Parent(s): 938070c

Update main.py

Browse files
Files changed (1) hide show
  1. main.py +26 -37
main.py CHANGED
@@ -8,9 +8,20 @@ import time
8
  import re
9
  import os
10
  from pathlib import Path
 
11
 
 
12
  app = FastAPI(title="Code Evaluation & Optimization API")
13
 
 
 
 
 
 
 
 
 
 
14
  # --- Environment Setup ---
15
  CACHE_DIR = Path("/.cache/huggingface")
16
  CACHE_DIR.mkdir(parents=True, exist_ok=True)
@@ -30,37 +41,21 @@ try:
30
  device_map="auto",
31
  torch_dtype=torch.float16,
32
  cache_dir=str(CACHE_DIR)
33
- )
34
  except Exception as e:
35
  raise RuntimeError(f"Failed to load model: {str(e)}")
36
 
37
- # --- Request Models ---
38
  class CodeRequest(BaseModel):
39
  code: str
40
  language: str = "python" # Default to Python
41
 
42
  # --- Helper Functions ---
43
- def detect_language(user_code: str) -> str:
44
- """Detect programming language based on code patterns"""
45
- patterns = {
46
- "python": ["def ", "print(", "import "],
47
- "java": ["public static void main", "System.out.println"],
48
- "cpp": ["#include <iostream>", "cout <<"],
49
- "javascript": ["function ", "console.log"]
50
- }
51
-
52
- for lang, keywords in patterns.items():
53
- if any(keyword in user_code for keyword in keywords):
54
- return lang
55
- return "unknown"
56
-
57
  def evaluate_code(user_code: str, lang: str) -> dict:
58
  """Evaluate code for correctness, performance, and security"""
59
  start_time = time.time()
60
  file_ext = {"python": "py", "java": "java", "cpp": "cpp", "javascript": "js"}.get(lang, "txt")
61
  filename = f"temp_script.{file_ext}"
62
 
63
- # Save user code to a temporary file
64
  with open(filename, "w") as f:
65
  f.write(user_code)
66
 
@@ -74,12 +69,11 @@ def evaluate_code(user_code: str, lang: str) -> dict:
74
  try:
75
  if lang in commands:
76
  result = subprocess.run(" ".join(commands[lang]),
77
- capture_output=True,
78
- text=True,
79
- timeout=5,
80
- shell=True)
81
  exec_time = time.time() - start_time
82
-
83
  correctness = 1 if result.returncode == 0 else 0
84
  error_message = None if correctness else result.stderr.strip()
85
  else:
@@ -92,7 +86,6 @@ def evaluate_code(user_code: str, lang: str) -> dict:
92
  readability_score = 20 if len(user_code) < 200 else 10
93
  efficiency_score = 30 if exec_time < 1 else 10
94
  security_score = 20 if "eval(" not in user_code and "exec(" not in user_code else 0
95
-
96
  total_score = (correctness * 50) + readability_score + efficiency_score + security_score
97
 
98
  feedback = []
@@ -104,10 +97,8 @@ def evaluate_code(user_code: str, lang: str) -> dict:
104
 
105
  if efficiency_score < 30:
106
  feedback.append("⚡ Performance Issue: Code took longer to execute. Optimize loops or calculations.")
107
-
108
  if readability_score < 20:
109
  feedback.append("📖 Readability Issue: Code is lengthy. Break into smaller functions.")
110
-
111
  if security_score == 0:
112
  feedback.append("🔒 Security Risk: Avoid using eval() or exec().")
113
 
@@ -120,16 +111,14 @@ def evaluate_code(user_code: str, lang: str) -> dict:
120
  }
121
 
122
  def optimize_code_ai(user_code: str, lang: str) -> str:
123
- """Generate optimized code using AI and formatting"""
124
  try:
125
- # Basic formatting first
126
  if lang == "python":
127
  user_code = autopep8.fix_code(user_code)
128
  user_code = re.sub(r"eval\((.*)\)", r"int(\1) # Removed eval for security", user_code)
129
  user_code = re.sub(r"/ 0", "/ 1 # Fixed division by zero", user_code)
130
 
131
- # AI-powered optimization
132
- prompt = f"Optimize this {lang} code for efficiency and security:\n```{lang}\n{user_code}\n```\nOptimized version:"
133
  inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
134
 
135
  with torch.no_grad():
@@ -137,7 +126,6 @@ def optimize_code_ai(user_code: str, lang: str) -> str:
137
 
138
  optimized_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
139
 
140
- # Extract just the code block if LLM added explanation
141
  code_match = re.search(r'```(?:python)?\n(.*?)\n```', optimized_code, re.DOTALL)
142
  if code_match:
143
  optimized_code = code_match.group(1)
@@ -149,7 +137,6 @@ def optimize_code_ai(user_code: str, lang: str) -> str:
149
  # --- API Endpoints ---
150
  @app.post("/evaluate")
151
  async def evaluate_endpoint(request: CodeRequest):
152
- """Evaluate code for correctness and quality"""
153
  try:
154
  result = evaluate_code(request.code, request.language)
155
  return {"status": "success", "result": result}
@@ -158,7 +145,6 @@ async def evaluate_endpoint(request: CodeRequest):
158
 
159
  @app.post("/optimize")
160
  async def optimize_endpoint(request: CodeRequest):
161
- """Generate optimized version of the code"""
162
  try:
163
  optimized = optimize_code_ai(request.code, request.language)
164
  return {"status": "success", "optimized_code": optimized}
@@ -168,12 +154,15 @@ async def optimize_endpoint(request: CodeRequest):
168
  @app.get("/")
169
  def health_check():
170
  return {
171
- "status": "Code Evaluation API is running!",
172
- "model_loaded": MODEL_NAME,
173
- "cache_dir": str(CACHE_DIR)
 
 
 
174
  }
175
 
176
- # For local testing
177
  if __name__ == "__main__":
178
  import uvicorn
179
- uvicorn.run(app, host="0.0.0.0", port=7860)
 
8
  import re
9
  import os
10
  from pathlib import Path
11
+ from fastapi.middleware.cors import CORSMiddleware
12
 
13
+ # Initialize FastAPI app (critical to name it 'app')
14
  app = FastAPI(title="Code Evaluation & Optimization API")
15
 
16
+ # Required CORS configuration
17
+ app.add_middleware(
18
+ CORSMiddleware,
19
+ allow_origins=["*"],
20
+ allow_credentials=True,
21
+ allow_methods=["*"],
22
+ allow_headers=["*"],
23
+ )
24
+
25
  # --- Environment Setup ---
26
  CACHE_DIR = Path("/.cache/huggingface")
27
  CACHE_DIR.mkdir(parents=True, exist_ok=True)
 
41
  device_map="auto",
42
  torch_dtype=torch.float16,
43
  cache_dir=str(CACHE_DIR)
 
44
  except Exception as e:
45
  raise RuntimeError(f"Failed to load model: {str(e)}")
46
 
47
+ # --- Request Model ---
48
  class CodeRequest(BaseModel):
49
  code: str
50
  language: str = "python" # Default to Python
51
 
52
  # --- Helper Functions ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
53
  def evaluate_code(user_code: str, lang: str) -> dict:
54
  """Evaluate code for correctness, performance, and security"""
55
  start_time = time.time()
56
  file_ext = {"python": "py", "java": "java", "cpp": "cpp", "javascript": "js"}.get(lang, "txt")
57
  filename = f"temp_script.{file_ext}"
58
 
 
59
  with open(filename, "w") as f:
60
  f.write(user_code)
61
 
 
69
  try:
70
  if lang in commands:
71
  result = subprocess.run(" ".join(commands[lang]),
72
+ capture_output=True,
73
+ text=True,
74
+ timeout=5,
75
+ shell=True)
76
  exec_time = time.time() - start_time
 
77
  correctness = 1 if result.returncode == 0 else 0
78
  error_message = None if correctness else result.stderr.strip()
79
  else:
 
86
  readability_score = 20 if len(user_code) < 200 else 10
87
  efficiency_score = 30 if exec_time < 1 else 10
88
  security_score = 20 if "eval(" not in user_code and "exec(" not in user_code else 0
 
89
  total_score = (correctness * 50) + readability_score + efficiency_score + security_score
90
 
91
  feedback = []
 
97
 
98
  if efficiency_score < 30:
99
  feedback.append("⚡ Performance Issue: Code took longer to execute. Optimize loops or calculations.")
 
100
  if readability_score < 20:
101
  feedback.append("📖 Readability Issue: Code is lengthy. Break into smaller functions.")
 
102
  if security_score == 0:
103
  feedback.append("🔒 Security Risk: Avoid using eval() or exec().")
104
 
 
111
  }
112
 
113
  def optimize_code_ai(user_code: str, lang: str) -> str:
114
+ """Generate optimized code using AI"""
115
  try:
 
116
  if lang == "python":
117
  user_code = autopep8.fix_code(user_code)
118
  user_code = re.sub(r"eval\((.*)\)", r"int(\1) # Removed eval for security", user_code)
119
  user_code = re.sub(r"/ 0", "/ 1 # Fixed division by zero", user_code)
120
 
121
+ prompt = f"Optimize this {lang} code:\n```{lang}\n{user_code}\n```\nOptimized version:"
 
122
  inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
123
 
124
  with torch.no_grad():
 
126
 
127
  optimized_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
128
 
 
129
  code_match = re.search(r'```(?:python)?\n(.*?)\n```', optimized_code, re.DOTALL)
130
  if code_match:
131
  optimized_code = code_match.group(1)
 
137
  # --- API Endpoints ---
138
  @app.post("/evaluate")
139
  async def evaluate_endpoint(request: CodeRequest):
 
140
  try:
141
  result = evaluate_code(request.code, request.language)
142
  return {"status": "success", "result": result}
 
145
 
146
  @app.post("/optimize")
147
  async def optimize_endpoint(request: CodeRequest):
 
148
  try:
149
  optimized = optimize_code_ai(request.code, request.language)
150
  return {"status": "success", "optimized_code": optimized}
 
154
  @app.get("/")
155
  def health_check():
156
  return {
157
+ "status": "API is running",
158
+ "model": MODEL_NAME,
159
+ "endpoints": {
160
+ "evaluate": "POST /evaluate",
161
+ "optimize": "POST /optimize"
162
+ }
163
  }
164
 
165
+ # Required for Hugging Face Spaces
166
  if __name__ == "__main__":
167
  import uvicorn
168
+ uvicorn.run("main:app", host="0.0.0.0", port=7860)