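# Dependencies (assumed install command; package names match the imports below,
# and huggingface-hub is needed by Llama.from_pretrained to fetch the weights):
#   pip install fastapi uvicorn pydantic llama-cpp-python huggingface-hub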
from fastapi import FastAPI
from pydantic import BaseModel
from llama_cpp import Llama

app = FastAPI()

# Download and initialize the model when the server starts
llm = Llama.from_pretrained(
    repo_id="Qwen/Qwen2.5-Coder-0.5B-Instruct-GGUF",  # Make sure this says 0.5B!
    filename="*q4_k_m.gguf",  # Glob picks the 4-bit (q4_k_m) quantized weights
    n_ctx=2048,               # Context window size in tokens
    n_threads=2,              # CPU threads used for inference
    n_batch=512               # Prompt tokens processed per batch
)

class EvalRequest(BaseModel):
    task_description: str
    python_code: str

# --- ADDED HEALTH CHECK ROUTE HERE ---
@app.get("/")
async def health_check():
    return {"status": "Online", "message": "AI Code Evaluator is running! Send POST requests to /evaluate"}
# -------------------------------------

@app.post("/evaluate")
async def evaluate_code(request: EvalRequest):
    prompt = f"Task Description:\n{request.task_description}\n\nSubmitted Code:\n{request.python_code}\n\nEvaluate the code against the task. Assign a final score out of 10. Keep your feedback concise and helpful."
    
    response = llm.create_chat_completion(
        messages=[
            # Framing the model specifically for grading student submissions
            {"role": "system", "content": "You are an expert Python instructor. You evaluate student code submissions accurately, checking for logical correctness and task completion."},
            {"role": "user", "content": prompt}
        ],
        max_tokens=250, # Limit response length to keep the API fast
        temperature=0.2 # Low temperature for consistent scoring
    )
    
    return {"evaluation": response['choices'][0]['message']['content']}