from fastapi import FastAPI
from pydantic import BaseModel
from llama_cpp import Llama

app = FastAPI()

# Download and initialize the model when the server starts
llm = Llama.from_pretrained(
    repo_id="Qwen/Qwen2.5-Coder-0.5B-Instruct-GGUF",  # Make sure this says 0.5B!
    filename="*q4_k_m.gguf",
    n_ctx=2048,
    n_threads=2,
    n_batch=512
)

class EvalRequest(BaseModel):
    task_description: str
    python_code: str

# --- ADDED HEALTH CHECK ROUTE HERE ---
@app.get("/")
async def health_check():
    return {"status": "Online", "message": "AI Code Evaluator is running! Send POST requests to /evaluate"}
# -------------------------------------

@app.post("/evaluate")
async def evaluate_code(request: EvalRequest):
    prompt = (
        f"Task Description:\n{request.task_description}\n\n"
        f"Submitted Code:\n{request.python_code}\n\n"
        "Evaluate the code against the task. Assign a final score out of 10. "
        "Keep your feedback concise and helpful."
    )

    response = llm.create_chat_completion(
        messages=[
            # Framing the model specifically for grading student submissions
            {"role": "system", "content": "You are an expert Python instructor. You evaluate student code submissions accurately, checking for logical correctness and task completion."},
            {"role": "user", "content": prompt}
        ],
        max_tokens=250,   # Limit response length to keep API fast
        temperature=0.2   # Low temperature for consistent scoring
    )

    return {"evaluation": response['choices'][0]['message']['content']}
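
# --- Usage sketch (assumptions: this file is saved as main.py, the server runs ---
# --- locally on port 8000, and the `requests` package is installed)            ---
#
# Start the server (the GGUF file is downloaded from the Hugging Face Hub on the first run):
#
#   uvicorn main:app --host 0.0.0.0 --port 8000
#
# Then, from a separate script or shell, POST a submission to /evaluate:
#
#   import requests
#
#   payload = {
#       "task_description": "Write a function that returns the sum of a list of integers.",
#       "python_code": "def total(nums):\n    return sum(nums)",
#   }
#   print(requests.post("http://127.0.0.1:8000/evaluate", json=payload).json()["evaluation"])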