# NOTE: the original upload carried scraped Hugging Face page chrome here
# (Space status "Sleeping", "File size: 1,331 Bytes"), preserved as a comment.
from fastapi import FastAPI
from pydantic import BaseModel
from llama_cpp import Llama

app = FastAPI()

# Load the GGUF model once at server start-up so every request reuses
# the same in-memory instance (from_pretrained also downloads it on
# first run).
llm = Llama.from_pretrained(
    repo_id="Qwen/Qwen2.5-Coder-0.5B-Instruct-GGUF",  # Make sure this says 0.5B!
    filename="*q4_k_m.gguf",  # glob picks the q4_k_m quantization file
    n_ctx=2048,    # context window in tokens
    n_threads=2,   # CPU threads for inference
    n_batch=512,   # prompt-processing batch size
)
class EvalRequest(BaseModel):
    """Request body for POST /evaluate."""

    # Natural-language description of the task the student solved
    task_description: str
    # The student's submitted Python source code, as a string
    python_code: str
# --- Health-check route ---
@app.get("/")
async def health_check():
    """Liveness probe: confirms the service is up and points at /evaluate."""
    payload = {
        "status": "Online",
        "message": "AI Code Evaluator is running! Send POST requests to /evaluate",
    }
    return payload
@app.post("/evaluate")
async def evaluate_code(request: EvalRequest):
    """Grade submitted Python code with the local LLM.

    Builds a prompt from the task description and the student code,
    asks the model for a JSON-only verdict, and returns the raw model
    text under the "evaluation" key.
    """
    # Single user message combining task context and submission.
    user_prompt = (
        f"Task Context:\n{request.task_description}"
        f"\n\nStudent Code:\n{request.python_code}"
    )
    system_message = {
        "role": "system",
        "content": "You are a friendly Python grader. Output ONLY valid JSON.",
    }
    # response_format constrains the model to emit a valid JSON object.
    response = llm.create_chat_completion(
        messages=[system_message, {"role": "user", "content": user_prompt}],
        max_tokens=250,
        temperature=0.1,
        response_format={"type": "json_object"},
    )
    # The grader's JSON verdict is the first choice's message content.
    return {"evaluation": response["choices"][0]["message"]["content"]}