# NOTE: extraction residue from the hosting page (Hugging Face Space status header):
#   Spaces: Sleeping
#   File size: 1,311 Bytes
from fastapi import FastAPI
from pydantic import BaseModel
from llama_cpp import Llama
# FastAPI application instance; all routes below are registered on this object.
app = FastAPI()
# Download and initialize the model when the server starts.
# NOTE(review): this runs at import time, so the server will not accept
# requests until the GGUF weights are downloaded and loaded — confirm that
# startup delay is acceptable for the deployment target.
llm = Llama.from_pretrained(
    repo_id="Qwen/Qwen2.5-Coder-0.5B-Instruct-GGUF",  # Make sure this says 0.5B!
    filename="*q4_k_m.gguf",  # glob for the 4-bit (q4_k_m) quantized weights
    n_ctx=2048,    # context window size in tokens
    n_threads=2,   # CPU threads used for inference
    n_batch=512    # prompt-processing batch size
)
class EvalRequest(BaseModel):
    """Request body accepted by POST /evaluate."""
    task_description: str  # the assignment / problem statement to grade against
    python_code: str       # the student's submitted Python solution, as raw text
@app.get("/")
async def health_check():
    """Liveness probe: report that the service is up and where to send work."""
    status_payload = {
        "status": "Online",
        "message": "AI Code Evaluator is running! Send POST requests to /evaluate",
    }
    return status_payload
@app.post("/evaluate")
async def evaluate_code(request: EvalRequest):
    """Grade a student's Python submission with the local LLM.

    Builds a minimal prompt from the task description and the submitted
    code, asks the model for a JSON-formatted evaluation, and returns the
    raw model text under the "evaluation" key.
    """
    # Prompt kept minimal (no extra instructions) to save tokens and
    # speed up prompt processing on the small CPU-only model.
    prompt = f"{request.task_description}\n\nStudent Code:\n{request.python_code}"
    response = llm.create_chat_completion(
        messages=[
            # Fix: corrected "Frendly" -> "Friendly" in the grader persona.
            {"role": "system", "content": "You are a Friendly Python grader. Output only valid JSON."},
            {"role": "user", "content": prompt}
        ],
        max_tokens=250,   # cap the length of the generated evaluation
        temperature=0.1   # low temperature for stricter JSON formatting
    )
    # llama_cpp chat completions mirror the OpenAI response schema:
    # choices[0]["message"]["content"] holds the generated text.
    return {"evaluation": response['choices'][0]['message']['content']}