# Hugging Face file-viewer header (non-code residue, preserved as comments):
# nryadav18's picture
# Update app.py
# b1bfca8 verified
# raw
# history blame
# 1.31 kB
from fastapi import FastAPI
from pydantic import BaseModel
from llama_cpp import Llama
app = FastAPI()

# Download and initialize the model when the server starts
# NOTE(review): Llama.from_pretrained pulls the GGUF file from the HF Hub on
# first run, so startup blocks until the download/load completes.
llm = Llama.from_pretrained(
    repo_id="Qwen/Qwen2.5-Coder-0.5B-Instruct-GGUF",  # Make sure this says 0.5B!
    filename="*q4_k_m.gguf",  # glob selecting the 4-bit (q4_k_m) quantized file
    n_ctx=2048,   # context window in tokens
    n_threads=2,  # CPU threads used for inference
    n_batch=512   # prompt-processing batch size
)
class EvalRequest(BaseModel):
    """Request body for POST /evaluate: the assignment text and the student's code."""
    # The assignment / problem statement passed to the grader model.
    task_description: str
    # The student's submitted solution, forwarded to the model verbatim.
    python_code: str
# --- Health-check route ---
@app.get("/")
async def health_check():
    """Liveness endpoint: confirms the service is up and points callers at /evaluate."""
    payload = {
        "status": "Online",
        "message": "AI Code Evaluator is running! Send POST requests to /evaluate",
    }
    return payload
# ---------------------------
@app.post("/evaluate")
async def evaluate_code(request: EvalRequest):
    """Grade a student's Python submission with the local LLM.

    Builds a minimal prompt from the task description and the student's code,
    asks the model for a JSON-formatted evaluation, and returns the raw model
    text under the "evaluation" key.
    """
    # Prompt kept minimal (no extra instructions) to save tokens and speed up
    # prompt processing.
    prompt = f"{request.task_description}\n\nStudent Code:\n{request.python_code}"
    response = llm.create_chat_completion(
        messages=[
            # Fixed typo in the persona string: "Frendly" -> "Friendly".
            {"role": "system", "content": "You are a Friendly Python grader. Output only valid JSON."},
            {"role": "user", "content": prompt}
        ],
        max_tokens=250,
        temperature=0.1  # low temperature for stricter JSON formatting
    )
    # NOTE(review): assumes the llama-cpp chat-completion response always has
    # at least one choice — holds for create_chat_completion's documented shape.
    return {"evaluation": response['choices'][0]['message']['content']}