from fastapi import FastAPI
from pydantic import BaseModel
from llama_cpp import Llama

app = FastAPI()

# Download and initialize the model when the server starts
llm = Llama.from_pretrained(
repo_id="Qwen/Qwen2.5-Coder-1.5B-Instruct-GGUF",
filename="*q4_k_m.gguf", # 4-bit quantization for speed and low memory
n_ctx=2048 # Context window size
)

class EvalRequest(BaseModel):
    task_description: str
    python_code: str

@app.post("/evaluate")
async def evaluate_code(request: EvalRequest):
prompt = f"Task Description:\n{request.task_description}\n\nSubmitted Code:\n{request.python_code}\n\nEvaluate the code against the task. Assign a final score out of 10. Keep your feedback concise and helpful."
response = llm.create_chat_completion(
messages=[
# Framing the model specifically for grading student submissions
{"role": "system", "content": "You are an expert Python instructor. You evaluate student code submissions accurately, checking for logical correctness and task completion."},
{"role": "user", "content": prompt}
],
max_tokens=250, # Limit response length to keep API fast
temperature=0.2 # Low temperature for consistent scoring
)
return {"evaluation": response['choices'][0]['message']['content']} |