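"""FastAPI backend serving a CodeT5+ (770M) checkpoint fine-tuned for Python
code generation and bug fixing."""
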
from fastapi import FastAPI
from pydantic import BaseModel
from transformers import AutoTokenizer, T5ForConditionalGeneration, AutoConfig
import torch

app = FastAPI(title="CodeT5+ Backend on HuggingFace")

# ==== LOAD MODEL ====
base_ckpt = "Salesforce/codet5p-770m"
finetuned_ckpt = "OSS-forge/codet5p-770m-pyresbugs"

print("Loading tokenizer + config...")
tokenizer = AutoTokenizer.from_pretrained(base_ckpt)
config = AutoConfig.from_pretrained(base_ckpt)

print("Loading fine-tuned model weights...")
model = T5ForConditionalGeneration.from_pretrained(
    finetuned_ckpt,
    config=config
)
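
# Optional (an assumption, not part of the original setup): on a GPU the 770M
# checkpoint can be loaded in half precision to roughly halve memory use, e.g.
#
#   model = T5ForConditionalGeneration.from_pretrained(
#       finetuned_ckpt, config=config, torch_dtype=torch.float16
#   )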

device = "cuda" if torch.cuda.is_available() else "cpu"
print("Running on:", device)

model = model.to(device)
model.eval()

# ==== REQUEST / RESPONSE MODELS ====
class GenerateRequest(BaseModel):
    prompt: str
    language: str | None = "Python"
    task: str = "generate"  # "generate", "fix", or anything else for a raw prompt
    max_new_tokens: int = 128
    num_beams: int = 4
    do_sample: bool = False  # sampling must be enabled for temperature to apply
    temperature: float = 0.7

class GenerateResponse(BaseModel):
    output: str


def build_prompt(req: GenerateRequest):
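    """Wrap the raw prompt in a task-specific instruction.

    Illustrative outputs:
        task="generate" -> "Generate Python code:\n<prompt>"
        task="fix"      -> "Fix the bug in the following Python code:\n<prompt>\n\nCorrected code:"
        any other task  -> the prompt is passed through unchanged
    """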
    if req.task == "generate":
        return f"Generate {req.language} code:\n{req.prompt}"
    elif req.task == "fix":
        return f"Fix the bug in the following {req.language} code:\n{req.prompt}\n\nCorrected code:"
    else:
        return req.prompt


@app.post("/generate", response_model=GenerateResponse)
def generate(req: GenerateRequest):
    prompt = build_prompt(req)
    # Truncate overly long prompts to the tokenizer's maximum input length.
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True).to(device)

    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=req.max_new_tokens,
            num_beams=req.num_beams,
            do_sample=req.do_sample,
            # temperature has no effect under pure beam search, so only
            # forward it when sampling is requested
            temperature=req.temperature if req.do_sample else 1.0,
            early_stopping=True,
        )

    text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return GenerateResponse(output=text)
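
# Example request (assuming the server is reachable on localhost:8000):
#
#   curl -X POST http://localhost:8000/generate \
#        -H "Content-Type: application/json" \
#        -d '{"prompt": "def add(a, b): return a - b", "task": "fix"}'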


@app.get("/")
def root():
    return {"status": "CodeT5+ backend is running πŸš€"}