File size: 852 Bytes
c755cab
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
from fastapi import FastAPI
from pydantic import BaseModel
import torch
import os
from transformers import RobertaTokenizer

class InputData(BaseModel):
    """Request body schema for POST /predict: the raw text to classify."""
    text: str

app = FastAPI()

# Load model
# Model file is expected next to this source file.
model_path = os.path.join(os.path.dirname(__file__), "roberta_model.pkl")
# NOTE(review): torch.load on a full pickled model executes arbitrary code
# during unpickling — safe only if roberta_model.pkl is a trusted artifact.
# Prefer saving a state_dict and loading with torch.load(..., weights_only=True).
model = torch.load(model_path, map_location=torch.device("cpu"))
model.eval()  # inference mode: disables dropout / batch-norm updates

# Load tokenizer
# Downloads/uses the cached "roberta-base" vocab; must match the model's tokenizer.
tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

@app.get("/")
async def root():
    """Health-check endpoint: confirms the service is up and responding."""
    status_payload = {"message": "RoBERTa FastAPI Space is running!"}
    return status_payload

@app.post("/predict")
async def predict(data: InputData):
    """Classify the submitted text and return the argmax class index.

    Tokenizes `data.text` (truncated/padded to the model's limits), runs a
    forward pass without gradient tracking, and returns the index of the
    highest-scoring logit as the prediction.
    """
    encoded = tokenizer(data.text, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():
        model_output = model(**encoded)
        # Single-example batch: argmax over the class dimension yields one value.
        predicted_class = int(model_output.logits.argmax(dim=1))
    return {"prediction": predicted_class}