import os

# 🧱 Set all possible cache directories to writable locations.
# These MUST be set before `transformers` is imported: the library reads
# HF_HOME / TRANSFORMERS_CACHE at import time, so setting them afterwards
# (the previous order) left the defaults (e.g. /.cache) in effect and could
# fail on read-only filesystems such as HF Spaces / containers.
os.environ["HF_HOME"] = "/tmp/huggingface"
os.environ["TRANSFORMERS_CACHE"] = "/tmp/huggingface/transformers"
os.environ["HF_DATASETS_CACHE"] = "/tmp/huggingface/datasets"
os.environ["XDG_CACHE_HOME"] = "/tmp/huggingface"  # prevents /.cache access

# Ensure the cache directory exists before any model download starts.
os.makedirs("/tmp/huggingface/transformers", exist_ok=True)

# Imports intentionally come after the environment setup (see note above);
# this deviates from PEP 8 on purpose.
import torch
from fastapi import FastAPI
from pydantic import BaseModel
from transformers import AutoTokenizer, AutoModelForSequenceClassification
# Initialize the FastAPI application (title/version show up in the OpenAPI docs).
app = FastAPI(title="Check-ins Classifier API", version="1.0")

# Load model and tokenizer from the Hugging Face Hub.
# NOTE: this performs network I/O at import time on first run; files are
# cached under the directories configured via the environment variables above.
MODEL_NAME = "mjpsm/check-ins-classifier"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME)
model.eval()  # inference only: disables dropout / batch-norm updates

# Label mapping from model output index to human-readable class name.
# NOTE(review): assumes the checkpoint was trained with exactly this 3-class
# ordering — confirm against the model config's id2label if available.
id2label = {
    0: "Bad",
    1: "Mediocre",
    2: "Good"
}
# Input schema for POST /predict
class InputText(BaseModel):
    """Request body for /predict: the raw check-in text to classify."""

    # Free-form text to run through the classifier.
    text: str
@app.post("/predict")
async def predict(data: InputText):
    """Classify a check-in text.

    Tokenizes the input, runs the model without gradient tracking, and
    returns the argmax label together with the full softmax distribution.
    """
    encoded = tokenizer(
        data.text, return_tensors="pt", truncation=True, padding=True
    )

    # Inference only — no autograd bookkeeping needed.
    with torch.no_grad():
        logits = model(**encoded).logits

    distribution = torch.nn.functional.softmax(logits, dim=-1)
    label_id = distribution.argmax(dim=-1).item()

    return {
        "input_text": data.text,
        "predicted_label": id2label[label_id],
        "label_id": label_id,
        "probabilities": distribution.tolist(),
    }
@app.get("/")
async def home():
    """Landing endpoint: points callers at the prediction route."""
    welcome = "Welcome to the Check-ins Classifier API. Use POST /predict to classify text."
    return {"message": welcome}