import os

# Redirect the Hugging Face cache to a writable directory (useful in containerized deployments)
os.environ["HF_HOME"] = "/tmp"
os.environ["TRANSFORMERS_CACHE"] = "/tmp"

from fastapi import FastAPI
from pydantic import BaseModel
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Initialize FastAPI app
app = FastAPI(title="Confidence Statement API", version="1.0")

# Load the fine-tuned model and tokenizer
model_name = "mjpsm/Confidence-Statement-Model-final"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# Define input format
class InputText(BaseModel):
    statement: str

# Define prediction function
def predict_statement(statement: str):
    inputs = tokenizer(statement, return_tensors="pt", padding=True, truncation=True, max_length=128)
    with torch.no_grad():
        outputs = model(**inputs)
    logits = outputs.logits
    probabilities = torch.nn.functional.softmax(logits, dim=-1)
    predicted_class = torch.argmax(probabilities, dim=-1).item()
    label_mapping = {0: "lack of self-confidence", 1: "self-confident"}
    return {
        "label": label_mapping[predicted_class],
        "confidence_score": round(probabilities[0][predicted_class].item(), 4)
    }

# Define root route
@app.get("/")
def read_root():
    return {"message": "Welcome to the Confidence Statement API!"}

# Define prediction route
@app.post("/predict")
def predict(input_text: InputText):
    return predict_statement(input_text.statement)
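
# --- Usage sketch (not part of the original service code) ---
# Assumptions: this file is saved as app.py, and uvicorn is installed alongside FastAPI;
# the host, port, and example statement below are illustrative choices.
#
# Start the server from the command line:
#   uvicorn app:app --host 0.0.0.0 --port 8000
#
# Example request once the server is running:
#   curl -X POST http://localhost:8000/predict \
#        -H "Content-Type: application/json" \
#        -d '{"statement": "I am sure I can lead this project successfully."}'
#
# The response is a JSON object with "label" and "confidence_score" keys,
# as returned by predict_statement above.
if __name__ == "__main__":
    import uvicorn  # assumed to be installed; standard ASGI server for FastAPI
    uvicorn.run(app, host="0.0.0.0", port=8000)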