import os

# HF_HOME must be exported BEFORE transformers/huggingface_hub are imported:
# huggingface_hub resolves its cache directory from this variable at import
# time, so setting it after the import can be silently ignored and models
# would be cached in the default location instead of /tmp.
os.environ["HF_HOME"] = "/tmp"

from fastapi import FastAPI
from pydantic import BaseModel
from transformers import pipeline

# All four classifiers are loaded eagerly at module import so the first
# request does not pay the model-download/initialization cost. This makes
# process startup slow and memory-heavy by design.
spam = pipeline("text-classification", model="valurank/distilroberta-spam-comments-detection")
toxic = pipeline("text-classification", model="s-nlp/roberta_toxicity_classifier")
sentiment = pipeline("text-classification", model="nlptown/bert-base-multilingual-uncased-sentiment")
nsfw = pipeline("text-classification", model="michellejieli/NSFW_text_classifier")

app = FastAPI()
@app.get("/")
def root():
    """Liveness probe: report that the service is up."""
    return dict(status="ok")
class Query(BaseModel):
    """Request body shared by all classification endpoints."""
    # Raw text to classify; forwarded verbatim to the selected pipeline.
    text: str
@app.post("/spam")
def predict_spam(query: Query):
    """Classify the posted text with the spam-detection pipeline.

    Returns the top prediction's label and confidence score.
    """
    top = spam(query.text)[0]
    return {key: top[key] for key in ("label", "score")}
@app.post("/toxic")
def predict_toxic(query: Query):
    """Classify the posted text with the toxicity pipeline.

    Returns the top prediction's label and confidence score.
    """
    prediction = toxic(query.text)[0]
    label, score = prediction["label"], prediction["score"]
    return {"label": label, "score": score}
@app.post("/sentiment")
def predict_sentiment(query: Query):
    """Classify the posted text with the multilingual sentiment pipeline.

    Returns the top prediction's label and confidence score.
    """
    top = sentiment(query.text)[0]
    return {key: top[key] for key in ("label", "score")}
@app.post("/nsfw")
def predict_nsfw(query: Query):
    """Classify the posted text with the NSFW-text pipeline.

    Returns the top prediction's label and confidence score.
    """
    prediction = nsfw(query.text)[0]
    label, score = prediction["label"], prediction["score"]
    return {"label": label, "score": score}