# NOTE: removed non-source extraction artifacts that preceded the code
# (a "File size" header, git commit-hash gutter, and a duplicated 1-81
# line-number column) — they were not valid Python.
from fastapi import FastAPI
from pydantic import BaseModel
import os

# HF_HOME must be exported BEFORE transformers/huggingface_hub are imported:
# huggingface_hub resolves its cache directory from the environment at import
# time, so setting it after the import can silently have no effect.
os.environ["HF_HOME"] = "/tmp"

from transformers import pipeline  # noqa: E402  (must follow the HF_HOME export above)

# Model identifiers on the Hugging Face Hub, one per moderation endpoint.
SPAM_MODEL = "valurank/distilroberta-spam-comments-detection"
TOXIC_MODEL = "s-nlp/roberta_toxicity_classifier"
SENTIMENT_MODEL = "nlptown/bert-base-multilingual-uncased-sentiment"
NSFW_MODEL = "michellejieli/NSFW_text_classifier"

# Pipelines are created eagerly at import so the first request is not slowed
# by model download/initialization (startup pays the cost instead).
spam = pipeline("text-classification", model=SPAM_MODEL)
toxic = pipeline("text-classification", model=TOXIC_MODEL)
sentiment = pipeline("text-classification", model=SENTIMENT_MODEL)
nsfw = pipeline("text-classification", model=NSFW_MODEL)

app = FastAPI()

@app.get("/")
def root():
    """Liveness probe: confirm the API process is up and serving."""
    return dict(status="ok")

class Query(BaseModel):
    """Request body shared by all classification endpoints."""
    # Raw text to be classified by the selected model.
    text: str

@app.post("/spam")
def predict_spam(query: Query):
    """Run the spam classifier on the submitted text and return its top prediction."""
    prediction = spam(query.text)[0]
    return {key: prediction[key] for key in ("label", "score")}

@app.post("/toxic")
def predict_toxic(query: Query):
    """Run the toxicity classifier on the submitted text and return its top prediction."""
    top = toxic(query.text)[0]
    label, score = top["label"], top["score"]
    return {"label": label, "score": score}

@app.post("/sentiment")
def predict_sentiment(query: Query):
    """Run the sentiment classifier on the submitted text and return its top prediction."""
    prediction = sentiment(query.text)[0]
    return {key: prediction[key] for key in ("label", "score")}

@app.post("/nsfw")
def predict_nsfw(query: Query):
    """Run the NSFW classifier on the submitted text and return its top prediction."""
    top = nsfw(query.text)[0]
    label, score = top["label"], top["score"]
    return {"label": label, "score": score}

@app.get("/health")
def health_check():
    """Report server liveness plus per-model status.

    Each pipeline is probed with a tiny inference on the string "test";
    a pipeline that raises is reported as errored with the exception text.
    Note this runs four real inferences per call, so it is not free.
    """
    report = {"server": "running", "models": {}}

    # (endpoint key, Hub model id, loaded pipeline) for every served model.
    probes = [
        ("spam", SPAM_MODEL, spam),
        ("toxic", TOXIC_MODEL, toxic),
        ("sentiment", SENTIMENT_MODEL, sentiment),
        ("nsfw", NSFW_MODEL, nsfw),
    ]

    for key, model_name, clf in probes:
        try:
            clf("test")
        except Exception as e:
            report["models"][key] = {
                "model_name": model_name,
                "status": f"error: {str(e)}"
            }
        else:
            report["models"][key] = {
                "model_name": model_name,
                "status": "running"
            }

    return report