cjell committed on
Commit
d8e3053
·
1 Parent(s): 8b6ec58

healthcheck+

Browse files
Files changed (1) hide show
  1. app.py +13 -8
app.py CHANGED
@@ -6,13 +6,18 @@ import os
6
 
7
  os.environ["HF_HOME"] = "/tmp"
8
 
9
- spam = pipeline("text-classification", model="valurank/distilroberta-spam-comments-detection")
 
 
 
10
 
11
- toxic = pipeline("text-classification", model="s-nlp/roberta_toxicity_classifier")
12
 
13
- sentiment = pipeline("text-classification", model = "nlptown/bert-base-multilingual-uncased-sentiment")
14
 
15
- nsfw = pipeline("text-classification", model = "michellejieli/NSFW_text_classifier")
 
 
16
 
17
 
18
  app = FastAPI()
@@ -53,10 +58,10 @@ def health_check():
53
  }
54
 
55
  models = {
56
- "spam": ("valurank/distilroberta-spam-comments-detection", spam),
57
- "toxic": ("s-nlp/roberta_toxicity_classifier", toxic),
58
- "sentiment": ("nlptown/bert-base-multilingual-uncased-sentiment", sentiment),
59
- "nsfw": ("michellejieli/NSFW_text_classifier", nsfw),
60
  }
61
 
62
  for key, (model_name, model_pipeline) in models.items():
 
6
 
7
  os.environ["HF_HOME"] = "/tmp"
8
 
9
+ SPAM_MODEL = "valurank/distilroberta-spam-comments-detection"
10
+ TOXIC_MODEL = "s-nlp/roberta_toxicity_classifier"
11
+ SENTIMENT_MODEL = "nlptown/bert-base-multilingual-uncased-sentiment"
12
+ NSFW_MODEL = "michellejieli/NSFW_text_classifier"
13
 
14
+ spam = pipeline("text-classification", model=SPAM_MODEL)
15
 
16
+ toxic = pipeline("text-classification", model=TOXIC_MODEL)
17
 
18
+ sentiment = pipeline("text-classification", model = SENTIMENT_MODEL)
19
+
20
+ nsfw = pipeline("text-classification", model = NSFW_MODEL)
21
 
22
 
23
  app = FastAPI()
 
58
  }
59
 
60
  models = {
61
+ "spam": (SPAM_MODEL, spam),
62
+ "toxic": (TOXIC_MODEL, toxic),
63
+ "sentiment": (SENTIMENT_MODEL, sentiment),
64
+ "nsfw": (NSFW_MODEL, nsfw),
65
  }
66
 
67
  for key, (model_name, model_pipeline) in models.items():