ST-THOMAS-OF-AQUINAS committed on
Commit
d4c1eaa
·
verified ·
1 Parent(s): ae80ca8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +39 -16
app.py CHANGED
@@ -1,38 +1,61 @@
1
  from fastapi import FastAPI, Form
 
 
2
  from twilio.twiml.messaging_response import MessagingResponse
3
- from transformers import pipeline
4
 
5
- # --- Hugging Face Inference API setup ---
6
- HF_MODEL_ID = "ST-THOMAS-OF-AQUINAS/SCAM"
7
 
 
 
 
 
 
8
 
9
- classifier = pipeline(
10
- "text-classification",
11
- model=HF_MODEL_ID,
12
-
13
- )
14
 
15
  # --- FastAPI app ---
16
  app = FastAPI(title="Scam Detector API with Twilio")
17
 
18
- # --- Prediction function ---
19
  def predict_author(text: str):
20
- if not text.strip():
21
- return "No text", 0.0
22
- result = classifier(text)[0] # {'label': 'author1', 'score': 0.95}
23
- return result['label'], round(result['score'] * 100, 2)
 
 
 
 
 
 
 
24
 
25
  # --- Twilio WhatsApp webhook ---
26
  @app.post("/whatsapp")
27
  async def whatsapp_reply(Body: str = Form(...)):
28
  resp = MessagingResponse()
29
- author, confidence = predict_author(Body)
30
- reply = f"Prediction: {author}\nConfidence: {confidence}%"
 
 
 
31
  resp.message(reply)
32
  return str(resp)
33
 
34
- # --- Simple GET test endpoint ---
35
  @app.get("/predict")
36
  async def predict(text: str):
37
  author, confidence = predict_author(text)
38
  return {"prediction": author, "confidence": confidence}
 
 
 
 
 
 
 
# --- Imports & model bootstrap -------------------------------------------
# NOTE(fix): the cache directory MUST be exported BEFORE `transformers` is
# imported — the library resolves its cache paths at import time, so the
# previous revision's assignment (placed after the import) had no effect
# and the permission error it was meant to fix could still occur.
import os

os.environ["TRANSFORMERS_CACHE"] = "/tmp/transformers_cache"
# TRANSFORMERS_CACHE is deprecated in recent releases; HF_HOME is the
# forward-compatible setting, so point it at the same writable location.
os.environ["HF_HOME"] = "/tmp/transformers_cache"

import torch
from fastapi import FastAPI, Form
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from twilio.twiml.messaging_response import MessagingResponse

# --- Load model (downloaded/cached at startup) ---
model_id = "ST-THOMAS-OF-AQUINAS/SCAM"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)
model.eval()  # inference mode: disables dropout, etc.

# Run on GPU when one is available, otherwise CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# --- Label map (update according to your trained model) ---
# Maps the model's class indices to human-readable names.
label_map = {0: "author1", 1: "author2"}

# --- FastAPI app ---
app = FastAPI(title="Scam Detector API with Twilio")
24
 
25
+ # --- Helper prediction function ---
26
  def predict_author(text: str):
27
+ inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
28
+ inputs = {k: v.to(device) for k, v in inputs.items()}
29
+
30
+ with torch.no_grad():
31
+ outputs = model(**inputs)
32
+ probs = torch.nn.functional.softmax(outputs.logits, dim=-1)
33
+ pred = torch.argmax(probs, dim=1).item()
34
+ confidence = probs[0][pred].item()
35
+
36
+ predicted_author = label_map[pred]
37
+ return predicted_author, round(confidence * 100, 2)
38
 
39
# --- Twilio WhatsApp webhook ---
@app.post("/whatsapp")
async def whatsapp_reply(Body: str = Form(...)):
    """Receive an inbound WhatsApp message and answer with the prediction.

    Twilio posts the message text as the form field ``Body``; the response
    is TwiML markup rendered from a MessagingResponse.
    """
    response = MessagingResponse()
    if not Body.strip():
        # Nothing classifiable was sent.
        reply = "⚠️ No text detected."
    else:
        author, confidence = predict_author(Body)
        reply = f"Prediction: {author}\nConfidence: {confidence}%"
    response.message(reply)
    return str(response)
50
 
51
# --- Simple test endpoint ---
@app.get("/predict")
async def predict(text: str):
    """GET convenience endpoint: classify ``text`` and return JSON."""
    label, score = predict_author(text)
    return {"prediction": label, "confidence": score}
56
+
57
# --- Run locally (optional) ---
if __name__ == "__main__":
    import uvicorn

    # Honor the platform-provided PORT (e.g. on a hosting platform),
    # defaulting to 8000 for local development.
    uvicorn.run(app, host="0.0.0.0", port=int(os.environ.get("PORT", 8000)))