ST-THOMAS-OF-AQUINAS committed on
Commit
63591e5
·
verified ·
1 Parent(s): 92e8eff

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +41 -22
app.py CHANGED
@@ -1,41 +1,66 @@
1
- import os
2
-
3
- # --- FIX CACHE PERMISSIONS ---
4
- os.environ["TRANSFORMERS_CACHE"] = "/tmp/transformers_cache"
5
- os.environ["HF_HOME"] = "/tmp/hf_home" # optional for Hugging Face hub files
6
-
7
  from fastapi import FastAPI, Form
 
8
  from transformers import AutoTokenizer, AutoModelForSequenceClassification
9
  import torch
10
  from twilio.twiml.messaging_response import MessagingResponse
 
 
 
 
 
 
11
 
12
- # --- Load model ---
 
 
13
  model_id = "ST-THOMAS-OF-AQUINAS/SCAM"
14
- tokenizer = AutoTokenizer.from_pretrained(model_id)
15
- model = AutoModelForSequenceClassification.from_pretrained(model_id)
 
16
  model.eval()
17
 
18
  label_map = {0: "author1", 1: "author2"}
19
 
20
- # --- FastAPI app ---
21
- app = FastAPI(title="Scam Detector API with Twilio")
22
 
23
- # --- Helper prediction function ---
 
 
24
  def predict_author(text: str):
25
  inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
 
 
26
  with torch.no_grad():
27
  outputs = model(**inputs)
28
  probs = torch.nn.functional.softmax(outputs.logits, dim=-1)
29
  pred = torch.argmax(probs, dim=1).item()
30
  confidence = probs[0][pred].item()
31
- predicted_author = label_map[pred]
 
32
  return predicted_author, round(confidence * 100, 2)
33
 
34
- # --- Twilio WhatsApp webhook ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
35
  @app.post("/whatsapp")
36
  async def whatsapp_reply(Body: str = Form(...)):
37
  resp = MessagingResponse()
38
-
39
  if Body.strip():
40
  author, confidence = predict_author(Body)
41
  reply = f"Prediction: {author}\nConfidence: {confidence}%"
@@ -43,10 +68,4 @@ async def whatsapp_reply(Body: str = Form(...)):
43
  reply = "⚠️ No text detected."
44
 
45
  resp.message(reply)
46
- return str(resp)
47
-
48
- # --- Simple test endpoint ---
49
- @app.get("/predict")
50
- async def predict(text: str):
51
- author, confidence = predict_author(text)
52
- return {"prediction": author, "confidence": confidence}
 
 
 
 
 
 
 
1
from fastapi import FastAPI, Form
from fastapi.responses import JSONResponse
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
from twilio.twiml.messaging_response import MessagingResponse
import os

# -----------------------------
# Environment-safe cache path
# -----------------------------
# Honour an externally-set HF_HOME; otherwise fall back to a /tmp directory
# that is writable even in locked-down container environments (e.g. HF Spaces).
HF_CACHE_DIR = os.getenv("HF_HOME", "/tmp/hf_cache")

# -----------------------------
# Load model from Hugging Face
# -----------------------------
model_id = "ST-THOMAS-OF-AQUINAS/SCAM"

tokenizer = AutoTokenizer.from_pretrained(model_id, cache_dir=HF_CACHE_DIR)
model = AutoModelForSequenceClassification.from_pretrained(model_id, cache_dir=HF_CACHE_DIR)
model.eval()  # inference mode: disables dropout / gradient-dependent layers

# Class-index -> human-readable label returned to callers.
label_map = {0: "author1", 1: "author2"}

# Run on GPU when one is available, otherwise stay on CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
 
27
+ # -----------------------------
28
+ # Helper function
29
+ # -----------------------------
30
def predict_author(text: str):
    """Classify *text* and return ``(label, confidence_percent)``.

    Tokenizes the input, moves the tensors to the module-level ``device``,
    and runs the module-level ``model`` with gradient tracking disabled.
    The confidence is the softmax probability of the winning class,
    expressed as a percentage rounded to two decimals.
    """
    encoded = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    encoded = {name: tensor.to(device) for name, tensor in encoded.items()}

    with torch.no_grad():
        logits = model(**encoded).logits

    probabilities = torch.nn.functional.softmax(logits, dim=-1)
    best = torch.argmax(probabilities, dim=1).item()
    score = probabilities[0][best].item()

    # Fall back to "unknown" should the model ever emit an unexpected index.
    predicted = label_map.get(best, "unknown")
    return predicted, round(score * 100, 2)
42
 
43
# -----------------------------
# FastAPI app
# -----------------------------
app = FastAPI(title="Scam Detector API with Twilio")


@app.get("/")
async def health_check():
    """Liveness probe: confirms the service is up and the app imported cleanly."""
    return {"status": "✅ API is running"}


@app.get("/predict")
async def get_predict(text: str):
    """Simple GET test endpoint: classify ``?text=...`` and return JSON."""
    author, confidence = predict_author(text)
    return {"prediction": author, "confidence": confidence}
58
+
59
+ # Twilio WhatsApp POST
60
@app.post("/whatsapp")
async def whatsapp_reply(Body: str = Form(...)):
    """Twilio WhatsApp webhook.

    Twilio POSTs the inbound message as form field ``Body`` and expects a
    TwiML (XML) document back, served with ``Content-Type: application/xml``
    — NOT JSON. Returning ``JSONResponse(str(resp))`` would JSON-quote the
    TwiML and label it ``application/json``, which Twilio rejects.
    """
    # Local import keeps this fix self-contained; fastapi is already a
    # dependency of this file.
    from fastapi.responses import Response

    resp = MessagingResponse()

    if Body.strip():
        author, confidence = predict_author(Body)
        reply = f"Prediction: {author}\nConfidence: {confidence}%"
    else:
        reply = "⚠️ No text detected."

    resp.message(reply)

    # BUG FIX: return the raw TwiML XML with the correct media type.
    return Response(content=str(resp), media_type="application/xml")