File size: 1,612 Bytes
2c52774 01dee50 2c52774 01dee50 2c52774 01dee50 2c52774 01dee50 2c52774 01dee50 2c52774 01dee50 2c52774 01dee50 2c52774 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 |
from xml.sax.saxutils import escape

import gradio as gr
import uvicorn
from fastapi import FastAPI, Form
from fastapi.responses import Response
from transformers import pipeline
# Load model
# Hugging Face text-classification pipeline; the model is downloaded from the
# Hub on first run and cached locally afterwards.
model_id = "ST-THOMAS-OF-AQUINAS/SCAM"
pipe = pipeline("text-classification", model=model_id)
# Label map (adjust based on your model)
# Maps the numeric index parsed from a "LABEL_<n>" output to a display name.
label_map = {0: "author1", 1: "author2"}
def predict(text: str) -> str:
    """Classify *text* and return a human-readable prediction string.

    Blank or whitespace-only input yields a warning string. Otherwise the
    top pipeline result is taken; raw ``LABEL_<n>`` labels are translated
    through ``label_map`` (unknown indices fall back to the raw label).
    """
    if not text.strip():
        return "⚠️ No input text"

    top = pipe(text)[0]
    label, score = top["label"], top["score"]

    # Translate the model's generic "LABEL_<n>" into a friendly name.
    if label.startswith("LABEL_"):
        idx = int(label.split("_")[1])
        label = label_map.get(idx, label)

    return f"Prediction: {label}\nConfidence: {round(score * 100, 2)}%"
# --- Gradio Interface (for testing manually) ---
# Simple browser UI wrapping predict(); mounted into the FastAPI app at "/".
iface = gr.Interface(
    fn=predict,
    inputs=gr.Textbox(label="Enter WhatsApp Message"),
    outputs=gr.Textbox(label="Prediction"),
    title="📲 WhatsApp Scam Detector",
    description="Paste a WhatsApp message and the model will predict its author."
)
# --- FastAPI App (for Twilio) ---
app = FastAPI()

# Twilio will POST with form data (Body=message text)
@app.post("/predict")
async def predict_api(Body: str = Form(...)):
    """Twilio webhook: classify the inbound message and reply via TwiML.

    Twilio delivers the message text in the ``Body`` form field and expects
    a TwiML XML document in the response.
    """
    reply_text = predict(Body)
    # Twilio expects TwiML XML. Escape XML metacharacters (&, <, >) so a
    # prediction string can never produce a malformed document that Twilio
    # would reject.
    twiml_response = f"""<?xml version="1.0" encoding="UTF-8"?>
<Response>
    <Message>{escape(reply_text)}</Message>
</Response>"""
    return Response(content=twiml_response, media_type="application/xml")
# Mount Gradio UI inside FastAPI
# Serves the Gradio interface at "/" while keeping the /predict webhook route.
app = gr.mount_gradio_app(app, iface, path="/")

if __name__ == "__main__":
    # Port 7860 is the Hugging Face Spaces convention; bind all interfaces
    # so the containerized app is reachable externally.
    uvicorn.run(app, host="0.0.0.0", port=7860)
|