|
|
from xml.sax.saxutils import escape

import gradio as gr
import uvicorn
from fastapi import FastAPI, Form
from fastapi.responses import Response
from transformers import pipeline
|
|
|
|
|
|
|
|
# Hugging Face Hub repo id of the text-classification checkpoint.
model_id = "ST-THOMAS-OF-AQUINAS/SCAM"


# Loaded once at import time; downloads the weights on first run.
pipe = pipeline("text-classification", model=model_id)


# Maps generic "LABEL_<n>" indices emitted by the model to readable names.
# NOTE(review): assumes the checkpoint has exactly two classes in this
# order — confirm against the model card.
label_map = {0: "author1", 1: "author2"}
|
|
|
|
|
def predict(text: str):
    """Classify *text* with the loaded pipeline and format a short report.

    Returns a two-line string with the predicted label (mapped through
    ``label_map`` when the model emits generic ``LABEL_<n>`` names) and
    the confidence as a percentage. Blank/whitespace-only input returns
    a warning string without invoking the model.
    """
    # Guard clause: nothing to classify.
    if not text.strip():
        return "⚠️ No input text"

    top = pipe(text)[0]
    label, score = top["label"], top["score"]

    # Checkpoints without named classes report "LABEL_0", "LABEL_1", ...;
    # translate that index to a human-readable name when we know it.
    if label.startswith("LABEL_"):
        label = label_map.get(int(label.split("_")[1]), label)

    confidence = round(score * 100, 2)
    return f"Prediction: {label}\nConfidence: {confidence}%"
|
|
|
|
|
|
|
|
|
|
|
# Gradio UI: a single textbox in, the formatted prediction string out.
iface = gr.Interface(

    fn=predict,


    inputs=gr.Textbox(label="Enter WhatsApp Message"),


    outputs=gr.Textbox(label="Prediction"),


    title="📲 WhatsApp Scam Detector",


    description="Paste a WhatsApp message and the model will predict its author."


)
|
|
|
|
|
|
|
|
# FastAPI app hosting the Twilio webhook; the Gradio UI is mounted on it below.
app = FastAPI()
|
|
|
|
|
|
|
|
@app.post("/predict")
async def predict_api(Body: str = Form(...)):
    """Twilio WhatsApp webhook: classify the message and reply as TwiML.

    Twilio POSTs the incoming message text in the ``Body`` form field
    (the parameter name must match Twilio's field name, hence the
    capitalisation) and expects a TwiML XML document in the response.
    """
    reply_text = predict(Body)

    # Fix: escape XML special characters (&, <, >) so a reply containing
    # them cannot produce malformed TwiML or inject markup.
    safe_reply = escape(reply_text)

    twiml_response = f"""<?xml version="1.0" encoding="UTF-8"?>
<Response>
<Message>{safe_reply}</Message>
</Response>"""

    return Response(content=twiml_response, media_type="application/xml")
|
|
|
|
|
|
|
|
# Serve the Gradio UI at the root path of the same FastAPI app,
# so "/" shows the demo while "/predict" keeps handling Twilio webhooks.
app = gr.mount_gradio_app(app, iface, path="/")
|
|
|
|
|
# Entry point: run the combined app. Port 7860 is the conventional
# Hugging Face Spaces port; 0.0.0.0 exposes it to the container network.
if __name__ == "__main__":


    uvicorn.run(app, host="0.0.0.0", port=7860)
|
|
|