"""WhatsApp scam/author classifier service.

Serves a Hugging Face text-classification model two ways:

- a Gradio UI mounted at "/" for manual testing, and
- a FastAPI POST endpoint at "/predict" that answers Twilio webhooks
  with a TwiML XML response.
"""

from xml.sax.saxutils import escape

import gradio as gr
import uvicorn
from fastapi import FastAPI, Form
from fastapi.responses import Response
from transformers import pipeline

# Load model once at import time; the first run may download weights.
model_id = "ST-THOMAS-OF-AQUINAS/SCAM"
pipe = pipeline("text-classification", model=model_id)

# Maps generic LABEL_<n> indices to human-readable names
# (adjust based on your model's actual training labels).
label_map = {0: "author1", 1: "author2"}


def predict(text: str) -> str:
    """Classify *text* and return a human-readable prediction string.

    Empty or whitespace-only input short-circuits with a warning message
    instead of invoking the model.
    """
    if not text.strip():
        return "⚠️ No input text"

    results = pipe(text)
    label = results[0]["label"]
    score = results[0]["score"]

    # transformers emits generic "LABEL_<n>" names when the model config
    # lacks an id2label mapping; translate those via label_map, keeping
    # the raw label if the index is unknown.
    if label.startswith("LABEL_"):
        idx = int(label.split("_")[1])
        label = label_map.get(idx, label)

    return f"Prediction: {label}\nConfidence: {round(score * 100, 2)}%"


# --- Gradio Interface (for testing manually) ---
iface = gr.Interface(
    fn=predict,
    inputs=gr.Textbox(label="Enter WhatsApp Message"),
    outputs=gr.Textbox(label="Prediction"),
    title="📲 WhatsApp Scam Detector",
    description="Paste a WhatsApp message and the model will predict its author.",
)

# --- FastAPI App (for Twilio) ---
app = FastAPI()


@app.post("/predict")
async def predict_api(Body: str = Form(...)) -> Response:
    """Twilio webhook endpoint.

    Twilio POSTs form data with the incoming message text in ``Body`` and
    expects a TwiML XML document in return. The previous version returned
    the bare reply text under an XML media type, which is not valid TwiML;
    we now wrap the reply in <Response><Message>…</Message></Response>.
    """
    reply_text = predict(Body)
    # escape() guards against characters in the model output (e.g. "&", "<")
    # that would otherwise produce malformed XML.
    twiml_response = (
        '<?xml version="1.0" encoding="UTF-8"?>'
        f"<Response><Message>{escape(reply_text)}</Message></Response>"
    )
    return Response(content=twiml_response, media_type="application/xml")


# Mount the Gradio UI inside the FastAPI app at the web root.
app = gr.mount_gradio_app(app, iface, path="/")

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)