Hugging Face Space (status: sleeping) — fraud-detection demo comparing ALBERT and FinBERT classifiers.
| import gradio as gr | |
| import torch | |
| from transformers import AlbertTokenizer, AlbertForSequenceClassification, BertTokenizer, BertForSequenceClassification | |
| import torch.nn.functional as F | |
# Load the two fine-tuned checkpoints from the Hugging Face Hub.
# .eval() disables dropout so inference is deterministic.
def _load_pair(repo_id, model_cls, tokenizer_cls):
    """Return (model-in-eval-mode, tokenizer) loaded from *repo_id*."""
    return model_cls.from_pretrained(repo_id).eval(), tokenizer_cls.from_pretrained(repo_id)

albert_model, albert_tokenizer = _load_pair(
    "Deepaksai1/albert-fraud-detector-v2", AlbertForSequenceClassification, AlbertTokenizer
)
finbert_model, finbert_tokenizer = _load_pair(
    "Deepaksai1/finbert-fraud-detector-v2", BertForSequenceClassification, BertTokenizer
)
# Inference function
def predict_model(step, tx_type, amount, old_org, new_org, old_dest, new_dest, model_name):
    """Classify one transaction as fraud / not fraud with the chosen model.

    Args:
        step: Time step of the transaction.
        tx_type: Transaction type string (e.g. "CASH_OUT", "TRANSFER").
        amount: Transaction amount.
        old_org, new_org: Originator balance before / after the transaction.
        old_dest, new_dest: Destination balance before / after the transaction.
        model_name: "ALBERT" selects the ALBERT checkpoint; any other value
            falls through to FinBERT (matches the original if/else behavior).

    Returns:
        tuple[str, float]: ("Fraud" or "Not Fraud", fraud-class probability
        rounded to 4 decimal places).
    """
    # Serialize the tabular features into the text format the models were
    # fine-tuned on.
    text = (
        f"Step: {step}, Type: {tx_type}, Amount: {amount}, "
        f"OldBalOrig: {old_org}, NewBalOrig: {new_org}, "
        f"OldBalDest: {old_dest}, NewBalDest: {new_dest}"
    )
    if model_name == "ALBERT":
        tokenizer, model = albert_tokenizer, albert_model
    else:
        tokenizer, model = finbert_tokenizer, finbert_model
    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=128)
    with torch.no_grad():
        outputs = model(**inputs)
    # Fix: softmax/argmax over the *label* dimension explicitly. The original
    # torch.argmax(probs) reduced over the flattened tensor, which is only
    # correct while the batch dimension happens to be 1.
    probs = F.softmax(outputs.logits, dim=-1)
    pred = torch.argmax(probs, dim=-1)[0].item()
    # NOTE(review): assumes label index 1 is the "fraud" class for both
    # checkpoints — confirm against model.config.id2label.
    fraud_score = probs[0, 1].item()
    return ("Fraud" if pred == 1 else "Not Fraud"), round(fraud_score, 4)
# Example values
# Column order mirrors predict_model's inputs:
# [step, tx_type, amount, old_org, new_org, old_dest, new_dest, model_name]
# NOTE(review): values look like PaySim-style transaction records — verify
# against the dataset the checkpoints were fine-tuned on.
examples = [
[151, "CASH_OUT", 1633227.0, 1633227.0, 0.0, 2865353.22, 4498580.23, "ALBERT"],
[353, "CASH_OUT", 174566.53, 174566.53, 0.0, 1191715.74, 1366282.27, "FinBERT"],
[357, "TRANSFER", 484493.06, 484493.06, 0.0, 0.0, 0.0, "ALBERT"],
[43, "CASH_OUT", 81571.63, 0.0, 0.0, 176194.2, 257765.83, "FinBERT"],
[307, "DEBIT", 247.82, 11544.0, 11296.18, 3550535.53, 3550783.36, "ALBERT"],
[350, "DEBIT", 4330.57, 3766.0, 0.0, 239435.41, 243765.98, "FinBERT"]
]
# Gradio Interface
# Widget creation order defines the on-screen layout: each gr.Row lays its
# children out horizontally, rows stack vertically inside the Blocks context.
with gr.Blocks() as demo:
    gr.Markdown("## 🔎 Fraud Detection with ALBERT and FinBERT")
    # Transaction identity: time step, type, and amount.
    with gr.Row():
        step = gr.Number(label="Step", value=1)
        tx_type = gr.Dropdown(choices=["CASH_OUT", "TRANSFER", "PAYMENT", "DEBIT", "CASH_IN"], label="Transaction Type")
        amount = gr.Number(label="Amount", value=0.0)
    # Originator account balances before/after the transaction.
    with gr.Row():
        old_org = gr.Number(label="Old Balance Orig", value=0.0)
        new_org = gr.Number(label="New Balance Orig", value=0.0)
    # Destination account balances before/after the transaction.
    with gr.Row():
        old_dest = gr.Number(label="Old Balance Dest", value=0.0)
        new_dest = gr.Number(label="New Balance Dest", value=0.0)
    # Choice of backing classifier; passed through to predict_model.
    model_selector = gr.Dropdown(choices=["ALBERT", "FinBERT"], value="ALBERT", label="Select Model")
    with gr.Row():
        predict_btn = gr.Button("Predict")
    # Outputs: the predicted label and the fraud-class probability.
    pred_label = gr.Label(label="Prediction")
    prob_score = gr.Number(label="Fraud Probability")
    # Bind function
    # Input order must match predict_model's parameter order exactly.
    predict_btn.click(fn=predict_model,
    inputs=[step, tx_type, amount, old_org, new_org, old_dest, new_dest, model_selector],
    outputs=[pred_label, prob_score])
    # Clickable example rows that pre-fill the same input components.
    gr.Examples(examples=examples,
    inputs=[step, tx_type, amount, old_org, new_org, old_dest, new_dest, model_selector])
# Launch app
# Guarded so importing this module (e.g. for tests) does not start the server.
if __name__ == "__main__":
    demo.launch()