!pip install transformers numpy onnx onnxruntime -q
import onnxruntime as ort
from transformers import AutoTokenizer
import numpy as np
import requests
# Download the exported ONNX model from the Hugging Face Hub to a local file.
onnx_model_url = "https://huggingface.co/alanjoshua2005/bert-sms-detector-onnx/resolve/main/bert_sms_detector.onnx"
onnx_model_path = "bert_sms_detector.onnx"

# Fail fast on HTTP errors: without raise_for_status(), a 404/403 would
# silently write an HTML error page into the .onnx file, and the model
# load below would later fail with a confusing protobuf parse error.
response = requests.get(onnx_model_url, timeout=60)
response.raise_for_status()
with open(onnx_model_path, "wb") as f:
    f.write(response.content)
# Load the matching tokenizer and create a CPU-only ONNX Runtime session.
tokenizer = AutoTokenizer.from_pretrained("alanjoshua2005/bert-sms-detector-onnx")
session = ort.InferenceSession(onnx_model_path, providers=["CPUExecutionProvider"])

text = "Congratulations! You won a free prize."

# Tokenize to fixed-length numpy tensors.
# NOTE(review): max_length=64 is assumed to match the ONNX export
# settings — confirm against the model card.
inputs = tokenizer(text, return_tensors="np", padding="max_length", truncation=True, max_length=64)

# Feed exactly the inputs the ONNX graph declares instead of hard-coding
# input_ids/attention_mask: BERT tokenizers often also emit
# token_type_ids, and a graph that requires it would fail with the
# hard-coded dict (while passing unexpected extras raises in ORT).
onnx_inputs = {
    inp.name: inputs[inp.name].astype(np.int64)
    for inp in session.get_inputs()
    if inp.name in inputs
}

# First output holds the classification logits, one row per input text.
outputs = session.run(None, onnx_inputs)
logits = outputs[0]

# Argmax over the last (class) axis — batch-size agnostic; [0] selects
# the single input text used here.
predicted_class = int(np.argmax(logits, axis=-1)[0])
class_map = {0: "Ham (Not Spam)", 1: "Spam"}
print(f"Predicted class: {class_map[predicted_class]}")
# NOTE: per the model's Hugging Face page, this model is not deployed by any
# Inference Provider — it must be downloaded and run locally, as done above.