Spaces:
Runtime error
Runtime error
Abineshkumar77
committed on
Commit
·
f97bb94
1
Parent(s):
4fcd4f9
Add application file
Browse files- app.py +16 -11
- sentiment_model.onnx +3 -0
app.py
CHANGED
|
@@ -1,12 +1,13 @@
|
|
| 1 |
from fastapi import FastAPI
|
|
|
|
| 2 |
from transformers import AutoTokenizer
|
| 3 |
-
from optimum.onnxruntime import ORTModelForSequenceClassification
|
| 4 |
-
import torch
|
| 5 |
import time
|
|
|
|
| 6 |
|
| 7 |
-
# Load the tokenizer and
|
| 8 |
tokenizer = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
|
| 9 |
-
|
|
|
|
| 10 |
|
| 11 |
app = FastAPI()
|
| 12 |
|
|
@@ -34,16 +35,22 @@ def analyze_sentiment(tweet: str):
|
|
| 34 |
|
| 35 |
# Tokenize the input tweet
|
| 36 |
inputs = tokenizer(tweet_proc, return_tensors="pt")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 37 |
|
| 38 |
-
# Perform the inference
|
| 39 |
-
with torch.no_grad():
|
| 40 |
-
outputs = model(**inputs)
|
| 41 |
-
|
| 42 |
# Calculate the inference time
|
| 43 |
inference_time = time.time() - start_time
|
| 44 |
|
| 45 |
# Get the probabilities from the logits
|
| 46 |
-
|
|
|
|
| 47 |
|
| 48 |
# Get the label with the highest probability
|
| 49 |
max_prob, max_index = torch.max(probabilities, dim=1)
|
|
@@ -66,5 +73,3 @@ def analyze_sentiment(tweet: str):
|
|
| 66 |
"score": highest_score,
|
| 67 |
"inference_time": round(inference_time, 4) # In seconds
|
| 68 |
}
|
| 69 |
-
|
| 70 |
-
|
|
|
|
| 1 |
from fastapi import FastAPI
|
| 2 |
+
import onnxruntime as ort
|
| 3 |
from transformers import AutoTokenizer
|
|
|
|
|
|
|
| 4 |
import time
|
| 5 |
+
import torch
|
| 6 |
|
| 7 |
+
# Load the tokenizer and ONNX model
|
| 8 |
tokenizer = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
|
| 9 |
+
onnx_model_path = "D:/demodeploy/sentiment_model.onnx"
|
| 10 |
+
onnx_session = ort.InferenceSession(onnx_model_path)
|
| 11 |
|
| 12 |
app = FastAPI()
|
| 13 |
|
|
|
|
| 35 |
|
| 36 |
# Tokenize the input tweet
|
| 37 |
inputs = tokenizer(tweet_proc, return_tensors="pt")
|
| 38 |
+
input_ids = inputs["input_ids"].numpy()
|
| 39 |
+
attention_mask = inputs["attention_mask"].numpy()
|
| 40 |
+
|
| 41 |
+
# Perform the inference using ONNX
|
| 42 |
+
onnx_inputs = {
|
| 43 |
+
'input_ids': input_ids,
|
| 44 |
+
'attention_mask': attention_mask
|
| 45 |
+
}
|
| 46 |
+
outputs = onnx_session.run(None, onnx_inputs)
|
| 47 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 48 |
# Calculate the inference time
|
| 49 |
inference_time = time.time() - start_time
|
| 50 |
|
| 51 |
# Get the probabilities from the logits
|
| 52 |
+
logits = outputs[0]
|
| 53 |
+
probabilities = torch.softmax(torch.tensor(logits), dim=1)
|
| 54 |
|
| 55 |
# Get the label with the highest probability
|
| 56 |
max_prob, max_index = torch.max(probabilities, dim=1)
|
|
|
|
| 73 |
"score": highest_score,
|
| 74 |
"inference_time": round(inference_time, 4) # In seconds
|
| 75 |
}
|
|
|
|
|
|
sentiment_model.onnx
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b357bdbb92348c9fed6359f64b0bc6dab75c78bde8c121ea49ac70dbb7eb9b5b
|
| 3 |
+
size 498833392
|