Spaces: Runtime error
Abineshkumar77 committed
Commit 190bc8f · 1 Parent(s): c306328
Add application file
Browse files:
- app.py +18 -33
- export_to_onnx.py +18 -0
- requirements.txt +3 -0
app.py
CHANGED
@@ -1,19 +1,18 @@
-from fastapi import FastAPI, Query
-from transformers import pipeline, AutoTokenizer
 import onnxruntime as ort
+from transformers import AutoTokenizer
 import numpy as np
 import time
+from fastapi import FastAPI
 
-
-
-# Initialize the Hugging Face pipeline for sentiment analysis
-pipe = pipeline("text-classification", model="cardiffnlp/twitter-roberta-base-sentiment")
-
-# Load the ONNX model and tokenizer
+# Load the ONNX model
 onnx_model_path = "sentiment_model.onnx"
 session = ort.InferenceSession(onnx_model_path)
+
+# Load the tokenizer
 tokenizer = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base-sentiment")
 
+app = FastAPI()
+
 def preprocess_tweet(tweet: str) -> str:
     tweet_words = []
     for word in tweet.split(' '):
@@ -24,7 +23,7 @@ def preprocess_tweet(tweet: str) -> str:
         tweet_words.append(word)
     return " ".join(tweet_words)
 
-def run_inference_onnx(tweet: str):
+def run_inference(tweet: str):
     inputs = tokenizer(tweet, return_tensors="np", padding=True, truncation=True)
     ort_inputs = {k: v for k, v in inputs.items()}
     ort_outs = session.run(None, ort_inputs)
@@ -35,42 +34,28 @@ def home():
     return {"message": "Welcome to the sentiment analysis API"}
 
 @app.get("/analyze")
-def analyze_sentiment(tweet: str
+def analyze_sentiment(tweet: str):
     # Preprocess the tweet
     tweet_proc = preprocess_tweet(tweet)
 
     # Measure the time taken for the inference
     start_time = time.time()
 
-
-
-        results = pipe(tweet_proc, return_all_scores=True)
-
-        # Find the label with the highest score
-        highest_score_result = max(results[0], key=lambda x: x['score'])
-        label_map = {
-            "LABEL_0": "Negative",
-            "LABEL_1": "Neutral",
-            "LABEL_2": "Positive"
-        }
-        highest_label = label_map[highest_score_result['label']]
-        highest_score = round(highest_score_result['score'], 4)
-
-    elif method == "onnx":
-        # Run inference using the ONNX model
-        logits = run_inference_onnx(tweet_proc)
-        label_map = ["Negative", "Neutral", "Positive"]
-        highest_label_idx = np.argmax(logits)
-        highest_label = label_map[highest_label_idx]
-        highest_score = round(float(np.max(logits)), 4)
 
     # Calculate the inference time
     inference_time = time.time() - start_time
 
+    # Run inference using ONNX model
+    logits = run_inference(tweet_proc)
+
+    # Find the label with the highest score
+    label_map = ["Negative", "Neutral", "Positive"]
+    highest_label_idx = np.argmax(logits)
+    highest_score = np.max(logits)
+
     # Return the original tweet, the label with the highest score, and the inference time
     return {
         "text": tweet,
-        "label": highest_label,
-        "score": highest_score,
+        "label": label_map[highest_label_idx],
+        "score": round(float(highest_score), 4),
        "inference_time": round(inference_time, 4) # In seconds
     }
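The hunk context hides the middle of preprocess_tweet (file lines 19-23), which this commit does not change. For reference, a hedged sketch of what that loop usually does for cardiffnlp/twitter-roberta-base-sentiment (the standard Cardiff NLP recipe of masking mentions and links; the exact body in this Space may differ):

def preprocess_tweet(tweet: str) -> str:
    tweet_words = []
    for word in tweet.split(' '):
        # Replace user mentions with a generic token, matching the model's training data.
        if word.startswith('@') and len(word) > 1:
            word = '@user'
        # Replace links with a generic token.
        elif word.startswith('http'):
            word = 'http'
        tweet_words.append(word)
    return " ".join(tweet_words)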
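A note on the new /analyze response: the hunk cuts off run_inference before its return statement, but session.run yields raw logits for this sequence-classification model, so the "score" field in the committed code is an unnormalised logit rather than a probability. A minimal sketch (not part of the commit, assuming run_inference returns the (1, 3) logits array from ort_outs) of converting the logits to probabilities before picking the top label:

import numpy as np

def softmax(logits: np.ndarray) -> np.ndarray:
    # Shift by the max for numerical stability, then normalise so the scores sum to 1.
    exp = np.exp(logits - np.max(logits))
    return exp / exp.sum()

# probs = softmax(logits[0])                      # logits as returned by run_inference
# label = ["Negative", "Neutral", "Positive"][int(np.argmax(probs))]
# score = round(float(np.max(probs)), 4)          # now a value in [0, 1]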
export_to_onnx.py
ADDED
@@ -0,0 +1,18 @@
+from transformers import AutoModelForSequenceClassification, AutoTokenizer
+import torch
+from transformers.onnx import export
+from transformers.onnx.features import FeaturesManager
+
+# Load the model and tokenizer
+model_name = "cardiffnlp/twitter-roberta-base-sentiment"
+model = AutoModelForSequenceClassification.from_pretrained(model_name)
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+# Determine the correct export feature
+feature = FeaturesManager.get_supported_features_for_model_type("text-classification")
+
+# Define the path where the ONNX model will be saved
+onnx_model_path = "https://huggingface.co/spaces/Abineshkumar/demodeploy/sentiment_model.onnx"
+
+# Export the model to ONNX
+export(tokenizer, model, feature, onnx_model_path, opset=11, framework="pt")
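As committed, this script is unlikely to run unchanged: FeaturesManager.get_supported_features_for_model_type expects a model type such as "roberta" rather than a task name, the transformers.onnx export helper expects an OnnxConfig and a local output path, and app.py loads a local "sentiment_model.onnx" rather than the Hub URL used here. A hedged alternative sketch using torch.onnx.export (file name, tensor names and opset are illustrative assumptions, not the author's script):

from pathlib import Path

import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model_name = "cardiffnlp/twitter-roberta-base-sentiment"
model = AutoModelForSequenceClassification.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
model.eval()
model.config.return_dict = False  # export a plain tuple output instead of a ModelOutput

# A dummy input just to trace the graph; the text itself is irrelevant.
dummy = tokenizer("hello world", return_tensors="pt")

torch.onnx.export(
    model,
    (dummy["input_ids"], dummy["attention_mask"]),
    str(Path("sentiment_model.onnx")),           # local path, matching what app.py loads
    input_names=["input_ids", "attention_mask"],
    output_names=["logits"],
    dynamic_axes={
        "input_ids": {0: "batch", 1: "sequence"},
        "attention_mask": {0: "batch", 1: "sequence"},
        "logits": {0: "batch"},
    },
    opset_version=14,
)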
requirements.txt
CHANGED
@@ -5,5 +5,8 @@ torch
 scipy
 onnx
 onnxruntime
+onnxruntime-gpu
+numpy
+time
 
 
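Note that time is a Python standard-library module rather than a PyPI package, so pip will generally fail to resolve that line and it can simply be dropped. A hypothetical local smoke test of the updated app (not part of the commit), assuming fastapi and uvicorn are pulled in by the requirement lines not shown in this hunk and sentiment_model.onnx sits next to app.py:

# Start the server in one terminal (7860 is the usual Spaces port):
#   uvicorn app:app --host 0.0.0.0 --port 7860
# Then query the /analyze endpoint, e.g. with requests:
import requests

resp = requests.get(
    "http://localhost:7860/analyze",
    params={"tweet": "I love this new phone, the camera is great!"},
)
print(resp.json())  # {"text": ..., "label": ..., "score": ..., "inference_time": ...}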