Spaces:
Sleeping
Sleeping
import torch
import torch.nn.functional as F
from datasets import load_dataset
from transformers import AutoModelForSequenceClassification, AutoTokenizer, Trainer, TrainingArguments
# Map each sentiment label name to the class index expected by the model head.
labels = {name: idx for idx, name in enumerate(('Negative', 'Positive', 'Neutral', 'Irrelevant'))}
# Load the pretrained checkpoint and attach a 4-way sequence-classification head.
# NOTE(review): the classification head is freshly initialized here — predictions
# are not meaningful until the model is fine-tuned; confirm a training step exists.
checkpoint = 'distilbert-base-uncased'
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=4)

# Prefer GPU when one is available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)
| # create function to predict sentence | |
# create function to predict sentence
def pre_sentiment(sentence):
    """Classify *sentence* and return its sentiment label as a string.

    Runs one forward pass of the module-level ``model`` without gradient
    tracking, takes the argmax over the softmax of the logits, and maps the
    class index back through the module-level ``labels`` dict.

    Parameters: sentence -- raw input text (str).
    Returns: one of 'Negative'/'Positive'/'Neutral'/'Irrelevant', or
    'Unknown' if the predicted index is not in the label map.
    """
    model.eval()  # disable dropout etc. for deterministic inference
    inputs = tokenizer(sentence, return_tensors='pt', truncation=True, padding=True)
    # Move every input tensor onto the same device as the model.
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
        probs = F.softmax(outputs.logits, dim=-1)
        pred_idx = torch.argmax(probs, dim=-1).item()
    # Invert the label->index map to recover the label name.
    # (The original also computed a max-probability "score" that was never
    # used or returned; that dead local has been removed.)
    inv_label = {v: k for k, v in labels.items()}
    return inv_label.get(pred_idx, 'Unknown')
# create UI/UX
import gradio as gr

# Minimal text-in / text-out UI wrapping the classifier.
demo = gr.Interface(fn=pre_sentiment, inputs='text', outputs='text', title='Predict Sentiment')

if __name__ == '__main__':
    # Guard the launch so importing this module does not start a web server
    # as a side effect; running the file as a script behaves exactly as before.
    demo.launch()