# Hugging Face Spaces app (scraped page status header removed) —
# binary sentiment-classification demo built on 'thanhcong2001/Sentiment'.
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification, Trainer, TrainingArguments
import torch.nn.functional as F
from datasets import load_dataset

# Binary sentiment checkpoint hosted on the Hugging Face Hub.
model_name = 'thanhcong2001/Sentiment'
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=2)

# Prefer the GPU when one is present; otherwise run on the CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)
def pre_sentiment(sentence):
    """Classify the sentiment of *sentence* with the module-level model.

    Args:
        sentence: Raw input text to score.

    Returns:
        A string of the form 'Result: {POSITIVE|NEGATIVE} - Score: {confidence}',
        where label index 1 is treated as POSITIVE and the score is the
        softmax probability of the predicted class.
    """
    model.eval()
    inputs = tokenizer(
        sentence,
        return_tensors='pt',
        max_length=64,
        padding=True,
        truncation=True,
    )
    # Fix: move the input tensors onto the model's device. The original
    # dict comprehension ({k: v for ...}) copied the mapping without
    # calling .to(device), which raises a device-mismatch RuntimeError
    # whenever the model was placed on CUDA at import time.
    inputs = {k: v.to(device) for k, v in inputs.items()}
    with torch.no_grad():  # inference only — no gradients needed
        outputs = model(**inputs)
    probs = F.softmax(outputs.logits, dim=-1)
    pre = torch.argmax(probs, dim=-1).item()
    score = torch.max(probs).item()
    result = 'POSITIVE' if pre == 1 else 'NEGATIVE'
    return f'Result: {result} - Score: {score}'
import gradio as gr

# Minimal text-in / text-out web UI wrapping pre_sentiment.
demo = gr.Interface(
    fn=pre_sentiment,
    inputs='text',
    outputs='text',
    title='Predict sentence',
)
demo.launch()