Update README.md
README.md CHANGED

@@ -41,23 +41,60 @@ batch_size: 64
## How to Use the model:
```python
from transformers import pipeline
-
-
-

-"""
-output:
-[[
-{'label': 'sadness', 'score': 0.0005138228880241513},
-{'label': 'joy', 'score': 0.9972520470619202},
-{'label': 'love', 'score': 0.0007443308713845909},
-{'label': 'anger', 'score': 0.0007404946954920888},
-{'label': 'fear', 'score': 0.00032938539516180754},
-{'label': 'surprise', 'score': 0.0004197491507511586}
-]]
-"""
```
-
### Reference

* bhadresh-savani/bert-base-uncased-emotion
## How to Use the model:
```python
from transformers import pipeline
+model_path = "daveni/twitter-xlm-roberta-emotion-es"
+emotion_analysis = pipeline("text-classification", framework="pt", model=model_path, tokenizer=model_path)
+emotion_analysis("Einstein dijo: Solo hay dos cosas infinitas, el universo y los pinches anuncios de bitcoin en Twitter. Paren ya carajo aaaaaaghhgggghhh me quiero murir")
+```
+```
+[{'label': 'anger', 'score': 0.48307016491889954}]
+```
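The call above returns only the top-scoring emotion. If you want a score for every label, the text-classification pipeline can return the full distribution. A minimal sketch, assuming a transformers release where `top_k=None` is accepted (older releases use `return_all_scores=True` instead); the input text here is just an illustrative shortened version of the tweet above:

```python
from transformers import pipeline

model_path = "daveni/twitter-xlm-roberta-emotion-es"
emotion_analysis = pipeline("text-classification", framework="pt", model=model_path, tokenizer=model_path)

# top_k=None asks the pipeline for one {'label', 'score'} dict per emotion,
# instead of only the highest-scoring label.
all_scores = emotion_analysis(
    "Solo hay dos cosas infinitas, el universo y los anuncios de bitcoin en Twitter.",
    top_k=None,
)
print(all_scores)
```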
+## Full classification example
+```python
+from transformers import AutoModelForSequenceClassification
+from transformers import AutoTokenizer, AutoConfig
+import numpy as np
+from scipy.special import softmax
+# Preprocess text (username and link placeholders)
+def preprocess(text):
+    new_text = []
+    for t in text.split(" "):
+        t = '@user' if t.startswith('@') and len(t) > 1 else t
+        t = 'http' if t.startswith('http') else t
+        new_text.append(t)
+    return " ".join(new_text)
+model_path = "Cesar42/bert-base-uncased-emotion_v2"
+tokenizer = AutoTokenizer.from_pretrained(model_path)
+config = AutoConfig.from_pretrained(model_path)
+# PT
+model = AutoModelForSequenceClassification.from_pretrained(model_path)
+text = "Se ha quedao bonito día para publicar vídeo, ¿no? Hoy del tema más diferente que hemos tocado en el canal."
+text = preprocess(text)
+print(text)
+encoded_input = tokenizer(text, return_tensors='pt')
+output = model(**encoded_input)
+scores = output[0][0].detach().numpy()
+scores = softmax(scores)
+# Print labels and scores
+ranking = np.argsort(scores)
+ranking = ranking[::-1]
+for i in range(scores.shape[0]):
+    l = config.id2label[ranking[i]]
+    s = scores[ranking[i]]
+    print(f"{i+1}) {l} {np.round(float(s), 4)}")
+```
+Output:

```
+Se ha quedao bonito día para publicar vídeo, ¿no? Hoy del tema más diferente que hemos tocado en el canal.
+1) joy 0.7887
+2) others 0.1679
+3) surprise 0.0152
+4) sadness 0.0145
+5) anger 0.0077
+6) disgust 0.0033
+7) fear 0.0027
+```
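The snippet above scores one text at a time. As a minimal sketch of one way to score several texts in a single batch with the same checkpoint (the example texts are made up for illustration; raw tweets should first go through the `preprocess()` helper shown above, and PyTorch is assumed since the example uses `return_tensors='pt'`):

```python
import numpy as np
import torch
from scipy.special import softmax
from transformers import AutoConfig, AutoModelForSequenceClassification, AutoTokenizer

model_path = "Cesar42/bert-base-uncased-emotion_v2"
tokenizer = AutoTokenizer.from_pretrained(model_path)
config = AutoConfig.from_pretrained(model_path)
model = AutoModelForSequenceClassification.from_pretrained(model_path)

texts = [
    "Se ha quedao bonito día para publicar vídeo, ¿no?",
    "No me lo puedo creer, qué susto me he llevado.",
]
# Padding/truncation give the batch a uniform tensor shape.
encoded = tokenizer(texts, return_tensors="pt", padding=True, truncation=True)
with torch.no_grad():
    logits = model(**encoded).logits.numpy()
probs = softmax(logits, axis=-1)  # one probability distribution per text
for text, p in zip(texts, probs):
    top = int(np.argmax(p))
    print(f"{text} -> {config.id2label[top]} ({p[top]:.4f})")
```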
+
### Reference

* bhadresh-savani/bert-base-uncased-emotion