---
license: apache-2.0
datasets:
- ieuniversity/flirty_or_not
language:
- ko
- en
base_model:
- monologg/koelectra-small-v3-discriminator
---

# Is he flirting?

A fine-tuned classifier that checks whether someone is flirting with you.

## Authors

- [@DoTaeIn](https://www.github.com/DoTaeIn)

## Usage

To run inference, first import the required libraries:

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
```

```python
path = "PATH_TO_MODEL"  # local checkpoint directory or Hub model id

tokenizer = AutoTokenizer.from_pretrained(path)
model = AutoModelForSequenceClassification.from_pretrained(path)

model.eval()
text = "Input Text"

device = next(model.parameters()).device

inputs = tokenizer(
    text,
    return_tensors="pt",
    max_length=128,
    padding="max_length",
    truncation=True,
    return_token_type_ids=False
)
inputs = inputs.to(device)

with torch.no_grad():
    out = model(**inputs)

logits = out["logits"]
probs = torch.softmax(logits, dim=-1)
pred_class = torch.argmax(probs, dim=-1).item()
prob_class0 = probs[0, 0].item()
prob_class1 = probs[0, 1].item()

print("pred:", "Flirting" if pred_class else "Not Flirting")
print(f"prob class {pred_class}: {max(prob_class0, prob_class1)}")
```