---
language:
- de
library_name: transformers
tags:
- Text Classification
- Pytorch
- Discourse Classification
- Roberta
---
# Roberta for German Discourse Classification

This is an XLM-RoBERTa model fine-tuned on a German discourse dataset of 60 discourses comprising over 10k sentences in total.


## How to use the model
| | |
```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

def get_label(sentence):
    """Classify a sentence and return {label: probability} for all 6 classes.

    Runs the sentence through the global `tokenizer` and `bert_model`
    (defined below), applies softmax over the logits, and rounds each
    class probability to 3 decimals.
    """
    vectors = tokenizer(sentence, return_tensors='pt').to(device)
    outputs = bert_model(**vectors).logits
    probs = torch.nn.functional.softmax(outputs, dim=1)[0]
    # NOTE: 'Acceptence' (sic) — spelling kept as-is; presumably it matches
    # the label names the model was trained with.
    keys = ['Externalization', 'Elicitation', 'Conflict', 'Acceptence', 'Integration', 'None']
    return {key: round(probs[i].item(), 3) for i, key in enumerate(keys)}

MODEL_NAME = 'RashidNLP/Roberta-German-Discourse'
# Use the GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
OUTPUTS = 6  # number of discourse classes predicted by the model

# Download the fine-tuned model and its tokenizer from the Hugging Face Hub.
bert_model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME, num_labels=OUTPUTS).to(device)
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)

get_label("Gehst du zum Oktoberfest?")

```