Update README.md
Browse files

README.md — CHANGED

@@ -23,7 +23,7 @@ The further pre-training is applied with a MLM objective (randomly masking 15\%
 
 ## BureauBERTo model can be loaded like:
 
-```
+```python
 from transformers import AutoModel, CamembertTokenizerFast
 model_name = "colinglab/BureauBERTo"
 tokenizer = CamembertTokenizerFast.from_pretrained(model_name)
@@ -37,7 +37,7 @@ If you find our resource or paper useful, please consider including the followin
 @inproceedings{auriemma2023bureauberto,
 address = {Pisa, Italy},
 series = {{CEUR} {Workshop} {Proceedings}},
-title = {{BureauBERTo}: adapting {UmBERTo} to the {Italian} bureaucratic language,
+title = {{BureauBERTo}: adapting {UmBERTo} to the {Italian} bureaucratic language},
 shorttitle = {{BureauBERTo}},
 language = {en},
 booktitle = {Ital-IA Convegno Nazionale CINI sull'Intelligenza Artificiale-Workshop on AI for Cybersecurity},