Update README.md
README.md CHANGED
@@ -88,13 +88,17 @@ Get the fairseq checkpoint [here](https://drive.proton.me/urls/P83GCPNM40#2f0f87
 If you use GeistBERT in your research, please cite the following paper:
 
 ```
-@
-
-
-
-
-
-
-
+@inproceedings{scheible-schmitt-frei-2025-geistbert,
+    author = {Scheible-Schmitt, Raphael and Frei, Johann},
+    title = {GeistBERT: Breathing Life into German NLP},
+    booktitle = {Proceedings of the Workshop on Beyond English: Natural Language Processing for all Languages in an Era of Large Language Models},
+    month = {September},
+    year = {2025},
+    address = {Varna, Bulgaria},
+    publisher = {INCOMA Ltd., Shoumen, BULGARIA},
+    pages = {42--50},
+    abstract = {Advances in transformer-based language models have highlighted the benefits of language-specific pre-training on high-quality corpora. In this context, German NLP stands to gain from updated architectures and modern datasets tailored to the linguistic characteristics of the German language. GeistBERT seeks to improve German language processing by incrementally training on a diverse corpus and optimizing model performance across various NLP tasks. We pre-trained GeistBERT using fairseq, following the RoBERTa base configuration with Whole Word Masking (WWM), and initialized from GottBERT weights. The model was trained on a 1.3 TB German corpus with dynamic masking and a fixed sequence length of 512 tokens. For evaluation, we fine-tuned the model on standard downstream tasks, including NER (CoNLL 2003, GermEval 2014), text classification (GermEval 2018 coarse/fine, 10kGNAD), and NLI (German XNLI), using $F_1$ score and accuracy as evaluation metrics. GeistBERT achieved strong results across all tasks, leading among base models and setting a new state-of-the-art (SOTA) in GermEval 2018 fine text classification. It also outperformed several larger models, particularly in classification benchmarks. To support research in German NLP, we release GeistBERT under the MIT license.},
+    url = {https://aclanthology.org/2025.globalnlp-1.6},
+    doi = {10.26615/978-954-452-105-9-006}
 }
 ```
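
For reference, the fairseq checkpoint linked in the hunk header above can be loaded through fairseq's RoBERTa hub interface, since the abstract states GeistBERT follows the RoBERTa base configuration. A minimal sketch, assuming the downloaded archive unpacks to a local `GeistBERT/` directory containing `model.pt` plus the dictionary files; these names are assumptions, not taken from this commit:

```python
# Minimal sketch: load the GeistBERT fairseq checkpoint and run fill-mask.
# The ./GeistBERT directory and model.pt file name are assumptions about
# how the downloaded archive unpacks, not documented in this commit.
from fairseq.models.roberta import RobertaModel

geistbert = RobertaModel.from_pretrained(
    "./GeistBERT",              # directory with the unpacked checkpoint (assumed)
    checkpoint_file="model.pt",  # checkpoint file name (assumed)
)
geistbert.eval()  # disable dropout for deterministic predictions

# fairseq's masked-LM helper; <mask> marks the token to predict.
for filled, score, token in geistbert.fill_mask(
    "Berlin ist die <mask> von Deutschland.", topk=3
):
    print(f"{score:.3f}  {filled}")
```

Depending on how the checkpoint was packaged, `from_pretrained` may also need tokenizer arguments (e.g. a `bpe=` setting and the matching vocabulary files), so treat this as a starting point rather than the project's documented usage.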