# Load the Korean BERT model and its tokenizer directly from the Hugging Face Hub.
# NOTE(review): removed the stray "Quick Links" text that was fused onto the last
# line by the page scraper — it made the statement a syntax error.
from transformers import AutoTokenizer, AutoModelForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("kykim/bert-kor-base")
model = AutoModelForMaskedLM.from_pretrained("kykim/bert-kor-base")
BERT base model for Korean
- Trained on a 70 GB Korean text dataset with a 42,000-token lower-cased subword vocabulary
- Check the model's performance and other Korean language models on GitHub
# Load the Korean BERT encoder with its fast tokenizer for feature extraction.
from transformers import BertTokenizerFast, BertModel

# Both components are pulled from the same Hub checkpoint.
_MODEL_ID = "kykim/bert-kor-base"
tokenizer_bert = BertTokenizerFast.from_pretrained(_MODEL_ID)
model_bert = BertModel.from_pretrained(_MODEL_ID)
Downloads last month: 90,395
# Use a pipeline as a high-level helper.
# NOTE(review): the scraper collapsed this snippet into a single comment line,
# leaving the import and the pipeline construction as dead text — restored as code.
from transformers import pipeline

pipe = pipeline("fill-mask", model="kykim/bert-kor-base")