## Transformers

How to use llm-jp/lllm-jp-roberta-random-init with Transformers:
```python
# Use a pipeline as a high-level helper
from transformers import pipeline

pipe = pipeline("fill-mask", model="llm-jp/lllm-jp-roberta-random-init")
```
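A minimal usage sketch (the input sentence is illustrative, not from the model card): the fill-mask pipeline replaces the tokenizer's mask token and returns candidate tokens with scores. Since the checkpoint name suggests randomly initialized weights, the predictions themselves are not expected to be meaningful.

```python
# Illustrative call; use the tokenizer's own mask token rather than hard-coding it
mask = pipe.tokenizer.mask_token
results = pipe(f"The capital of Japan is {mask}.")

for r in results:
    # Each result carries the filled token and its score
    print(r["token_str"], r["score"])
```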
```python
# Load model directly
from transformers import AutoTokenizer, AutoModelForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("llm-jp/lllm-jp-roberta-random-init")
model = AutoModelForMaskedLM.from_pretrained("llm-jp/lllm-jp-roberta-random-init")
```
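When loading the model directly, you run the forward pass yourself. Below is a minimal sketch, assuming the tokenizer defines a mask token: it tokenizes an illustrative sentence, finds the mask position, and decodes the highest-scoring prediction.

```python
import torch

# Illustrative input containing a single mask token
text = f"The capital of Japan is {tokenizer.mask_token}."
inputs = tokenizer(text, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

# Locate the mask position and take the top-scoring token there
mask_pos = (inputs["input_ids"] == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
top_id = logits[0, mask_pos].argmax(dim=-1)
print(tokenizer.decode(top_id))
```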