# Model State Checkpoint
# Framework: transformers 4.35.0
format_version=3
hidden_size=768
num_attention_heads=12
num_hidden_layers=6
vocab_size=30522
intermediate_size=3072
hidden_act=gelu
attention_probs_dropout_prob=0.1
hidden_dropout_prob=0.1
type_vocab_size=2
initializer_range=0.02
layer_norm_eps=1e-12
pad_token_id=0
position_embedding_type=absolute
use_cache=true
classifier_dropout=null
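
These values match the parameters of a BERT-style configuration in the Hugging Face transformers library. A minimal sketch of rebuilding the model architecture from them, assuming the checkpoint does describe a BERT variant (the six-layer depth suggests a distilled or truncated model; the `BertConfig`/`BertModel` choice here is an assumption, not confirmed by the checkpoint itself):

```python
# Sketch: reconstruct the model skeleton from the checkpoint metadata above.
# Assumes transformers 4.35.0 (per the header) and that the values map onto
# BertConfig; swap in the appropriate *Config class if the checkpoint is for
# a different architecture.
from transformers import BertConfig, BertModel

config = BertConfig(
    hidden_size=768,
    num_attention_heads=12,
    num_hidden_layers=6,
    vocab_size=30522,
    intermediate_size=3072,
    hidden_act="gelu",
    attention_probs_dropout_prob=0.1,
    hidden_dropout_prob=0.1,
    type_vocab_size=2,
    initializer_range=0.02,
    layer_norm_eps=1e-12,
    pad_token_id=0,
    position_embedding_type="absolute",
    use_cache=True,
    classifier_dropout=None,
)

# Instantiates randomly initialized weights; the actual checkpoint tensors
# would still need to be loaded, e.g. via model.load_state_dict(...).
model = BertModel(config)
print(model.config)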