attention_logit_softcapping: null
attention_scores_scalar: null
attn_bias: false
bias: false
block_size: 2048
final_logit_softcapping: null
gelu_approximate: none
head_size: 128
hf_config:
  name: OLMo-1B-hf
  org: allenai
intermediate_size: 8192
lm_head_bias: false
mlp_class_name: LLaMAMLP
moe_intermediate_size: null
n_embd: 2048
n_expert: 0
n_expert_per_token: 0
n_head: 16
n_layer: 16
n_query_groups: 16
name: OLMo-1B-hf
norm_1: true
norm_2: true
norm_class_name: LayerNorm
norm_eps: 1.0e-05
norm_qk: false
norm_qk_type: default
padded_vocab_size: 50304
padding_multiple: 512
parallel_residual: false
post_attention_norm: false
post_mlp_norm: false
rope_adjustments: null
rope_base: 10000
rope_condense_ratio: 1
rope_indices: null
rope_local_base_freq: null
rotary_percentage: 1.0
scale_embeddings: false
shared_attention_norm: false
sliding_window_indices: null
sliding_window_size: null
vocab_size: 50280
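
The field names above match litgpt's Config dataclass, so the dump can be loaded by model name rather than re-typed by hand. A minimal sketch, assuming this config ships with litgpt under the name "OLMo-1B-hf" (the import path and from_name lookup are litgpt conventions, not shown in the dump itself):

    # Sketch: instantiate the model described by the config above via litgpt.
    from litgpt import GPT, Config

    config = Config.from_name("OLMo-1B-hf")

    # Sanity-check a few derived values against the dump:
    # head_size = n_embd / n_head -> 2048 / 16 = 128
    assert config.head_size == config.n_embd // config.n_head
    # n_query_groups == n_head means plain multi-head attention (no GQA).
    assert config.n_query_groups == config.n_head

    model = GPT(config)  # randomly initialized weights; load a checkpoint separately

Note that padded_vocab_size (50304) is set explicitly here; it is not simply vocab_size (50280) rounded up to a multiple of padding_multiple (512), since 50304 is a multiple of 128 but not of 512.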