# Binary-Addition-LLM-POC / configuration_binaryllm.py
# Source: Hugging Face repo page (uploader: PhysiQuanty)
# Commit: a6c916c ("export inference-ready", verified) — 896 bytes
from transformers import PretrainedConfig
class BinaryLLMConfig(PretrainedConfig):
    """Hyperparameter container for the BinaryLLM binary-addition model.

    Mirrors the usual ``transformers`` config contract: every architecture
    hyperparameter is stored as an instance attribute (coerced to its
    canonical type), and any remaining keyword arguments are forwarded to
    ``PretrainedConfig.__init__`` for generic handling (token ids, etc.).
    """

    model_type = "binaryllm"

    def __init__(
        self,
        vocab_size: int = 4,
        hidden_size: int = 384,
        num_hidden_layers: int = 6,
        num_attention_heads: int = 6,
        intermediate_size: int = 1536,
        max_position_embeddings: int = 4096,
        dropout: float = 0.1,
        activation: str = "gelu",
        **kwargs,
    ):
        # Integer-valued hyperparameters, coerced uniformly so that values
        # deserialized from JSON (which may arrive as floats/strings) are
        # normalized before use.
        integral = {
            "vocab_size": vocab_size,
            "hidden_size": hidden_size,
            "num_hidden_layers": num_hidden_layers,
            "num_attention_heads": num_attention_heads,
            "intermediate_size": intermediate_size,
            "max_position_embeddings": max_position_embeddings,
        }
        for attr, raw in integral.items():
            setattr(self, attr, int(raw))

        self.dropout = float(dropout)
        self.activation = str(activation)

        # Let the base class consume everything else (standard HF pattern).
        super().__init__(**kwargs)