{"repo_id":"ModuleFormer","entity_id":"py:setup","uri":"program://ModuleFormer/module/setup#L1-L8","kind":"module","name":"setup","path":"setup.py","language":"python","start_line":1,"end_line":8,"context_start_line":1,"context_end_line":8,"code":"from setuptools import setup, find_packages\n\nsetup(name='moduleformer',\n      packages=find_packages(), \n      install_requires=[\n            'torch',\n            'transformers'\n      ])","source_hash":"f2728f0becd30c58953633136656272ca1265d87d0af7c399aa048edc941b2e8","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:test","uri":"program://ModuleFormer/module/test#L1-L38","kind":"module","name":"test","path":"test.py","language":"python","start_line":1,"end_line":38,"context_start_line":1,"context_end_line":38,"code":"import torch\nfrom transformers import AutoTokenizer, AutoModelForCausalLM, AutoConfig, AutoModelForSequenceClassification\nfrom obsidian import SparseGPTForCausalLM, SparseGPTConfig, SparseGPTForSequenceClassification\nAutoConfig.register(\"sparsegpt\", SparseGPTConfig)\nAutoModelForCausalLM.register(SparseGPTConfig, SparseGPTForCausalLM)\nAutoModelForSequenceClassification.register(SparseGPTConfig, SparseGPTForSequenceClassification)\n\nmodel_path = \"/dccstor/codeai/yikang/pretrained_models/obsidian-8b-dolly\"\n\nmodel = AutoModelForSequenceClassification.from_pretrained(model_path)\n\nx = torch.randint(low=10, high=101, size=(5, 7))\n\n# 选择模型和tokenizer\ntokenizer = AutoTokenizer.from_pretrained(model_path)\n\n# 输入文本\ntext = \"The quick brown fox jumps over the lazy dog\"\n\n# 对文本进行 tokenization 和 padding\ninput_ids = tokenizer.encode(text, return_tensors=\"pt\")\n\ny = model(input_ids)\n\nprint(y)\n\n# print(input_ids.shape)\n# for i, o in enumerate(y):\n#     print(i, type(o), (o.shape if 'ensor' in str(type(o)) else o))\n\n# logits = y.logits\n# print(logits.shape)\n\n# prob = logits.softmax(dim = -1)\n\n# for i in range(1, input_ids.shape[1]):\n#     print(input_ids[0,i], prob[0,i-1,input_ids[0,i]])\n","source_hash":"5702c37d1db11fca9e03bf54f18a031e8c55a8eb91df120ec19b04d6ccd83d14","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.modeling_moduleformer","uri":"program://ModuleFormer/module/moduleformer.modeling_moduleformer#L1-L848","kind":"module","name":"moduleformer.modeling_moduleformer","path":"moduleformer/modeling_moduleformer.py","language":"python","start_line":1,"end_line":848,"context_start_line":1,"context_end_line":848,"code":"\"\"\" PyTorch ModuleFormer model.\"\"\"\n\nfrom typing import Optional, Tuple, Union\nimport math\n\nimport torch\nimport torch.utils.checkpoint\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss, MSELoss, BCEWithLogitsLoss\nfrom torch.nn import functional as F\n\nfrom transformers.activations import get_activation\nfrom transformers.modeling_outputs import (\n    BaseModelOutputWithPast, \n    CausalLMOutputWithPast,\n    SequenceClassifierOutputWithPast\n)\nfrom transformers.modeling_utils import PreTrainedModel\nfrom transformers.utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging\nfrom .configuration_moduleformer import ModuleFormerConfig\nfrom .utils.moe import MoE\n\n\nlogger = logging.get_logger(__name__)\n\n_CHECKPOINT_FOR_DOC = \"moduleformer-small\"\n_CONFIG_FOR_DOC = \"ModuleFormerConfig\"\n\n\n# SPARSEGPT_PRETRAINED_MODEL_ARCHIVE_LIST = [\n#     \"moduleformer-small\",\n#     # See all ModuleFormer models at https://huggingface.co/models?filter=moduleformer\n# ]\n\n\n@torch.jit.script\ndef stickbreaking_att(\n    q: torch.Tensor, \n    k: torch.Tensor, \n    v: torch.Tensor, \n    mask: torch.Tensor, \n    cum_weight: torch.Tensor,\n    att_mask: Optional[torch.FloatTensor] = None,\n    ) -> Tuple[torch.Tensor, torch.Tensor]:\n    \"\"\"\n    Compute stick-breaking attention weights.\n\n    Args:\n        q (torch.Tensor): Query tensor.\n        k (torch.Tensor): Key tensor.\n        v (torch.Tensor): Value tensor.\n        mask (torch.Tensor): Mask tensor.\n        cum_weight (torch.Tensor): Cumulative weight tensor.\n        att_mask (Optional[torch.FloatTensor]): Attention mask tensor (default: None).\n\n    Returns:\n        Tuple[torch.Tensor, torch.Tensor]: Tuple containing the output tensor and attention weights.\n    \"\"\"\n    logits = torch.einsum('bikhd,bjhd->bkhij', q, k) / math.sqrt(k.size(-1))\n    mask = (mask[None, None, None, :, :] == 0).expand_as(logits)\n    logits = logits + att_mask if att_mask is not None else logits\n    z = F.sigmoid(logits).masked_fill(mask, 0)\n    log_beta = F.logsigmoid(-logits).masked_fill(mask, 0)\n    re_cum_log_beta = torch.einsum('bnhij,jk->bnhik', log_beta, cum_weight)\n    att = z * re_cum_log_beta.exp()\n    y = torch.einsum('bkhij,bjhd->bikhd', att, v)\n    return y, att\n\n\nclass ModuleFormerAttention(nn.Module):\n    def __init__(self, config):\n        \"\"\"\n        Initialize the ModuleFormerAttention module.\n\n        Args:\n            config: Configuration object with model hyperparameters.\n        \"\"\"\n        super().__init__()\n        \n        self.q_proj = MoE(\n                input_size=config.n_embd, \n                head_size=config.att_hidden, \n                num_experts=config.n_att_experts, \n                top_k=config.k_att,\n                acc_aux_loss=False, \n                bias=False,\n                gating_dropout=config.moe_pdrop,\n                sample_topk=config.sample_topk,\n                gating_size=config.gating_size,\n                aux_loss=config.aux_loss_type,\n                gate_type=config.gate_type,\n            )\n        if 
config.att_hidden == config.n_embd and config.n_head == 1:\n            self.k_proj = nn.Identity()\n            self.v_proj = nn.Identity()\n        else:\n            self.k_proj = nn.Linear(config.n_embd, config.att_hidden)\n            self.v_proj = nn.Linear(config.n_embd, config.att_hidden)\n\n        # regularization\n        self.attn_dropout = nn.Dropout(config.attn_pdrop)\n        # causal mask to ensure that attention is only applied to the left in the input sequence\n\n        self.context_length = config.history_length + config.block_size\n\n        self.register_buffer(\n            \"mask\", \n            torch.tril(torch.ones(self.context_length, self.context_length, dtype=torch.int8))\n        )\n        self.register_buffer(\n            \"cum_weight\", \n            torch.tril(torch.ones(self.context_length, self.context_length), -1)\n        )\n        self.n_head = config.n_head\n        self.top_k = config.k_att\n        self.n_embd = config.n_embd\n        self.att_hidden = config.att_hidden\n        self.head_size = config.att_hidden // config.n_head\n\n    def add_history(self, k, v, hidden, use_cache=False):\n        \"\"\"\n        Add history to key and value tensors.\n\n        Args:\n            k (torch.Tensor): Key tensor.\n            v (torch.Tensor): Value tensor.\n            hidden: Hidden state.\n            use_cache (bool): Whether to use cached history.\n\n        Returns:\n            Tuple[torch.Tensor, torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]: Updated key, value, and history.\n        \"\"\"\n        if hidden is None or not use_cache:\n            new_k = k\n            new_v = v\n        else:\n            k_history, v_history = hidden\n            new_k = torch.cat([k_history, k], dim=1)\n            new_v = torch.cat([v_history, v], dim=1)\n        k_history = new_k.detach()\n        v_history = new_v.detach()\n\n        return new_k, new_v, (k_history, v_history)\n\n    def forward(\n        self,\n        hidden_states: Optional[torch.FloatTensor],\n        attention_mask: Optional[torch.FloatTensor] = None,\n        layer_past: Optional[Tuple[torch.Tensor]] = None,\n        head_mask: Optional[torch.FloatTensor] = None,\n        use_cache: Optional[bool] = False,\n        output_attentions: Optional[bool] = False,\n    ) -> Union[\n        Tuple[torch.Tensor, Tuple[torch.Tensor]],\n        Optional[Tuple[torch.Tensor, Tuple[torch.Tensor], Tuple[torch.Tensor, ...]]],\n    ]:\n        \"\"\"\n        Forward pass of the ModuleFormerAttention module.\n\n        Args:\n            hidden_states (Optional[torch.FloatTensor]): Input hidden states.\n            attention_mask (Optional[torch.FloatTensor]): Attention mask.\n            layer_past (Optional[Tuple[torch.Tensor]]): Past layer state.\n            head_mask (Optional[torch.FloatTensor]): Head mask.\n            use_cache (Optional[bool]): Whether to use cached states.\n            output_attentions (Optional[bool]): Whether to output attention weights.\n\n        Returns:\n            Union[Tuple[torch.Tensor, Tuple[torch.Tensor]], Optional[Tuple[...]]]: Tuple containing outputs.\n        \"\"\"\n        B, T, C = hidden_states.size() # batch size, sequence length, embedding dimensionality (n_embd)\n\n        # calculate query, key, values \n        q, aux_loss = self.q_proj.map(hidden_states)\n        k = self.k_proj(hidden_states)\n        v = self.v_proj(hidden_states)\n\n        k, v, hidden = self.add_history(k, v, layer_past, use_cache)\n        context_length = 
k.size(1)\n        \n        q = q.view(B, T, self.top_k, self.n_head, self.head_size) # (B, T, k, nh, hs)\n        k = k.view(B, context_length, self.n_head, self.head_size) # (B, T, nh, hs)\n        v = v.view(B, context_length, self.n_head, self.head_size) # (B, T, nh, hs)\n\n        mask = torch.tril(torch.ones(context_length, context_length, dtype=torch.int8, device=q.device))[context_length - T:, :]\n        cum_weight=torch.tril(torch.ones(context_length, context_length, device=q.device), -1).type_as(q)\n\n        y, attn_weights = stickbreaking_att(q, k, v, mask=mask, cum_weight=cum_weight, att_mask=attention_mask)\n\n        # output projection\n        y = self.q_proj.reduce(y.reshape(B, T, self.top_k, self.att_hidden).type_as(hidden_states))\n\n        y = y.view(B, T, C) # re-assemble all head outputs side by side\n\n        outputs = (y, hidden, aux_loss)\n        if output_attentions:\n            outputs += (attn_weights,)\n        return outputs\n\n\nclass ModuleFormerBlock(nn.Module):\n    def __init__(self, config):\n        \"\"\"\n        Initialize the ModuleFormerBlock module.\n\n        Args:\n            config: Configuration object with model hyperparameters.\n        \"\"\"\n        super().__init__()\n        self.ln_1 = nn.LayerNorm(config.n_embd)\n        self.attn = ModuleFormerAttention(config)\n        self.ln_2 = nn.LayerNorm(config.n_embd)\n        self.mlpf = MoE(\n                input_size=config.n_embd, \n                head_size=config.ffd_hidden, \n                num_experts=config.n_mlp_experts, \n                top_k=config.k_mlp, \n                bias=False, \n                activation=get_activation(config.activation_function),\n                acc_aux_loss=False,\n                gating_dropout=config.moe_pdrop,\n                sample_topk=config.sample_topk,\n                gating_size=config.gating_size,\n                aux_loss=config.aux_loss_type,\n                gate_type=config.gate_type,\n            )\n        self.resid_dropout = nn.Dropout(config.resid_pdrop)\n\n    def get_aux_loss_and_clear(self):\n        \"\"\"\n        Get auxiliary loss and clear auxiliary loss accumulators in the attention and MLP layers.\n\n        Returns:\n            torch.Tensor: Auxiliary loss.\n        \"\"\"\n        return self.attn.q_proj.get_aux_loss_and_clear() + self.mlpf.get_aux_loss_and_clear()\n\n\n    def forward(\n        self,\n        hidden_states: Optional[torch.FloatTensor],\n        layer_past: Optional[Tuple[torch.Tensor]] = None,\n        attention_mask: Optional[torch.FloatTensor] = None,\n        head_mask: Optional[torch.FloatTensor] = None,\n        use_cache: Optional[bool] = False,\n        output_attentions: Optional[bool] = False,\n    ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]:\n        \"\"\"\n        Forward pass of the ModuleFormerBlock module.\n\n        Args:\n            hidden_states (Optional[torch.FloatTensor]): Input hidden states.\n            layer_past (Optional[Tuple[torch.Tensor]]): Past layer state.\n            attention_mask (Optional[torch.FloatTensor]): Attention mask.\n            head_mask (Optional[torch.FloatTensor]): Head mask.\n            use_cache (Optional[bool]): Whether to use cached states.\n            output_attentions (Optional[bool]): Whether to output attention weights.\n\n        Returns:\n            Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]:\n            Tuple containing outputs or 
optional attention weights.\n        \"\"\"\n        attn_outputs = self.attn(\n            self.ln_1(hidden_states),\n            layer_past=layer_past,\n            attention_mask=attention_mask,\n            head_mask=head_mask,\n            use_cache=use_cache,\n            output_attentions=output_attentions,\n        )\n        attn_output = attn_outputs[0]  # output_attn: a, present, (attentions)\n        hidden = attn_outputs[1]\n        att_aux_loss = attn_outputs[2]\n\n        hidden_states = hidden_states + self.resid_dropout(attn_output)\n        x_mlp, mlp_aux_loss = self.mlpf(self.ln_2(hidden_states))\n        hidden_states = hidden_states + self.resid_dropout(x_mlp)\n\n        aux_loss = att_aux_loss + mlp_aux_loss\n        return (hidden_states, hidden, aux_loss) + attn_outputs[3:]\n\n\nclass ModuleFormerPreTrainedModel(PreTrainedModel):\n    \"\"\"\n    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n    models.\n    \"\"\"\n\n    config_class = ModuleFormerConfig\n    base_model_prefix = \"transformer\"\n    supports_gradient_checkpointing = True\n    _no_split_modules = [\"ModuleFormerBlock\"]\n\n    def __init__(self, *inputs, **kwargs):\n        \"\"\"\n        Initialize the ModuleFormerPreTrainedModel.\n\n        Args:\n            *inputs: Variable length input arguments.\n            **kwargs: Keyword arguments.\n        \"\"\"\n        super().__init__(*inputs, **kwargs)\n\n        self.gradient_checkpointing = False\n\n    def _init_weights(self, module):\n        \"\"\"Initialize the weights.\"\"\"\n        if isinstance(module, (nn.Linear,)):\n            # Slightly different from Mesh Transformer JAX which uses truncated_normal for initialization\n            # cf https://github.com/pytorch/pytorch/pull/5617\n            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n            if module.bias is not None:\n                module.bias.data.zero_()\n        elif isinstance(module, nn.Embedding):\n            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n            if module.padding_idx is not None:\n                module.weight.data[module.padding_idx].zero_()\n        elif isinstance(module, nn.LayerNorm):\n            module.bias.data.zero_()\n            module.weight.data.fill_(1.0)\n\n    def gradient_checkpointing_enable(self, gradient_checkpointing_kwargs={}):\n        for module in self.modules():\n            if hasattr(module, \"gradient_checkpointing\"):\n                self._set_gradient_checkpointing(\n                    module, True, gradient_checkpointing_kwargs\n                )\n\n    def gradient_checkpointing_disable(self):\n        for module in self.modules():\n            if hasattr(module, \"gradient_checkpointing\"):\n                self._set_gradient_checkpointing(\n                    module, False\n                )\n\n    def _set_gradient_checkpointing(\n        self,\n        module,\n        value=False,\n        gradient_checkpointing_kwargs={\"use_reentrant\": False},\n    ):\n        \"\"\"\n        Set gradient checkpointing for the ModuleFormerModel.\n\n        Args:\n            module: The module for which gradient checkpointing is set.\n            value (bool): Whether to enable gradient checkpointing.\n        \"\"\"\n        if isinstance(module, ModuleFormerModel):\n            module.gradient_checkpointing = value\n            module.gradient_checkpointing_kwargs = 
gradient_checkpointing_kwargs\n\n\nSPARSEGPT_START_DOCSTRING = r\"\"\"\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`ModuleFormerConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\"\"\"\n\nSPARSEGPT_INPUTS_DOCSTRING = r\"\"\"\n    Args:\n        input_ids (`torch.LongTensor` of shape `({0})`):\n            Indices of input sequence tokens in the vocabulary.\n\n            Indices can be obtained using [`AutoProcenizer`]. See [`PreTrainedTokenizer.encode`] and\n            [`PreTrainedTokenizer.__call__`] for details.\n\n            [What are input IDs?](../glossary#input-ids)\n        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):\n            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:\n\n            - 1 for tokens that are **not masked**,\n            - 0 for tokens that are **masked**.\n\n            [What are attention masks?](../glossary#attention-mask)\n        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,\n            1]`:\n\n            - 0 corresponds to a *sentence A* token,\n            - 1 corresponds to a *sentence B* token.\n\n            [What are token type IDs?](../glossary#token-type-ids)\n        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):\n            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,\n            config.n_positions - 1]`.\n\n            [What are position IDs?](../glossary#position-ids)\n        head_mask (`torch.FloatTensor` of shape `(num_attention_heads,)` or `(n_layer, num_attention_heads)`, *optional*):\n            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:\n\n            - 1 indicates the head is **not masked**,\n            - 0 indicates the head is **masked**.\n\n        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_dim)`, *optional*):\n            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n            is useful if you want more control over how to convert *input_ids* indices into associated vectors than the\n            model's internal embedding lookup matrix.\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n            tensors for more detail.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n    \"The bare ModuleFormer Model transformer outputting raw hidden-states without any specific head on top.\",\n    SPARSEGPT_START_DOCSTRING,\n)\nclass ModuleFormerModel(ModuleFormerPreTrainedModel):\n    def __init__(self, config):\n        super().__init__(config)\n\n        self.embed_dim = config.n_embd\n        self.vocab_size = config.vocab_size\n        self.wte = nn.Embedding(config.vocab_size, config.n_embd)\n        self.drop = nn.Dropout(config.embd_pdrop)\n        self.h = nn.ModuleList([ModuleFormerBlock(config) for _ in range(config.n_layer)])\n        self.ln_f = nn.LayerNorm(config.n_embd)\n\n        # Initialize weights and apply final processing\n        self.post_init()\n\n    def get_input_embeddings(self):\n        return self.wte\n\n    def set_input_embeddings(self, new_embeddings):\n        self.wte = new_embeddings\n\n    @add_start_docstrings_to_model_forward(SPARSEGPT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n    @add_code_sample_docstrings(\n        checkpoint=_CHECKPOINT_FOR_DOC,\n        output_type=BaseModelOutputWithPast,\n        config_class=_CONFIG_FOR_DOC,\n    )\n    def forward(\n        self,\n        input_ids: Optional[torch.LongTensor] = None,\n        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,\n        attention_mask: Optional[torch.FloatTensor] = None,\n        token_type_ids: Optional[torch.LongTensor] = None,\n        head_mask: Optional[torch.FloatTensor] = None,\n        inputs_embeds: Optional[torch.FloatTensor] = None,\n        use_cache: Optional[bool] = None,\n        output_attentions: Optional[bool] = None,\n        output_hidden_states: Optional[bool] = None,\n        return_dict: Optional[bool] = None,\n    ) -> Union[Tuple, BaseModelOutputWithPast]:\n        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n        output_hidden_states = (\n            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n        )\n        use_cache = use_cache if use_cache is not None else self.config.use_cache\n        return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n        if input_ids is not None and inputs_embeds is not None:\n            raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n        elif input_ids is not None:\n            input_shape = input_ids.size()\n            input_ids = input_ids.view(-1, input_shape[-1])\n            batch_size = input_ids.shape[0]\n        elif inputs_embeds is not None:\n            input_shape = inputs_embeds.size()[:-1]\n            batch_size = inputs_embeds.shape[0]\n        else:\n            raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n        if token_type_ids is not None:\n            token_type_ids = token_type_ids.view(-1, input_shape[-1])\n\n        if past_key_values is None:\n            past_key_values = tuple([None] * len(self.h))\n\n        # Attention mask.\n        if attention_mask is not None:\n            if batch_size <= 0:\n                raise ValueError(\"batch_size has to be defined and > 0\")\n            attention_mask = attention_mask.view(batch_size, -1)\n            # We create a 3D 
attention mask from a 2D tensor mask.\n            # Sizes are [batch_size,\n# ... truncated ...","source_hash":"3f879791c90afd36877381520b7b3cd97436a71176e1b55e6418cb46413338c3","truncated":true}
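test.py above registers the obsidian SparseGPT classes with the Auto* factories before loading a checkpoint; the ModuleFormer classes defined in this module can presumably be wired up the same way. The sketch below is hypothetical: the "moduleformer" model_type key, the package-level imports, and the checkpoint path are assumptions, not taken from this file.

from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer
# Assumed exports; otherwise import from moduleformer.configuration_moduleformer
# and moduleformer.modeling_moduleformer directly.
from moduleformer import ModuleFormerConfig, ModuleFormerForCausalLM

# "moduleformer" is an assumed key; it must match ModuleFormerConfig.model_type.
AutoConfig.register("moduleformer", ModuleFormerConfig)
AutoModelForCausalLM.register(ModuleFormerConfig, ModuleFormerForCausalLM)

model_path = "path/to/a/moduleformer/checkpoint"   # placeholder, not a real path
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(model_path)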
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.modeling_moduleformer.stickbreaking_att","uri":"program://ModuleFormer/function/moduleformer.modeling_moduleformer.stickbreaking_att#L37-L67","kind":"function","name":"stickbreaking_att","path":"moduleformer/modeling_moduleformer.py","language":"python","start_line":37,"end_line":67,"context_start_line":17,"context_end_line":87,"code":")\nfrom transformers.modeling_utils import PreTrainedModel\nfrom transformers.utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging\nfrom .configuration_moduleformer import ModuleFormerConfig\nfrom .utils.moe import MoE\n\n\nlogger = logging.get_logger(__name__)\n\n_CHECKPOINT_FOR_DOC = \"moduleformer-small\"\n_CONFIG_FOR_DOC = \"ModuleFormerConfig\"\n\n\n# SPARSEGPT_PRETRAINED_MODEL_ARCHIVE_LIST = [\n#     \"moduleformer-small\",\n#     # See all ModuleFormer models at https://huggingface.co/models?filter=moduleformer\n# ]\n\n\n@torch.jit.script\ndef stickbreaking_att(\n    q: torch.Tensor, \n    k: torch.Tensor, \n    v: torch.Tensor, \n    mask: torch.Tensor, \n    cum_weight: torch.Tensor,\n    att_mask: Optional[torch.FloatTensor] = None,\n    ) -> Tuple[torch.Tensor, torch.Tensor]:\n    \"\"\"\n    Compute stick-breaking attention weights.\n\n    Args:\n        q (torch.Tensor): Query tensor.\n        k (torch.Tensor): Key tensor.\n        v (torch.Tensor): Value tensor.\n        mask (torch.Tensor): Mask tensor.\n        cum_weight (torch.Tensor): Cumulative weight tensor.\n        att_mask (Optional[torch.FloatTensor]): Attention mask tensor (default: None).\n\n    Returns:\n        Tuple[torch.Tensor, torch.Tensor]: Tuple containing the output tensor and attention weights.\n    \"\"\"\n    logits = torch.einsum('bikhd,bjhd->bkhij', q, k) / math.sqrt(k.size(-1))\n    mask = (mask[None, None, None, :, :] == 0).expand_as(logits)\n    logits = logits + att_mask if att_mask is not None else logits\n    z = F.sigmoid(logits).masked_fill(mask, 0)\n    log_beta = F.logsigmoid(-logits).masked_fill(mask, 0)\n    re_cum_log_beta = torch.einsum('bnhij,jk->bnhik', log_beta, cum_weight)\n    att = z * re_cum_log_beta.exp()\n    y = torch.einsum('bkhij,bjhd->bikhd', att, v)\n    return y, att\n\n\nclass ModuleFormerAttention(nn.Module):\n    def __init__(self, config):\n        \"\"\"\n        Initialize the ModuleFormerAttention module.\n\n        Args:\n            config: Configuration object with model hyperparameters.\n        \"\"\"\n        super().__init__()\n        \n        self.q_proj = MoE(\n                input_size=config.n_embd, \n                head_size=config.att_hidden, \n                num_experts=config.n_att_experts, \n                top_k=config.k_att,\n                acc_aux_loss=False, \n                bias=False,\n                gating_dropout=config.moe_pdrop,","source_hash":"3f879791c90afd36877381520b7b3cd97436a71176e1b55e6418cb46413338c3","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.modeling_moduleformer.ModuleFormerAttention","uri":"program://ModuleFormer/class/moduleformer.modeling_moduleformer.ModuleFormerAttention#L70-L198","kind":"class","name":"ModuleFormerAttention","path":"moduleformer/modeling_moduleformer.py","language":"python","start_line":70,"end_line":198,"context_start_line":50,"context_end_line":218,"code":"        k (torch.Tensor): Key tensor.\n        v (torch.Tensor): Value tensor.\n        mask (torch.Tensor): Mask tensor.\n        cum_weight (torch.Tensor): Cumulative weight tensor.\n        att_mask (Optional[torch.FloatTensor]): Attention mask tensor (default: None).\n\n    Returns:\n        Tuple[torch.Tensor, torch.Tensor]: Tuple containing the output tensor and attention weights.\n    \"\"\"\n    logits = torch.einsum('bikhd,bjhd->bkhij', q, k) / math.sqrt(k.size(-1))\n    mask = (mask[None, None, None, :, :] == 0).expand_as(logits)\n    logits = logits + att_mask if att_mask is not None else logits\n    z = F.sigmoid(logits).masked_fill(mask, 0)\n    log_beta = F.logsigmoid(-logits).masked_fill(mask, 0)\n    re_cum_log_beta = torch.einsum('bnhij,jk->bnhik', log_beta, cum_weight)\n    att = z * re_cum_log_beta.exp()\n    y = torch.einsum('bkhij,bjhd->bikhd', att, v)\n    return y, att\n\n\nclass ModuleFormerAttention(nn.Module):\n    def __init__(self, config):\n        \"\"\"\n        Initialize the ModuleFormerAttention module.\n\n        Args:\n            config: Configuration object with model hyperparameters.\n        \"\"\"\n        super().__init__()\n        \n        self.q_proj = MoE(\n                input_size=config.n_embd, \n                head_size=config.att_hidden, \n                num_experts=config.n_att_experts, \n                top_k=config.k_att,\n                acc_aux_loss=False, \n                bias=False,\n                gating_dropout=config.moe_pdrop,\n                sample_topk=config.sample_topk,\n                gating_size=config.gating_size,\n                aux_loss=config.aux_loss_type,\n                gate_type=config.gate_type,\n            )\n        if config.att_hidden == config.n_embd and config.n_head == 1:\n            self.k_proj = nn.Identity()\n            self.v_proj = nn.Identity()\n        else:\n            self.k_proj = nn.Linear(config.n_embd, config.att_hidden)\n            self.v_proj = nn.Linear(config.n_embd, config.att_hidden)\n\n        # regularization\n        self.attn_dropout = nn.Dropout(config.attn_pdrop)\n        # causal mask to ensure that attention is only applied to the left in the input sequence\n\n        self.context_length = config.history_length + config.block_size\n\n        self.register_buffer(\n            \"mask\", \n            torch.tril(torch.ones(self.context_length, self.context_length, dtype=torch.int8))\n        )\n        self.register_buffer(\n            \"cum_weight\", \n            torch.tril(torch.ones(self.context_length, self.context_length), -1)\n        )\n        self.n_head = config.n_head\n        self.top_k = config.k_att\n        self.n_embd = config.n_embd\n        self.att_hidden = config.att_hidden\n        self.head_size = config.att_hidden // config.n_head\n\n    def add_history(self, k, v, hidden, use_cache=False):\n        \"\"\"\n        Add history to key and value tensors.\n\n        Args:\n            k (torch.Tensor): Key tensor.\n            v (torch.Tensor): Value tensor.\n            hidden: Hidden state.\n            use_cache (bool): Whether to use cached 
history.\n\n        Returns:\n            Tuple[torch.Tensor, torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]: Updated key, value, and history.\n        \"\"\"\n        if hidden is None or not use_cache:\n            new_k = k\n            new_v = v\n        else:\n            k_history, v_history = hidden\n            new_k = torch.cat([k_history, k], dim=1)\n            new_v = torch.cat([v_history, v], dim=1)\n        k_history = new_k.detach()\n        v_history = new_v.detach()\n\n        return new_k, new_v, (k_history, v_history)\n\n    def forward(\n        self,\n        hidden_states: Optional[torch.FloatTensor],\n        attention_mask: Optional[torch.FloatTensor] = None,\n        layer_past: Optional[Tuple[torch.Tensor]] = None,\n        head_mask: Optional[torch.FloatTensor] = None,\n        use_cache: Optional[bool] = False,\n        output_attentions: Optional[bool] = False,\n    ) -> Union[\n        Tuple[torch.Tensor, Tuple[torch.Tensor]],\n        Optional[Tuple[torch.Tensor, Tuple[torch.Tensor], Tuple[torch.Tensor, ...]]],\n    ]:\n        \"\"\"\n        Forward pass of the ModuleFormerAttention module.\n\n        Args:\n            hidden_states (Optional[torch.FloatTensor]): Input hidden states.\n            attention_mask (Optional[torch.FloatTensor]): Attention mask.\n            layer_past (Optional[Tuple[torch.Tensor]]): Past layer state.\n            head_mask (Optional[torch.FloatTensor]): Head mask.\n            use_cache (Optional[bool]): Whether to use cached states.\n            output_attentions (Optional[bool]): Whether to output attention weights.\n\n        Returns:\n            Union[Tuple[torch.Tensor, Tuple[torch.Tensor]], Optional[Tuple[...]]]: Tuple containing outputs.\n        \"\"\"\n        B, T, C = hidden_states.size() # batch size, sequence length, embedding dimensionality (n_embd)\n\n        # calculate query, key, values \n        q, aux_loss = self.q_proj.map(hidden_states)\n        k = self.k_proj(hidden_states)\n        v = self.v_proj(hidden_states)\n\n        k, v, hidden = self.add_history(k, v, layer_past, use_cache)\n        context_length = k.size(1)\n        \n        q = q.view(B, T, self.top_k, self.n_head, self.head_size) # (B, T, k, nh, hs)\n        k = k.view(B, context_length, self.n_head, self.head_size) # (B, T, nh, hs)\n        v = v.view(B, context_length, self.n_head, self.head_size) # (B, T, nh, hs)\n\n        mask = torch.tril(torch.ones(context_length, context_length, dtype=torch.int8, device=q.device))[context_length - T:, :]\n        cum_weight=torch.tril(torch.ones(context_length, context_length, device=q.device), -1).type_as(q)\n\n        y, attn_weights = stickbreaking_att(q, k, v, mask=mask, cum_weight=cum_weight, att_mask=attention_mask)\n\n        # output projection\n        y = self.q_proj.reduce(y.reshape(B, T, self.top_k, self.att_hidden).type_as(hidden_states))\n\n        y = y.view(B, T, C) # re-assemble all head outputs side by side\n\n        outputs = (y, hidden, aux_loss)\n        if output_attentions:\n            outputs += (attn_weights,)\n        return outputs\n\n\nclass ModuleFormerBlock(nn.Module):\n    def __init__(self, config):\n        \"\"\"\n        Initialize the ModuleFormerBlock module.\n\n        Args:\n            config: Configuration object with model hyperparameters.\n        \"\"\"\n        super().__init__()\n        self.ln_1 = nn.LayerNorm(config.n_embd)\n        self.attn = ModuleFormerAttention(config)\n        self.ln_2 = nn.LayerNorm(config.n_embd)\n        self.mlpf = 
MoE(\n                input_size=config.n_embd, \n                head_size=config.ffd_hidden, \n                num_experts=config.n_mlp_experts, \n                top_k=config.k_mlp, \n                bias=False, ","source_hash":"3f879791c90afd36877381520b7b3cd97436a71176e1b55e6418cb46413338c3","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.modeling_moduleformer.ModuleFormerBlock","uri":"program://ModuleFormer/class/moduleformer.modeling_moduleformer.ModuleFormerBlock#L201-L280","kind":"class","name":"ModuleFormerBlock","path":"moduleformer/modeling_moduleformer.py","language":"python","start_line":201,"end_line":280,"context_start_line":181,"context_end_line":300,"code":"        q = q.view(B, T, self.top_k, self.n_head, self.head_size) # (B, T, k, nh, hs)\n        k = k.view(B, context_length, self.n_head, self.head_size) # (B, T, nh, hs)\n        v = v.view(B, context_length, self.n_head, self.head_size) # (B, T, nh, hs)\n\n        mask = torch.tril(torch.ones(context_length, context_length, dtype=torch.int8, device=q.device))[context_length - T:, :]\n        cum_weight=torch.tril(torch.ones(context_length, context_length, device=q.device), -1).type_as(q)\n\n        y, attn_weights = stickbreaking_att(q, k, v, mask=mask, cum_weight=cum_weight, att_mask=attention_mask)\n\n        # output projection\n        y = self.q_proj.reduce(y.reshape(B, T, self.top_k, self.att_hidden).type_as(hidden_states))\n\n        y = y.view(B, T, C) # re-assemble all head outputs side by side\n\n        outputs = (y, hidden, aux_loss)\n        if output_attentions:\n            outputs += (attn_weights,)\n        return outputs\n\n\nclass ModuleFormerBlock(nn.Module):\n    def __init__(self, config):\n        \"\"\"\n        Initialize the ModuleFormerBlock module.\n\n        Args:\n            config: Configuration object with model hyperparameters.\n        \"\"\"\n        super().__init__()\n        self.ln_1 = nn.LayerNorm(config.n_embd)\n        self.attn = ModuleFormerAttention(config)\n        self.ln_2 = nn.LayerNorm(config.n_embd)\n        self.mlpf = MoE(\n                input_size=config.n_embd, \n                head_size=config.ffd_hidden, \n                num_experts=config.n_mlp_experts, \n                top_k=config.k_mlp, \n                bias=False, \n                activation=get_activation(config.activation_function),\n                acc_aux_loss=False,\n                gating_dropout=config.moe_pdrop,\n                sample_topk=config.sample_topk,\n                gating_size=config.gating_size,\n                aux_loss=config.aux_loss_type,\n                gate_type=config.gate_type,\n            )\n        self.resid_dropout = nn.Dropout(config.resid_pdrop)\n\n    def get_aux_loss_and_clear(self):\n        \"\"\"\n        Get auxiliary loss and clear auxiliary loss accumulators in the attention and MLP layers.\n\n        Returns:\n            torch.Tensor: Auxiliary loss.\n        \"\"\"\n        return self.attn.q_proj.get_aux_loss_and_clear() + self.mlpf.get_aux_loss_and_clear()\n\n\n    def forward(\n        self,\n        hidden_states: Optional[torch.FloatTensor],\n        layer_past: Optional[Tuple[torch.Tensor]] = None,\n        attention_mask: Optional[torch.FloatTensor] = None,\n        head_mask: Optional[torch.FloatTensor] = None,\n        use_cache: Optional[bool] = False,\n        output_attentions: Optional[bool] = False,\n    ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]:\n        \"\"\"\n        Forward pass of the ModuleFormerBlock module.\n\n        Args:\n            hidden_states (Optional[torch.FloatTensor]): Input hidden states.\n            layer_past (Optional[Tuple[torch.Tensor]]): Past layer state.\n            attention_mask (Optional[torch.FloatTensor]): Attention mask.\n            
head_mask (Optional[torch.FloatTensor]): Head mask.\n            use_cache (Optional[bool]): Whether to use cached states.\n            output_attentions (Optional[bool]): Whether to output attention weights.\n\n        Returns:\n            Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]:\n            Tuple containing outputs or optional attention weights.\n        \"\"\"\n        attn_outputs = self.attn(\n            self.ln_1(hidden_states),\n            layer_past=layer_past,\n            attention_mask=attention_mask,\n            head_mask=head_mask,\n            use_cache=use_cache,\n            output_attentions=output_attentions,\n        )\n        attn_output = attn_outputs[0]  # output_attn: a, present, (attentions)\n        hidden = attn_outputs[1]\n        att_aux_loss = attn_outputs[2]\n\n        hidden_states = hidden_states + self.resid_dropout(attn_output)\n        x_mlp, mlp_aux_loss = self.mlpf(self.ln_2(hidden_states))\n        hidden_states = hidden_states + self.resid_dropout(x_mlp)\n\n        aux_loss = att_aux_loss + mlp_aux_loss\n        return (hidden_states, hidden, aux_loss) + attn_outputs[3:]\n\n\nclass ModuleFormerPreTrainedModel(PreTrainedModel):\n    \"\"\"\n    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n    models.\n    \"\"\"\n\n    config_class = ModuleFormerConfig\n    base_model_prefix = \"transformer\"\n    supports_gradient_checkpointing = True\n    _no_split_modules = [\"ModuleFormerBlock\"]\n\n    def __init__(self, *inputs, **kwargs):\n        \"\"\"\n        Initialize the ModuleFormerPreTrainedModel.\n\n        Args:\n            *inputs: Variable length input arguments.\n            **kwargs: Keyword arguments.","source_hash":"3f879791c90afd36877381520b7b3cd97436a71176e1b55e6418cb46413338c3","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.modeling_moduleformer.ModuleFormerPreTrainedModel","uri":"program://ModuleFormer/class/moduleformer.modeling_moduleformer.ModuleFormerPreTrainedModel#L283-L351","kind":"class","name":"ModuleFormerPreTrainedModel","path":"moduleformer/modeling_moduleformer.py","language":"python","start_line":283,"end_line":351,"context_start_line":263,"context_end_line":371,"code":"        attn_outputs = self.attn(\n            self.ln_1(hidden_states),\n            layer_past=layer_past,\n            attention_mask=attention_mask,\n            head_mask=head_mask,\n            use_cache=use_cache,\n            output_attentions=output_attentions,\n        )\n        attn_output = attn_outputs[0]  # output_attn: a, present, (attentions)\n        hidden = attn_outputs[1]\n        att_aux_loss = attn_outputs[2]\n\n        hidden_states = hidden_states + self.resid_dropout(attn_output)\n        x_mlp, mlp_aux_loss = self.mlpf(self.ln_2(hidden_states))\n        hidden_states = hidden_states + self.resid_dropout(x_mlp)\n\n        aux_loss = att_aux_loss + mlp_aux_loss\n        return (hidden_states, hidden, aux_loss) + attn_outputs[3:]\n\n\nclass ModuleFormerPreTrainedModel(PreTrainedModel):\n    \"\"\"\n    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained\n    models.\n    \"\"\"\n\n    config_class = ModuleFormerConfig\n    base_model_prefix = \"transformer\"\n    supports_gradient_checkpointing = True\n    _no_split_modules = [\"ModuleFormerBlock\"]\n\n    def __init__(self, *inputs, **kwargs):\n        \"\"\"\n        Initialize the ModuleFormerPreTrainedModel.\n\n        Args:\n            *inputs: Variable length input arguments.\n            **kwargs: Keyword arguments.\n        \"\"\"\n        super().__init__(*inputs, **kwargs)\n\n        self.gradient_checkpointing = False\n\n    def _init_weights(self, module):\n        \"\"\"Initialize the weights.\"\"\"\n        if isinstance(module, (nn.Linear,)):\n            # Slightly different from Mesh Transformer JAX which uses truncated_normal for initialization\n            # cf https://github.com/pytorch/pytorch/pull/5617\n            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n            if module.bias is not None:\n                module.bias.data.zero_()\n        elif isinstance(module, nn.Embedding):\n            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n            if module.padding_idx is not None:\n                module.weight.data[module.padding_idx].zero_()\n        elif isinstance(module, nn.LayerNorm):\n            module.bias.data.zero_()\n            module.weight.data.fill_(1.0)\n\n    def gradient_checkpointing_enable(self, gradient_checkpointing_kwargs={}):\n        for module in self.modules():\n            if hasattr(module, \"gradient_checkpointing\"):\n                self._set_gradient_checkpointing(\n                    module, True, gradient_checkpointing_kwargs\n                )\n\n    def gradient_checkpointing_disable(self):\n        for module in self.modules():\n            if hasattr(module, \"gradient_checkpointing\"):\n                self._set_gradient_checkpointing(\n                    module, False\n                )\n\n    def _set_gradient_checkpointing(\n        self,\n        module,\n        value=False,\n        gradient_checkpointing_kwargs={\"use_reentrant\": False},\n    ):\n        \"\"\"\n        Set gradient checkpointing for the 
ModuleFormerModel.\n\n        Args:\n            module: The module for which gradient checkpointing is set.\n            value (bool): Whether to enable gradient checkpointing.\n        \"\"\"\n        if isinstance(module, ModuleFormerModel):\n            module.gradient_checkpointing = value\n            module.gradient_checkpointing_kwargs = gradient_checkpointing_kwargs\n\n\nSPARSEGPT_START_DOCSTRING = r\"\"\"\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`ModuleFormerConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\"\"\"\n\nSPARSEGPT_INPUTS_DOCSTRING = r\"\"\"\n    Args:\n        input_ids (`torch.LongTensor` of shape `({0})`):\n            Indices of input sequence tokens in the vocabulary.\n\n            Indices can be obtained using [`AutoProcenizer`]. See [`PreTrainedTokenizer.encode`] and\n            [`PreTrainedTokenizer.__call__`] for details.","source_hash":"3f879791c90afd36877381520b7b3cd97436a71176e1b55e6418cb46413338c3","truncated":false}
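ModuleFormerPreTrainedModel re-implements the gradient-checkpointing hooks so that enabling them flips a flag (and stores the kwargs) on the inner ModuleFormerModel. A minimal sketch of how a training script might toggle it, assuming `model` is an already-loaded ModuleFormer model (the helper function itself is hypothetical, not part of the repo):

def train_with_checkpointing(model):
    # The kwargs dict is stored on ModuleFormerModel and later forwarded to
    # torch.utils.checkpoint.checkpoint inside its forward pass.
    model.gradient_checkpointing_enable({"use_reentrant": False})
    model.train()
    # ... run the usual optimization loop here ...
    model.gradient_checkpointing_disable()
    return model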
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.modeling_moduleformer.ModuleFormerModel","uri":"program://ModuleFormer/class/moduleformer.modeling_moduleformer.ModuleFormerModel#L419-L589","kind":"class","name":"ModuleFormerModel","path":"moduleformer/modeling_moduleformer.py","language":"python","start_line":419,"end_line":589,"context_start_line":399,"context_end_line":609,"code":"\n        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_dim)`, *optional*):\n            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This\n            is useful if you want more control over how to convert *input_ids* indices into associated vectors than the\n            model's internal embedding lookup matrix.\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n            tensors for more detail.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n\"\"\"\n\n\n@add_start_docstrings(\n    \"The bare ModuleFormer Model transformer outputting raw hidden-states without any specific head on top.\",\n    SPARSEGPT_START_DOCSTRING,\n)\nclass ModuleFormerModel(ModuleFormerPreTrainedModel):\n    def __init__(self, config):\n        super().__init__(config)\n\n        self.embed_dim = config.n_embd\n        self.vocab_size = config.vocab_size\n        self.wte = nn.Embedding(config.vocab_size, config.n_embd)\n        self.drop = nn.Dropout(config.embd_pdrop)\n        self.h = nn.ModuleList([ModuleFormerBlock(config) for _ in range(config.n_layer)])\n        self.ln_f = nn.LayerNorm(config.n_embd)\n\n        # Initialize weights and apply final processing\n        self.post_init()\n\n    def get_input_embeddings(self):\n        return self.wte\n\n    def set_input_embeddings(self, new_embeddings):\n        self.wte = new_embeddings\n\n    @add_start_docstrings_to_model_forward(SPARSEGPT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n    @add_code_sample_docstrings(\n        checkpoint=_CHECKPOINT_FOR_DOC,\n        output_type=BaseModelOutputWithPast,\n        config_class=_CONFIG_FOR_DOC,\n    )\n    def forward(\n        self,\n        input_ids: Optional[torch.LongTensor] = None,\n        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,\n        attention_mask: Optional[torch.FloatTensor] = None,\n        token_type_ids: Optional[torch.LongTensor] = None,\n        head_mask: Optional[torch.FloatTensor] = None,\n        inputs_embeds: Optional[torch.FloatTensor] = None,\n        use_cache: Optional[bool] = None,\n        output_attentions: Optional[bool] = None,\n        output_hidden_states: Optional[bool] = None,\n        return_dict: Optional[bool] = None,\n    ) -> Union[Tuple, BaseModelOutputWithPast]:\n        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n        output_hidden_states = (\n            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n        )\n        use_cache = use_cache if use_cache is not None else self.config.use_cache\n        return_dict = return_dict if return_dict is not None else 
self.config.use_return_dict\n\n        if input_ids is not None and inputs_embeds is not None:\n            raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n        elif input_ids is not None:\n            input_shape = input_ids.size()\n            input_ids = input_ids.view(-1, input_shape[-1])\n            batch_size = input_ids.shape[0]\n        elif inputs_embeds is not None:\n            input_shape = inputs_embeds.size()[:-1]\n            batch_size = inputs_embeds.shape[0]\n        else:\n            raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n        if token_type_ids is not None:\n            token_type_ids = token_type_ids.view(-1, input_shape[-1])\n\n        if past_key_values is None:\n            past_key_values = tuple([None] * len(self.h))\n\n        # Attention mask.\n        if attention_mask is not None:\n            if batch_size <= 0:\n                raise ValueError(\"batch_size has to be defined and > 0\")\n            attention_mask = attention_mask.view(batch_size, -1)\n            # We create a 3D attention mask from a 2D tensor mask.\n            # Sizes are [batch_size, 1, 1, to_seq_length]\n            # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]\n            # this attention mask is more simple than the triangular masking of causal attention\n            # used in OpenAI GPT, we just need to prepare the broadcast dimension here.\n            attention_mask = attention_mask[:, None, None, None, :]\n\n            # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n            # masked positions, this operation will create a tensor which is 0.0 for\n            # positions we want to attend and the dtype's smallest value for masked positions.\n            # Since we are adding it to the raw scores before the softmax, this is\n            # effectively the same as removing these entirely.\n            attention_mask = attention_mask.to(dtype=self.dtype)  # fp16 compatibility\n            attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min\n\n        # Prepare head mask if needed\n        # 1.0 in head_mask indicate we keep the head\n        # attention_probs has shape bsz x num_attention_heads x N x N\n        # head_mask has shape n_layer x batch x num_attention_heads x N x N\n        head_mask = self.get_head_mask(head_mask, self.config.n_layer)\n\n        if inputs_embeds is None:\n            inputs_embeds = self.wte(input_ids)\n\n        hidden_states = inputs_embeds\n\n        if token_type_ids is not None:\n            token_type_embeds = self.wte(token_type_ids)\n            hidden_states = hidden_states + token_type_embeds\n\n        hidden_states = self.drop(hidden_states)\n\n        output_shape = input_shape + (hidden_states.size(-1),)\n\n        if self.gradient_checkpointing and self.training:\n            if use_cache:\n                logger.warning_once(\n                    \"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. 
Setting \"\n                    \"`use_cache=False`...\"\n                )\n                use_cache = False\n\n        presents = () if use_cache else None\n        all_self_attentions = () if output_attentions else None\n        all_hidden_states = () if output_hidden_states else None\n        self.aux_loss = 0\n        for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):\n            if output_hidden_states:\n                all_hidden_states = all_hidden_states + (hidden_states,)\n\n            if self.gradient_checkpointing and self.training:\n\n                def create_custom_forward(module):\n                    def custom_forward(*inputs):\n                        # None for past_key_value\n                        return module(*inputs, use_cache, output_attentions)\n\n                    return custom_forward\n\n                outputs = torch.utils.checkpoint.checkpoint(\n                    create_custom_forward(block),\n                    hidden_states,\n                    None,\n                    attention_mask,\n                    head_mask[i],\n                    **self.gradient_checkpointing_kwargs,\n                )\n            else:\n                outputs = block(\n                    hidden_states,\n                    layer_past=layer_past,\n                    attention_mask=attention_mask,\n                    head_mask=head_mask[i],\n                    use_cache=use_cache,\n                    output_attentions=output_attentions,\n                )\n\n            hidden_states = outputs[0]\n            if use_cache is True:\n                presents = presents + (outputs[1],)\n\n            self.aux_loss = self.aux_loss + outputs[2]\n\n            if output_attentions:\n                all_self_attentions = all_self_attentions + (outputs[3],)\n\n        hidden_states = self.ln_f(hidden_states)\n\n        hidden_states = hidden_states.view(output_shape)\n        # Add last hidden state\n        if output_hidden_states:\n            all_hidden_states = all_hidden_states + (hidden_states,)\n\n        if not return_dict:\n            return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)\n\n        return BaseModelOutputWithPast(\n            last_hidden_state=hidden_states,\n            past_key_values=presents,\n            hidden_states=all_hidden_states,\n            attentions=all_self_attentions,\n        )\n\n\n@add_start_docstrings(\n    \"\"\"\n    The ModuleFormer Model transformer with a language modeling head on top.\n    \"\"\",\n    SPARSEGPT_START_DOCSTRING,\n)\nclass ModuleFormerForCausalLM(ModuleFormerPreTrainedModel):\n    _keys_to_ignore_on_load_missing = [r\"h\\.\\d+\\.attn\\.causal_mask\"]\n\n    def __init__(self, config):\n        super().__init__(config)\n        self.transformer = ModuleFormerModel(config)\n        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)\n\n        self.aux_loss_weight = config.aux_loss_weight\n\n        # Initialize weights and apply final processing\n        self.post_init()","source_hash":"3f879791c90afd36877381520b7b3cd97436a71176e1b55e6418cb46413338c3","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.modeling_moduleformer.ModuleFormerForCausalLM","uri":"program://ModuleFormer/class/moduleformer.modeling_moduleformer.ModuleFormerForCausalLM#L598-L720","kind":"class","name":"ModuleFormerForCausalLM","path":"moduleformer/modeling_moduleformer.py","language":"python","start_line":598,"end_line":720,"context_start_line":578,"context_end_line":740,"code":"        if output_hidden_states:\n            all_hidden_states = all_hidden_states + (hidden_states,)\n\n        if not return_dict:\n            return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)\n\n        return BaseModelOutputWithPast(\n            last_hidden_state=hidden_states,\n            past_key_values=presents,\n            hidden_states=all_hidden_states,\n            attentions=all_self_attentions,\n        )\n\n\n@add_start_docstrings(\n    \"\"\"\n    The ModuleFormer Model transformer with a language modeling head on top.\n    \"\"\",\n    SPARSEGPT_START_DOCSTRING,\n)\nclass ModuleFormerForCausalLM(ModuleFormerPreTrainedModel):\n    _keys_to_ignore_on_load_missing = [r\"h\\.\\d+\\.attn\\.causal_mask\"]\n\n    def __init__(self, config):\n        super().__init__(config)\n        self.transformer = ModuleFormerModel(config)\n        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)\n\n        self.aux_loss_weight = config.aux_loss_weight\n\n        # Initialize weights and apply final processing\n        self.post_init()\n\n    def get_output_embeddings(self):\n        return self.lm_head\n\n    def set_output_embeddings(self, new_embeddings):\n        self.lm_head = new_embeddings\n\n    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **kwargs):\n        token_type_ids = kwargs.get(\"token_type_ids\", None)\n        # only last token for inputs_ids if past is defined in kwargs\n        if past_key_values:\n            input_ids = input_ids[:, -1].unsqueeze(-1)\n            if token_type_ids is not None:\n                token_type_ids = token_type_ids[:, -1].unsqueeze(-1)\n\n        attention_mask = kwargs.get(\"attention_mask\", None)\n\n        return {\n            \"input_ids\": input_ids,\n            \"past_key_values\": past_key_values,\n            \"use_cache\": kwargs.get(\"use_cache\"),\n            \"attention_mask\": attention_mask,\n            \"token_type_ids\": token_type_ids,\n        }\n\n    @add_start_docstrings_to_model_forward(SPARSEGPT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n    @add_code_sample_docstrings(\n        checkpoint=_CHECKPOINT_FOR_DOC,\n        output_type=CausalLMOutputWithPast,\n        config_class=_CONFIG_FOR_DOC,\n    )\n    def forward(\n        self,\n        input_ids: Optional[torch.LongTensor] = None,\n        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,\n        attention_mask: Optional[torch.FloatTensor] = None,\n        token_type_ids: Optional[torch.LongTensor] = None,\n        head_mask: Optional[torch.FloatTensor] = None,\n        inputs_embeds: Optional[torch.FloatTensor] = None,\n        labels: Optional[torch.LongTensor] = None,\n        use_cache: Optional[bool] = None,\n        output_attentions: Optional[bool] = None,\n        output_hidden_states: Optional[bool] = None,\n        return_dict: Optional[bool] = None,\n    ) -> Union[Tuple, CausalLMOutputWithPast]:\n        r\"\"\"\n        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n      
      Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set\n            `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`\n            are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`\n        \"\"\"\n        return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n        transformer_outputs = self.transformer(\n            input_ids,\n            past_key_values=past_key_values,\n            attention_mask=attention_mask,\n            token_type_ids=token_type_ids,\n            head_mask=head_mask,\n            inputs_embeds=inputs_embeds,\n            use_cache=use_cache,\n            output_attentions=output_attentions,\n            output_hidden_states=output_hidden_states,\n            return_dict=return_dict,\n        )\n        hidden_states = transformer_outputs[0]\n\n        # make sure sampling in fp16 works correctly and\n        # compute loss in fp32 to match with mesh-tf version\n        # https://github.com/EleutherAI/gpt-neo/blob/89ce74164da2fb16179106f54e2269b5da8db333/models/gpt2/gpt2.py#L179\n        lm_logits = self.lm_head(hidden_states).to(torch.float32)\n\n        loss = None\n        if labels is not None:\n            # Shift so that tokens < n predict n\n            shift_logits = lm_logits[..., :-1, :].contiguous()\n            shift_labels = labels[..., 1:].contiguous()\n            # Flatten the tokens\n            loss_fct = CrossEntropyLoss()\n            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))\n\n            loss = loss.to(hidden_states.dtype)\n\n            if self.aux_loss_weight > 0:\n                loss = loss + self.transformer.aux_loss * self.aux_loss_weight\n\n        if not return_dict:\n            output = (lm_logits,) + transformer_outputs[1:]\n            return ((loss,) + output) if loss is not None else output\n\n        return CausalLMOutputWithPast(\n            loss=loss,\n            logits=lm_logits,\n            past_key_values=transformer_outputs.past_key_values,\n            hidden_states=transformer_outputs.hidden_states,\n            attentions=transformer_outputs.attentions,\n        )\n\n    @staticmethod\n    def _reorder_cache(\n        past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor\n    ) -> Tuple[Tuple[torch.Tensor]]:\n        \"\"\"\n        This function is used to re-order the `past_key_values` cache if [`~PretrainedModel.beam_search`] or\n        [`~PretrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct\n        beam_idx at every generation step.\n        \"\"\"\n        return tuple(\n            tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)\n            for layer_past in past_key_values\n        )\n\n@add_start_docstrings(\n    \"\"\"\n    The ModuleFormer Model with a sequence classification head on top (linear layer).\n\n    [`ModuleFormerForSequenceClassification`] uses the last token in order to do the classification, as other causal models\n    (e.g. GPT-1) do.\n\n    Since it does classification on the last token, it requires to know the position of the last token. If a\n    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If\n    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. 
Since it cannot guess the\n    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in\n    each row of the batch).\n    \"\"\",\n    SPARSEGPT_START_DOCSTRING,\n)\nclass ModuleFormerForSequenceClassification(ModuleFormerPreTrainedModel):\n    _keys_to_ignore_on_load_missing = [\n        # r\"h\\.\\d+\\.attn\\.masked_bias\", \n        r\"lm_head.weight\"","source_hash":"3f879791c90afd36877381520b7b3cd97436a71176e1b55e6418cb46413338c3","truncated":false}
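The record above shows the causal-LM head: logits from `lm_head`, shifted-label cross-entropy, and an optional MoE auxiliary loss. Below is a hedged usage sketch, not repo code: the checkpoint path is a placeholder, and the import assumes the `moduleformer` package re-exports the config and model classes (otherwise import them from `moduleformer.configuration_moduleformer` and `moduleformer.modeling_moduleformer`). The registration pattern mirrors the one in `test.py`; the registered name "moduleformer" comes from `ModuleFormerConfig.model_type`.

```python
# Hedged usage sketch: register ModuleFormer with the HF Auto API and compute the LM loss.
import torch
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer
from moduleformer import ModuleFormerConfig, ModuleFormerForCausalLM  # assumed re-exports

AutoConfig.register("moduleformer", ModuleFormerConfig)
AutoModelForCausalLM.register(ModuleFormerConfig, ModuleFormerForCausalLM)

model_path = "<path-to-a-moduleformer-checkpoint>"  # placeholder, not a real path
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(model_path)

input_ids = tokenizer.encode("The quick brown fox", return_tensors="pt")
# Labels can simply equal input_ids; the forward pass shifts them internally.
out = model(input_ids=input_ids, labels=input_ids)
print(out.loss, out.logits.shape)
```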
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.modeling_moduleformer.ModuleFormerForSequenceClassification","uri":"program://ModuleFormer/class/moduleformer.modeling_moduleformer.ModuleFormerForSequenceClassification#L737-L848","kind":"class","name":"ModuleFormerForSequenceClassification","path":"moduleformer/modeling_moduleformer.py","language":"python","start_line":737,"end_line":848,"context_start_line":717,"context_end_line":848,"code":"        return tuple(\n            tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)\n            for layer_past in past_key_values\n        )\n\n@add_start_docstrings(\n    \"\"\"\n    The ModuleFormer Model with a sequence classification head on top (linear layer).\n\n    [`ModuleFormerForSequenceClassification`] uses the last token in order to do the classification, as other causal models\n    (e.g. GPT-1) do.\n\n    Since it does classification on the last token, it requires to know the position of the last token. If a\n    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If\n    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the\n    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in\n    each row of the batch).\n    \"\"\",\n    SPARSEGPT_START_DOCSTRING,\n)\nclass ModuleFormerForSequenceClassification(ModuleFormerPreTrainedModel):\n    _keys_to_ignore_on_load_missing = [\n        # r\"h\\.\\d+\\.attn\\.masked_bias\", \n        r\"lm_head.weight\"\n    ]\n\n    def __init__(self, config):\n        super().__init__(config)\n        self.num_labels = config.num_labels\n        self.transformer = ModuleFormerModel(config)\n        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)\n\n        # Initialize weights and apply final processing\n        self.post_init()\n\n    @add_start_docstrings_to_model_forward(SPARSEGPT_INPUTS_DOCSTRING)\n    @add_code_sample_docstrings(\n        checkpoint=_CHECKPOINT_FOR_DOC,\n        output_type=SequenceClassifierOutputWithPast,\n        config_class=_CONFIG_FOR_DOC,\n    )\n    def forward(\n        self,\n        input_ids: Optional[torch.Tensor] = None,\n        past_key_values: Optional[Tuple[torch.FloatTensor]] = None,\n        attention_mask: Optional[torch.Tensor] = None,\n        token_type_ids: Optional[torch.Tensor] = None,\n        head_mask: Optional[torch.Tensor] = None,\n        inputs_embeds: Optional[torch.Tensor] = None,\n        labels: Optional[torch.Tensor] = None,\n        use_cache: Optional[bool] = None,\n        output_attentions: Optional[bool] = None,\n        output_hidden_states: Optional[bool] = None,\n        return_dict: Optional[bool] = None,\n    ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutputWithPast]:\n        r\"\"\"\n        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,\n            config.num_labels - 1]`. 
If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If\n            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n        \"\"\"\n        return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n        transformer_outputs = self.transformer(\n            input_ids,\n            past_key_values=past_key_values,\n            attention_mask=attention_mask,\n            token_type_ids=token_type_ids,\n            head_mask=head_mask,\n            inputs_embeds=inputs_embeds,\n            use_cache=use_cache,\n            output_attentions=output_attentions,\n            output_hidden_states=output_hidden_states,\n            return_dict=return_dict,\n        )\n        hidden_states = transformer_outputs[0]\n        logits = self.score(hidden_states)\n\n        if input_ids is not None:\n            batch_size, sequence_length = input_ids.shape[:2]\n        else:\n            batch_size, sequence_length = inputs_embeds.shape[:2]\n\n        if self.config.pad_token_id is None and batch_size != 1:\n            raise ValueError(\"Cannot handle batch sizes > 1 if no padding token is defined.\")\n        if self.config.pad_token_id is None:\n            sequence_lengths = -1\n        else:\n            if input_ids is not None:\n                sequence_lengths = (torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1).to(logits.device)\n            else:\n                sequence_lengths = -1\n                logger.warning(\n                    f\"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be \"\n                    \"unexpected if using padding tokens in conjunction with `inputs_embeds.`\"\n                )\n\n        pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]\n\n        loss = None\n        if labels is not None:\n            if self.config.problem_type is None:\n                if self.num_labels == 1:\n                    self.config.problem_type = \"regression\"\n                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):\n                    self.config.problem_type = \"single_label_classification\"\n                else:\n                    self.config.problem_type = \"multi_label_classification\"\n\n            if self.config.problem_type == \"regression\":\n                loss_fct = MSELoss()\n                if self.num_labels == 1:\n                    loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())\n                else:\n                    loss = loss_fct(pooled_logits, labels)\n            elif self.config.problem_type == \"single_label_classification\":\n                loss_fct = CrossEntropyLoss()\n                loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))\n            elif self.config.problem_type == \"multi_label_classification\":\n                loss_fct = BCEWithLogitsLoss()\n                loss = loss_fct(pooled_logits, labels)\n        if not return_dict:\n            output = (pooled_logits,) + transformer_outputs[1:]\n            return ((loss,) + output) if loss is not None else output\n\n        return SequenceClassifierOutputWithPast(\n            loss=loss,\n            logits=pooled_logits,\n            past_key_values=transformer_outputs.past_key_values,\n            hidden_states=transformer_outputs.hidden_states,\n            attentions=transformer_outputs.attentions,\n        
)","source_hash":"3f879791c90afd36877381520b7b3cd97436a71176e1b55e6418cb46413338c3","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.modeling_moduleformer.__init__","uri":"program://ModuleFormer/function/moduleformer.modeling_moduleformer.__init__#L743-L750","kind":"function","name":"__init__","path":"moduleformer/modeling_moduleformer.py","language":"python","start_line":743,"end_line":750,"context_start_line":723,"context_end_line":770,"code":"    \"\"\"\n    The ModuleFormer Model with a sequence classification head on top (linear layer).\n\n    [`ModuleFormerForSequenceClassification`] uses the last token in order to do the classification, as other causal models\n    (e.g. GPT-1) do.\n\n    Since it does classification on the last token, it requires to know the position of the last token. If a\n    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If\n    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the\n    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in\n    each row of the batch).\n    \"\"\",\n    SPARSEGPT_START_DOCSTRING,\n)\nclass ModuleFormerForSequenceClassification(ModuleFormerPreTrainedModel):\n    _keys_to_ignore_on_load_missing = [\n        # r\"h\\.\\d+\\.attn\\.masked_bias\", \n        r\"lm_head.weight\"\n    ]\n\n    def __init__(self, config):\n        super().__init__(config)\n        self.num_labels = config.num_labels\n        self.transformer = ModuleFormerModel(config)\n        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)\n\n        # Initialize weights and apply final processing\n        self.post_init()\n\n    @add_start_docstrings_to_model_forward(SPARSEGPT_INPUTS_DOCSTRING)\n    @add_code_sample_docstrings(\n        checkpoint=_CHECKPOINT_FOR_DOC,\n        output_type=SequenceClassifierOutputWithPast,\n        config_class=_CONFIG_FOR_DOC,\n    )\n    def forward(\n        self,\n        input_ids: Optional[torch.Tensor] = None,\n        past_key_values: Optional[Tuple[torch.FloatTensor]] = None,\n        attention_mask: Optional[torch.Tensor] = None,\n        token_type_ids: Optional[torch.Tensor] = None,\n        head_mask: Optional[torch.Tensor] = None,\n        inputs_embeds: Optional[torch.Tensor] = None,\n        labels: Optional[torch.Tensor] = None,\n        use_cache: Optional[bool] = None,\n        output_attentions: Optional[bool] = None,\n        output_hidden_states: Optional[bool] = None,\n        return_dict: Optional[bool] = None,","source_hash":"3f879791c90afd36877381520b7b3cd97436a71176e1b55e6418cb46413338c3","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.modeling_moduleformer.add_history","uri":"program://ModuleFormer/function/moduleformer.modeling_moduleformer.add_history#L120-L143","kind":"function","name":"add_history","path":"moduleformer/modeling_moduleformer.py","language":"python","start_line":120,"end_line":143,"context_start_line":100,"context_end_line":163,"code":"        # regularization\n        self.attn_dropout = nn.Dropout(config.attn_pdrop)\n        # causal mask to ensure that attention is only applied to the left in the input sequence\n\n        self.context_length = config.history_length + config.block_size\n\n        self.register_buffer(\n            \"mask\", \n            torch.tril(torch.ones(self.context_length, self.context_length, dtype=torch.int8))\n        )\n        self.register_buffer(\n            \"cum_weight\", \n            torch.tril(torch.ones(self.context_length, self.context_length), -1)\n        )\n        self.n_head = config.n_head\n        self.top_k = config.k_att\n        self.n_embd = config.n_embd\n        self.att_hidden = config.att_hidden\n        self.head_size = config.att_hidden // config.n_head\n\n    def add_history(self, k, v, hidden, use_cache=False):\n        \"\"\"\n        Add history to key and value tensors.\n\n        Args:\n            k (torch.Tensor): Key tensor.\n            v (torch.Tensor): Value tensor.\n            hidden: Hidden state.\n            use_cache (bool): Whether to use cached history.\n\n        Returns:\n            Tuple[torch.Tensor, torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]: Updated key, value, and history.\n        \"\"\"\n        if hidden is None or not use_cache:\n            new_k = k\n            new_v = v\n        else:\n            k_history, v_history = hidden\n            new_k = torch.cat([k_history, k], dim=1)\n            new_v = torch.cat([v_history, v], dim=1)\n        k_history = new_k.detach()\n        v_history = new_v.detach()\n\n        return new_k, new_v, (k_history, v_history)\n\n    def forward(\n        self,\n        hidden_states: Optional[torch.FloatTensor],\n        attention_mask: Optional[torch.FloatTensor] = None,\n        layer_past: Optional[Tuple[torch.Tensor]] = None,\n        head_mask: Optional[torch.FloatTensor] = None,\n        use_cache: Optional[bool] = False,\n        output_attentions: Optional[bool] = False,\n    ) -> Union[\n        Tuple[torch.Tensor, Tuple[torch.Tensor]],\n        Optional[Tuple[torch.Tensor, Tuple[torch.Tensor], Tuple[torch.Tensor, ...]]],\n    ]:\n        \"\"\"\n        Forward pass of the ModuleFormerAttention module.\n\n        Args:\n            hidden_states (Optional[torch.FloatTensor]): Input hidden states.\n            attention_mask (Optional[torch.FloatTensor]): Attention mask.\n            layer_past (Optional[Tuple[torch.Tensor]]): Past layer state.","source_hash":"3f879791c90afd36877381520b7b3cd97436a71176e1b55e6418cb46413338c3","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.modeling_moduleformer.forward","uri":"program://ModuleFormer/function/moduleformer.modeling_moduleformer.forward#L758-L848","kind":"function","name":"forward","path":"moduleformer/modeling_moduleformer.py","language":"python","start_line":758,"end_line":848,"context_start_line":738,"context_end_line":848,"code":"    _keys_to_ignore_on_load_missing = [\n        # r\"h\\.\\d+\\.attn\\.masked_bias\", \n        r\"lm_head.weight\"\n    ]\n\n    def __init__(self, config):\n        super().__init__(config)\n        self.num_labels = config.num_labels\n        self.transformer = ModuleFormerModel(config)\n        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)\n\n        # Initialize weights and apply final processing\n        self.post_init()\n\n    @add_start_docstrings_to_model_forward(SPARSEGPT_INPUTS_DOCSTRING)\n    @add_code_sample_docstrings(\n        checkpoint=_CHECKPOINT_FOR_DOC,\n        output_type=SequenceClassifierOutputWithPast,\n        config_class=_CONFIG_FOR_DOC,\n    )\n    def forward(\n        self,\n        input_ids: Optional[torch.Tensor] = None,\n        past_key_values: Optional[Tuple[torch.FloatTensor]] = None,\n        attention_mask: Optional[torch.Tensor] = None,\n        token_type_ids: Optional[torch.Tensor] = None,\n        head_mask: Optional[torch.Tensor] = None,\n        inputs_embeds: Optional[torch.Tensor] = None,\n        labels: Optional[torch.Tensor] = None,\n        use_cache: Optional[bool] = None,\n        output_attentions: Optional[bool] = None,\n        output_hidden_states: Optional[bool] = None,\n        return_dict: Optional[bool] = None,\n    ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutputWithPast]:\n        r\"\"\"\n        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,\n            config.num_labels - 1]`. 
If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If\n            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).\n        \"\"\"\n        return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n        transformer_outputs = self.transformer(\n            input_ids,\n            past_key_values=past_key_values,\n            attention_mask=attention_mask,\n            token_type_ids=token_type_ids,\n            head_mask=head_mask,\n            inputs_embeds=inputs_embeds,\n            use_cache=use_cache,\n            output_attentions=output_attentions,\n            output_hidden_states=output_hidden_states,\n            return_dict=return_dict,\n        )\n        hidden_states = transformer_outputs[0]\n        logits = self.score(hidden_states)\n\n        if input_ids is not None:\n            batch_size, sequence_length = input_ids.shape[:2]\n        else:\n            batch_size, sequence_length = inputs_embeds.shape[:2]\n\n        if self.config.pad_token_id is None and batch_size != 1:\n            raise ValueError(\"Cannot handle batch sizes > 1 if no padding token is defined.\")\n        if self.config.pad_token_id is None:\n            sequence_lengths = -1\n        else:\n            if input_ids is not None:\n                sequence_lengths = (torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1).to(logits.device)\n            else:\n                sequence_lengths = -1\n                logger.warning(\n                    f\"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be \"\n                    \"unexpected if using padding tokens in conjunction with `inputs_embeds.`\"\n                )\n\n        pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]\n\n        loss = None\n        if labels is not None:\n            if self.config.problem_type is None:\n                if self.num_labels == 1:\n                    self.config.problem_type = \"regression\"\n                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):\n                    self.config.problem_type = \"single_label_classification\"\n                else:\n                    self.config.problem_type = \"multi_label_classification\"\n\n            if self.config.problem_type == \"regression\":\n                loss_fct = MSELoss()\n                if self.num_labels == 1:\n                    loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())\n                else:\n                    loss = loss_fct(pooled_logits, labels)\n            elif self.config.problem_type == \"single_label_classification\":\n                loss_fct = CrossEntropyLoss()\n                loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))\n            elif self.config.problem_type == \"multi_label_classification\":\n                loss_fct = BCEWithLogitsLoss()\n                loss = loss_fct(pooled_logits, labels)\n        if not return_dict:\n            output = (pooled_logits,) + transformer_outputs[1:]\n            return ((loss,) + output) if loss is not None else output\n\n        return SequenceClassifierOutputWithPast(\n            loss=loss,\n            logits=pooled_logits,\n            past_key_values=transformer_outputs.past_key_values,\n            hidden_states=transformer_outputs.hidden_states,\n            attentions=transformer_outputs.attentions,\n        
)","source_hash":"3f879791c90afd36877381520b7b3cd97436a71176e1b55e6418cb46413338c3","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.modeling_moduleformer.get_aux_loss_and_clear","uri":"program://ModuleFormer/function/moduleformer.modeling_moduleformer.get_aux_loss_and_clear#L229-L236","kind":"function","name":"get_aux_loss_and_clear","path":"moduleformer/modeling_moduleformer.py","language":"python","start_line":229,"end_line":236,"context_start_line":209,"context_end_line":256,"code":"        super().__init__()\n        self.ln_1 = nn.LayerNorm(config.n_embd)\n        self.attn = ModuleFormerAttention(config)\n        self.ln_2 = nn.LayerNorm(config.n_embd)\n        self.mlpf = MoE(\n                input_size=config.n_embd, \n                head_size=config.ffd_hidden, \n                num_experts=config.n_mlp_experts, \n                top_k=config.k_mlp, \n                bias=False, \n                activation=get_activation(config.activation_function),\n                acc_aux_loss=False,\n                gating_dropout=config.moe_pdrop,\n                sample_topk=config.sample_topk,\n                gating_size=config.gating_size,\n                aux_loss=config.aux_loss_type,\n                gate_type=config.gate_type,\n            )\n        self.resid_dropout = nn.Dropout(config.resid_pdrop)\n\n    def get_aux_loss_and_clear(self):\n        \"\"\"\n        Get auxiliary loss and clear auxiliary loss accumulators in the attention and MLP layers.\n\n        Returns:\n            torch.Tensor: Auxiliary loss.\n        \"\"\"\n        return self.attn.q_proj.get_aux_loss_and_clear() + self.mlpf.get_aux_loss_and_clear()\n\n\n    def forward(\n        self,\n        hidden_states: Optional[torch.FloatTensor],\n        layer_past: Optional[Tuple[torch.Tensor]] = None,\n        attention_mask: Optional[torch.FloatTensor] = None,\n        head_mask: Optional[torch.FloatTensor] = None,\n        use_cache: Optional[bool] = False,\n        output_attentions: Optional[bool] = False,\n    ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]:\n        \"\"\"\n        Forward pass of the ModuleFormerBlock module.\n\n        Args:\n            hidden_states (Optional[torch.FloatTensor]): Input hidden states.\n            layer_past (Optional[Tuple[torch.Tensor]]): Past layer state.\n            attention_mask (Optional[torch.FloatTensor]): Attention mask.\n            head_mask (Optional[torch.FloatTensor]): Head mask.\n            use_cache (Optional[bool]): Whether to use cached states.","source_hash":"3f879791c90afd36877381520b7b3cd97436a71176e1b55e6418cb46413338c3","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.modeling_moduleformer._init_weights","uri":"program://ModuleFormer/function/moduleformer.modeling_moduleformer._init_weights#L306-L320","kind":"function","name":"_init_weights","path":"moduleformer/modeling_moduleformer.py","language":"python","start_line":306,"end_line":320,"context_start_line":286,"context_end_line":340,"code":"    models.\n    \"\"\"\n\n    config_class = ModuleFormerConfig\n    base_model_prefix = \"transformer\"\n    supports_gradient_checkpointing = True\n    _no_split_modules = [\"ModuleFormerBlock\"]\n\n    def __init__(self, *inputs, **kwargs):\n        \"\"\"\n        Initialize the ModuleFormerPreTrainedModel.\n\n        Args:\n            *inputs: Variable length input arguments.\n            **kwargs: Keyword arguments.\n        \"\"\"\n        super().__init__(*inputs, **kwargs)\n\n        self.gradient_checkpointing = False\n\n    def _init_weights(self, module):\n        \"\"\"Initialize the weights.\"\"\"\n        if isinstance(module, (nn.Linear,)):\n            # Slightly different from Mesh Transformer JAX which uses truncated_normal for initialization\n            # cf https://github.com/pytorch/pytorch/pull/5617\n            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n            if module.bias is not None:\n                module.bias.data.zero_()\n        elif isinstance(module, nn.Embedding):\n            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n            if module.padding_idx is not None:\n                module.weight.data[module.padding_idx].zero_()\n        elif isinstance(module, nn.LayerNorm):\n            module.bias.data.zero_()\n            module.weight.data.fill_(1.0)\n\n    def gradient_checkpointing_enable(self, gradient_checkpointing_kwargs={}):\n        for module in self.modules():\n            if hasattr(module, \"gradient_checkpointing\"):\n                self._set_gradient_checkpointing(\n                    module, True, gradient_checkpointing_kwargs\n                )\n\n    def gradient_checkpointing_disable(self):\n        for module in self.modules():\n            if hasattr(module, \"gradient_checkpointing\"):\n                self._set_gradient_checkpointing(\n                    module, False\n                )\n\n    def _set_gradient_checkpointing(\n        self,\n        module,\n        value=False,\n        gradient_checkpointing_kwargs={\"use_reentrant\": False},","source_hash":"3f879791c90afd36877381520b7b3cd97436a71176e1b55e6418cb46413338c3","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.modeling_moduleformer.gradient_checkpointing_enable","uri":"program://ModuleFormer/function/moduleformer.modeling_moduleformer.gradient_checkpointing_enable#L322-L327","kind":"function","name":"gradient_checkpointing_enable","path":"moduleformer/modeling_moduleformer.py","language":"python","start_line":322,"end_line":327,"context_start_line":302,"context_end_line":347,"code":"        super().__init__(*inputs, **kwargs)\n\n        self.gradient_checkpointing = False\n\n    def _init_weights(self, module):\n        \"\"\"Initialize the weights.\"\"\"\n        if isinstance(module, (nn.Linear,)):\n            # Slightly different from Mesh Transformer JAX which uses truncated_normal for initialization\n            # cf https://github.com/pytorch/pytorch/pull/5617\n            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n            if module.bias is not None:\n                module.bias.data.zero_()\n        elif isinstance(module, nn.Embedding):\n            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n            if module.padding_idx is not None:\n                module.weight.data[module.padding_idx].zero_()\n        elif isinstance(module, nn.LayerNorm):\n            module.bias.data.zero_()\n            module.weight.data.fill_(1.0)\n\n    def gradient_checkpointing_enable(self, gradient_checkpointing_kwargs={}):\n        for module in self.modules():\n            if hasattr(module, \"gradient_checkpointing\"):\n                self._set_gradient_checkpointing(\n                    module, True, gradient_checkpointing_kwargs\n                )\n\n    def gradient_checkpointing_disable(self):\n        for module in self.modules():\n            if hasattr(module, \"gradient_checkpointing\"):\n                self._set_gradient_checkpointing(\n                    module, False\n                )\n\n    def _set_gradient_checkpointing(\n        self,\n        module,\n        value=False,\n        gradient_checkpointing_kwargs={\"use_reentrant\": False},\n    ):\n        \"\"\"\n        Set gradient checkpointing for the ModuleFormerModel.\n\n        Args:\n            module: The module for which gradient checkpointing is set.\n            value (bool): Whether to enable gradient checkpointing.","source_hash":"3f879791c90afd36877381520b7b3cd97436a71176e1b55e6418cb46413338c3","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.modeling_moduleformer.gradient_checkpointing_disable","uri":"program://ModuleFormer/function/moduleformer.modeling_moduleformer.gradient_checkpointing_disable#L329-L334","kind":"function","name":"gradient_checkpointing_disable","path":"moduleformer/modeling_moduleformer.py","language":"python","start_line":329,"end_line":334,"context_start_line":309,"context_end_line":354,"code":"            # Slightly different from Mesh Transformer JAX which uses truncated_normal for initialization\n            # cf https://github.com/pytorch/pytorch/pull/5617\n            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n            if module.bias is not None:\n                module.bias.data.zero_()\n        elif isinstance(module, nn.Embedding):\n            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)\n            if module.padding_idx is not None:\n                module.weight.data[module.padding_idx].zero_()\n        elif isinstance(module, nn.LayerNorm):\n            module.bias.data.zero_()\n            module.weight.data.fill_(1.0)\n\n    def gradient_checkpointing_enable(self, gradient_checkpointing_kwargs={}):\n        for module in self.modules():\n            if hasattr(module, \"gradient_checkpointing\"):\n                self._set_gradient_checkpointing(\n                    module, True, gradient_checkpointing_kwargs\n                )\n\n    def gradient_checkpointing_disable(self):\n        for module in self.modules():\n            if hasattr(module, \"gradient_checkpointing\"):\n                self._set_gradient_checkpointing(\n                    module, False\n                )\n\n    def _set_gradient_checkpointing(\n        self,\n        module,\n        value=False,\n        gradient_checkpointing_kwargs={\"use_reentrant\": False},\n    ):\n        \"\"\"\n        Set gradient checkpointing for the ModuleFormerModel.\n\n        Args:\n            module: The module for which gradient checkpointing is set.\n            value (bool): Whether to enable gradient checkpointing.\n        \"\"\"\n        if isinstance(module, ModuleFormerModel):\n            module.gradient_checkpointing = value\n            module.gradient_checkpointing_kwargs = gradient_checkpointing_kwargs\n\n\nSPARSEGPT_START_DOCSTRING = r\"\"\"","source_hash":"3f879791c90afd36877381520b7b3cd97436a71176e1b55e6418cb46413338c3","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.modeling_moduleformer._set_gradient_checkpointing","uri":"program://ModuleFormer/function/moduleformer.modeling_moduleformer._set_gradient_checkpointing#L336-L351","kind":"function","name":"_set_gradient_checkpointing","path":"moduleformer/modeling_moduleformer.py","language":"python","start_line":336,"end_line":351,"context_start_line":316,"context_end_line":371,"code":"            if module.padding_idx is not None:\n                module.weight.data[module.padding_idx].zero_()\n        elif isinstance(module, nn.LayerNorm):\n            module.bias.data.zero_()\n            module.weight.data.fill_(1.0)\n\n    def gradient_checkpointing_enable(self, gradient_checkpointing_kwargs={}):\n        for module in self.modules():\n            if hasattr(module, \"gradient_checkpointing\"):\n                self._set_gradient_checkpointing(\n                    module, True, gradient_checkpointing_kwargs\n                )\n\n    def gradient_checkpointing_disable(self):\n        for module in self.modules():\n            if hasattr(module, \"gradient_checkpointing\"):\n                self._set_gradient_checkpointing(\n                    module, False\n                )\n\n    def _set_gradient_checkpointing(\n        self,\n        module,\n        value=False,\n        gradient_checkpointing_kwargs={\"use_reentrant\": False},\n    ):\n        \"\"\"\n        Set gradient checkpointing for the ModuleFormerModel.\n\n        Args:\n            module: The module for which gradient checkpointing is set.\n            value (bool): Whether to enable gradient checkpointing.\n        \"\"\"\n        if isinstance(module, ModuleFormerModel):\n            module.gradient_checkpointing = value\n            module.gradient_checkpointing_kwargs = gradient_checkpointing_kwargs\n\n\nSPARSEGPT_START_DOCSTRING = r\"\"\"\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`ModuleFormerConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\"\"\"\n\nSPARSEGPT_INPUTS_DOCSTRING = r\"\"\"\n    Args:\n        input_ids (`torch.LongTensor` of shape `({0})`):\n            Indices of input sequence tokens in the vocabulary.\n\n            Indices can be obtained using [`AutoProcenizer`]. See [`PreTrainedTokenizer.encode`] and\n            [`PreTrainedTokenizer.__call__`] for details.","source_hash":"3f879791c90afd36877381520b7b3cd97436a71176e1b55e6418cb46413338c3","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.modeling_moduleformer.get_input_embeddings","uri":"program://ModuleFormer/function/moduleformer.modeling_moduleformer.get_input_embeddings#L433-L434","kind":"function","name":"get_input_embeddings","path":"moduleformer/modeling_moduleformer.py","language":"python","start_line":433,"end_line":434,"context_start_line":413,"context_end_line":454,"code":"\n\n@add_start_docstrings(\n    \"The bare ModuleFormer Model transformer outputting raw hidden-states without any specific head on top.\",\n    SPARSEGPT_START_DOCSTRING,\n)\nclass ModuleFormerModel(ModuleFormerPreTrainedModel):\n    def __init__(self, config):\n        super().__init__(config)\n\n        self.embed_dim = config.n_embd\n        self.vocab_size = config.vocab_size\n        self.wte = nn.Embedding(config.vocab_size, config.n_embd)\n        self.drop = nn.Dropout(config.embd_pdrop)\n        self.h = nn.ModuleList([ModuleFormerBlock(config) for _ in range(config.n_layer)])\n        self.ln_f = nn.LayerNorm(config.n_embd)\n\n        # Initialize weights and apply final processing\n        self.post_init()\n\n    def get_input_embeddings(self):\n        return self.wte\n\n    def set_input_embeddings(self, new_embeddings):\n        self.wte = new_embeddings\n\n    @add_start_docstrings_to_model_forward(SPARSEGPT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n    @add_code_sample_docstrings(\n        checkpoint=_CHECKPOINT_FOR_DOC,\n        output_type=BaseModelOutputWithPast,\n        config_class=_CONFIG_FOR_DOC,\n    )\n    def forward(\n        self,\n        input_ids: Optional[torch.LongTensor] = None,\n        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,\n        attention_mask: Optional[torch.FloatTensor] = None,\n        token_type_ids: Optional[torch.LongTensor] = None,\n        head_mask: Optional[torch.FloatTensor] = None,\n        inputs_embeds: Optional[torch.FloatTensor] = None,\n        use_cache: Optional[bool] = None,\n        output_attentions: Optional[bool] = None,","source_hash":"3f879791c90afd36877381520b7b3cd97436a71176e1b55e6418cb46413338c3","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.modeling_moduleformer.set_input_embeddings","uri":"program://ModuleFormer/function/moduleformer.modeling_moduleformer.set_input_embeddings#L436-L437","kind":"function","name":"set_input_embeddings","path":"moduleformer/modeling_moduleformer.py","language":"python","start_line":436,"end_line":437,"context_start_line":416,"context_end_line":457,"code":"    \"The bare ModuleFormer Model transformer outputting raw hidden-states without any specific head on top.\",\n    SPARSEGPT_START_DOCSTRING,\n)\nclass ModuleFormerModel(ModuleFormerPreTrainedModel):\n    def __init__(self, config):\n        super().__init__(config)\n\n        self.embed_dim = config.n_embd\n        self.vocab_size = config.vocab_size\n        self.wte = nn.Embedding(config.vocab_size, config.n_embd)\n        self.drop = nn.Dropout(config.embd_pdrop)\n        self.h = nn.ModuleList([ModuleFormerBlock(config) for _ in range(config.n_layer)])\n        self.ln_f = nn.LayerNorm(config.n_embd)\n\n        # Initialize weights and apply final processing\n        self.post_init()\n\n    def get_input_embeddings(self):\n        return self.wte\n\n    def set_input_embeddings(self, new_embeddings):\n        self.wte = new_embeddings\n\n    @add_start_docstrings_to_model_forward(SPARSEGPT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n    @add_code_sample_docstrings(\n        checkpoint=_CHECKPOINT_FOR_DOC,\n        output_type=BaseModelOutputWithPast,\n        config_class=_CONFIG_FOR_DOC,\n    )\n    def forward(\n        self,\n        input_ids: Optional[torch.LongTensor] = None,\n        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,\n        attention_mask: Optional[torch.FloatTensor] = None,\n        token_type_ids: Optional[torch.LongTensor] = None,\n        head_mask: Optional[torch.FloatTensor] = None,\n        inputs_embeds: Optional[torch.FloatTensor] = None,\n        use_cache: Optional[bool] = None,\n        output_attentions: Optional[bool] = None,\n        output_hidden_states: Optional[bool] = None,\n        return_dict: Optional[bool] = None,\n    ) -> Union[Tuple, BaseModelOutputWithPast]:","source_hash":"3f879791c90afd36877381520b7b3cd97436a71176e1b55e6418cb46413338c3","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.modeling_moduleformer.get_output_embeddings","uri":"program://ModuleFormer/function/moduleformer.modeling_moduleformer.get_output_embeddings#L611-L612","kind":"function","name":"get_output_embeddings","path":"moduleformer/modeling_moduleformer.py","language":"python","start_line":611,"end_line":612,"context_start_line":591,"context_end_line":632,"code":"\n@add_start_docstrings(\n    \"\"\"\n    The ModuleFormer Model transformer with a language modeling head on top.\n    \"\"\",\n    SPARSEGPT_START_DOCSTRING,\n)\nclass ModuleFormerForCausalLM(ModuleFormerPreTrainedModel):\n    _keys_to_ignore_on_load_missing = [r\"h\\.\\d+\\.attn\\.causal_mask\"]\n\n    def __init__(self, config):\n        super().__init__(config)\n        self.transformer = ModuleFormerModel(config)\n        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)\n\n        self.aux_loss_weight = config.aux_loss_weight\n\n        # Initialize weights and apply final processing\n        self.post_init()\n\n    def get_output_embeddings(self):\n        return self.lm_head\n\n    def set_output_embeddings(self, new_embeddings):\n        self.lm_head = new_embeddings\n\n    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **kwargs):\n        token_type_ids = kwargs.get(\"token_type_ids\", None)\n        # only last token for inputs_ids if past is defined in kwargs\n        if past_key_values:\n            input_ids = input_ids[:, -1].unsqueeze(-1)\n            if token_type_ids is not None:\n                token_type_ids = token_type_ids[:, -1].unsqueeze(-1)\n\n        attention_mask = kwargs.get(\"attention_mask\", None)\n\n        return {\n            \"input_ids\": input_ids,\n            \"past_key_values\": past_key_values,\n            \"use_cache\": kwargs.get(\"use_cache\"),\n            \"attention_mask\": attention_mask,\n            \"token_type_ids\": token_type_ids,","source_hash":"3f879791c90afd36877381520b7b3cd97436a71176e1b55e6418cb46413338c3","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.modeling_moduleformer.set_output_embeddings","uri":"program://ModuleFormer/function/moduleformer.modeling_moduleformer.set_output_embeddings#L614-L615","kind":"function","name":"set_output_embeddings","path":"moduleformer/modeling_moduleformer.py","language":"python","start_line":614,"end_line":615,"context_start_line":594,"context_end_line":635,"code":"    The ModuleFormer Model transformer with a language modeling head on top.\n    \"\"\",\n    SPARSEGPT_START_DOCSTRING,\n)\nclass ModuleFormerForCausalLM(ModuleFormerPreTrainedModel):\n    _keys_to_ignore_on_load_missing = [r\"h\\.\\d+\\.attn\\.causal_mask\"]\n\n    def __init__(self, config):\n        super().__init__(config)\n        self.transformer = ModuleFormerModel(config)\n        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)\n\n        self.aux_loss_weight = config.aux_loss_weight\n\n        # Initialize weights and apply final processing\n        self.post_init()\n\n    def get_output_embeddings(self):\n        return self.lm_head\n\n    def set_output_embeddings(self, new_embeddings):\n        self.lm_head = new_embeddings\n\n    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **kwargs):\n        token_type_ids = kwargs.get(\"token_type_ids\", None)\n        # only last token for inputs_ids if past is defined in kwargs\n        if past_key_values:\n            input_ids = input_ids[:, -1].unsqueeze(-1)\n            if token_type_ids is not None:\n                token_type_ids = token_type_ids[:, -1].unsqueeze(-1)\n\n        attention_mask = kwargs.get(\"attention_mask\", None)\n\n        return {\n            \"input_ids\": input_ids,\n            \"past_key_values\": past_key_values,\n            \"use_cache\": kwargs.get(\"use_cache\"),\n            \"attention_mask\": attention_mask,\n            \"token_type_ids\": token_type_ids,\n        }\n\n    @add_start_docstrings_to_model_forward(SPARSEGPT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))","source_hash":"3f879791c90afd36877381520b7b3cd97436a71176e1b55e6418cb46413338c3","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.modeling_moduleformer.prepare_inputs_for_generation","uri":"program://ModuleFormer/function/moduleformer.modeling_moduleformer.prepare_inputs_for_generation#L617-L633","kind":"function","name":"prepare_inputs_for_generation","path":"moduleformer/modeling_moduleformer.py","language":"python","start_line":617,"end_line":633,"context_start_line":597,"context_end_line":653,"code":")\nclass ModuleFormerForCausalLM(ModuleFormerPreTrainedModel):\n    _keys_to_ignore_on_load_missing = [r\"h\\.\\d+\\.attn\\.causal_mask\"]\n\n    def __init__(self, config):\n        super().__init__(config)\n        self.transformer = ModuleFormerModel(config)\n        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)\n\n        self.aux_loss_weight = config.aux_loss_weight\n\n        # Initialize weights and apply final processing\n        self.post_init()\n\n    def get_output_embeddings(self):\n        return self.lm_head\n\n    def set_output_embeddings(self, new_embeddings):\n        self.lm_head = new_embeddings\n\n    def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **kwargs):\n        token_type_ids = kwargs.get(\"token_type_ids\", None)\n        # only last token for inputs_ids if past is defined in kwargs\n        if past_key_values:\n            input_ids = input_ids[:, -1].unsqueeze(-1)\n            if token_type_ids is not None:\n                token_type_ids = token_type_ids[:, -1].unsqueeze(-1)\n\n        attention_mask = kwargs.get(\"attention_mask\", None)\n\n        return {\n            \"input_ids\": input_ids,\n            \"past_key_values\": past_key_values,\n            \"use_cache\": kwargs.get(\"use_cache\"),\n            \"attention_mask\": attention_mask,\n            \"token_type_ids\": token_type_ids,\n        }\n\n    @add_start_docstrings_to_model_forward(SPARSEGPT_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n    @add_code_sample_docstrings(\n        checkpoint=_CHECKPOINT_FOR_DOC,\n        output_type=CausalLMOutputWithPast,\n        config_class=_CONFIG_FOR_DOC,\n    )\n    def forward(\n        self,\n        input_ids: Optional[torch.LongTensor] = None,\n        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,\n        attention_mask: Optional[torch.FloatTensor] = None,\n        token_type_ids: Optional[torch.LongTensor] = None,\n        head_mask: Optional[torch.FloatTensor] = None,\n        inputs_embeds: Optional[torch.FloatTensor] = None,\n        labels: Optional[torch.LongTensor] = None,\n        use_cache: Optional[bool] = None,\n        output_attentions: Optional[bool] = None,\n        output_hidden_states: Optional[bool] = None,\n        return_dict: Optional[bool] = None,","source_hash":"3f879791c90afd36877381520b7b3cd97436a71176e1b55e6418cb46413338c3","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.modeling_moduleformer._reorder_cache","uri":"program://ModuleFormer/function/moduleformer.modeling_moduleformer._reorder_cache#L709-L720","kind":"function","name":"_reorder_cache","path":"moduleformer/modeling_moduleformer.py","language":"python","start_line":709,"end_line":720,"context_start_line":689,"context_end_line":740,"code":"            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))\n\n            loss = loss.to(hidden_states.dtype)\n\n            if self.aux_loss_weight > 0:\n                loss = loss + self.transformer.aux_loss * self.aux_loss_weight\n\n        if not return_dict:\n            output = (lm_logits,) + transformer_outputs[1:]\n            return ((loss,) + output) if loss is not None else output\n\n        return CausalLMOutputWithPast(\n            loss=loss,\n            logits=lm_logits,\n            past_key_values=transformer_outputs.past_key_values,\n            hidden_states=transformer_outputs.hidden_states,\n            attentions=transformer_outputs.attentions,\n        )\n\n    @staticmethod\n    def _reorder_cache(\n        past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor\n    ) -> Tuple[Tuple[torch.Tensor]]:\n        \"\"\"\n        This function is used to re-order the `past_key_values` cache if [`~PretrainedModel.beam_search`] or\n        [`~PretrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct\n        beam_idx at every generation step.\n        \"\"\"\n        return tuple(\n            tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)\n            for layer_past in past_key_values\n        )\n\n@add_start_docstrings(\n    \"\"\"\n    The ModuleFormer Model with a sequence classification head on top (linear layer).\n\n    [`ModuleFormerForSequenceClassification`] uses the last token in order to do the classification, as other causal models\n    (e.g. GPT-1) do.\n\n    Since it does classification on the last token, it requires to know the position of the last token. If a\n    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If\n    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the\n    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in\n    each row of the batch).\n    \"\"\",\n    SPARSEGPT_START_DOCSTRING,\n)\nclass ModuleFormerForSequenceClassification(ModuleFormerPreTrainedModel):\n    _keys_to_ignore_on_load_missing = [\n        # r\"h\\.\\d+\\.attn\\.masked_bias\", \n        r\"lm_head.weight\"","source_hash":"3f879791c90afd36877381520b7b3cd97436a71176e1b55e6418cb46413338c3","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.modeling_moduleformer.create_custom_forward","uri":"program://ModuleFormer/function/moduleformer.modeling_moduleformer.create_custom_forward#L540-L545","kind":"function","name":"create_custom_forward","path":"moduleformer/modeling_moduleformer.py","language":"python","start_line":540,"end_line":545,"context_start_line":520,"context_end_line":565,"code":"        output_shape = input_shape + (hidden_states.size(-1),)\n\n        if self.gradient_checkpointing and self.training:\n            if use_cache:\n                logger.warning_once(\n                    \"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting \"\n                    \"`use_cache=False`...\"\n                )\n                use_cache = False\n\n        presents = () if use_cache else None\n        all_self_attentions = () if output_attentions else None\n        all_hidden_states = () if output_hidden_states else None\n        self.aux_loss = 0\n        for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):\n            if output_hidden_states:\n                all_hidden_states = all_hidden_states + (hidden_states,)\n\n            if self.gradient_checkpointing and self.training:\n\n                def create_custom_forward(module):\n                    def custom_forward(*inputs):\n                        # None for past_key_value\n                        return module(*inputs, use_cache, output_attentions)\n\n                    return custom_forward\n\n                outputs = torch.utils.checkpoint.checkpoint(\n                    create_custom_forward(block),\n                    hidden_states,\n                    None,\n                    attention_mask,\n                    head_mask[i],\n                    **self.gradient_checkpointing_kwargs,\n                )\n            else:\n                outputs = block(\n                    hidden_states,\n                    layer_past=layer_past,\n                    attention_mask=attention_mask,\n                    head_mask=head_mask[i],\n                    use_cache=use_cache,\n                    output_attentions=output_attentions,\n                )\n\n            hidden_states = outputs[0]","source_hash":"3f879791c90afd36877381520b7b3cd97436a71176e1b55e6418cb46413338c3","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.modeling_moduleformer.custom_forward","uri":"program://ModuleFormer/function/moduleformer.modeling_moduleformer.custom_forward#L541-L543","kind":"function","name":"custom_forward","path":"moduleformer/modeling_moduleformer.py","language":"python","start_line":541,"end_line":543,"context_start_line":521,"context_end_line":563,"code":"\n        if self.gradient_checkpointing and self.training:\n            if use_cache:\n                logger.warning_once(\n                    \"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting \"\n                    \"`use_cache=False`...\"\n                )\n                use_cache = False\n\n        presents = () if use_cache else None\n        all_self_attentions = () if output_attentions else None\n        all_hidden_states = () if output_hidden_states else None\n        self.aux_loss = 0\n        for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):\n            if output_hidden_states:\n                all_hidden_states = all_hidden_states + (hidden_states,)\n\n            if self.gradient_checkpointing and self.training:\n\n                def create_custom_forward(module):\n                    def custom_forward(*inputs):\n                        # None for past_key_value\n                        return module(*inputs, use_cache, output_attentions)\n\n                    return custom_forward\n\n                outputs = torch.utils.checkpoint.checkpoint(\n                    create_custom_forward(block),\n                    hidden_states,\n                    None,\n                    attention_mask,\n                    head_mask[i],\n                    **self.gradient_checkpointing_kwargs,\n                )\n            else:\n                outputs = block(\n                    hidden_states,\n                    layer_past=layer_past,\n                    attention_mask=attention_mask,\n                    head_mask=head_mask[i],\n                    use_cache=use_cache,\n                    output_attentions=output_attentions,\n                )","source_hash":"3f879791c90afd36877381520b7b3cd97436a71176e1b55e6418cb46413338c3","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.configuration_moduleformer","uri":"program://ModuleFormer/module/moduleformer.configuration_moduleformer#L1-L274","kind":"module","name":"moduleformer.configuration_moduleformer","path":"moduleformer/configuration_moduleformer.py","language":"python","start_line":1,"end_line":274,"context_start_line":1,"context_end_line":274,"code":"\"\"\" ModuleFormer model configuration\"\"\"\nfrom collections import OrderedDict\nfrom typing import Any, List, Mapping, Optional\n\nfrom transformers import PreTrainedTokenizer, TensorType, is_torch_available\nfrom transformers.configuration_utils import PretrainedConfig\nfrom transformers.onnx import OnnxConfigWithPast, PatchingSpec\nfrom transformers.utils import logging\n\n\nlogger = logging.get_logger(__name__)\n\n\n# SPARSEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {\n#     \"moduleformer-small\": \"https://huggingface.co/moduleformer-small/resolve/main/config.json\",\n# }\n\n\n\nclass ModuleFormerConfig(PretrainedConfig):\n    r\"\"\"\n    This is the configuration class to store the configuration of a [`ModuleFormerModel`]. It is used to instantiate a\n    ModuleFormer model according to the specified arguments, defining the model architecture. Instantiating a configuration\n    with the defaults will yield a similar configuration to that of the ModuleFormer\n    [moduleformer-small](https://huggingface.co/moduleformer-small) architecture. Configuration objects\n    inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from\n    [`PretrainedConfig`] for more information.\n\n    Args:\n        vocab_size (`int`, *optional*, defaults to 50400):\n            Vocabulary size of the ModuleFormer model. Defines the number of different tokens that can be represented by the\n            `inputs_ids` passed when calling [`ModuleFormerModel`].\n        n_positions (`int`, *optional*, defaults to 2048):\n            The maximum sequence length that this model might ever be used with. Typically set this to something large\n            just in case (e.g., 512 or 1024 or 2048).\n        n_embd (`int`, *optional*, defaults to 4096):\n            Dimensionality of the embeddings and hidden states.\n        n_layer (`int`, *optional*, defaults to 28):\n            Number of hidden layers in the Transformer encoder.\n        n_head (`int`, *optional*, defaults to 16):\n            Number of attention heads for each attention layer in the Transformer encoder.\n        rotary_dim (`int`, *optional*, defaults to 64):\n            Number of dimensions in the embedding that Rotary Position Embedding is applied to.\n        n_inner (`int`, *optional*, defaults to None):\n            Dimensionality of the inner feed-forward layers. 
`None` will set it to 4 times n_embd\n        activation_function (`str`, *optional*, defaults to `\"gelu_new\"`):\n            Activation function, to be selected in the list `[\"relu\", \"silu\", \"gelu\", \"tanh\", \"gelu_new\"]`.\n        resid_pdrop (`float`, *optional*, defaults to 0.1):\n            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.\n        embd_pdrop (`int`, *optional*, defaults to 0.1):\n            The dropout ratio for the embeddings.\n        attn_pdrop (`float`, *optional*, defaults to 0.1):\n            The dropout ratio for the attention.\n        layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):\n            The epsilon to use in the layer normalization layers.\n        initializer_range (`float`, *optional*, defaults to 0.02):\n            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n        use_cache (`bool`, *optional*, defaults to `True`):\n            Whether or not the model should return the last key/values attentions (not used by all models).\n\n    Example:\n\n    ```python\n    >>> from transformers import ModuleFormerConfig, ModuleFormerModel\n\n    >>> # Initializing a ModuleFormer 6B configuration\n    >>> configuration = ModuleFormerConfig()\n\n    >>> # Initializing a model (with random weights) from the configuration\n    >>> model = ModuleFormerModel(configuration)\n\n    >>> # Accessing the model configuration\n    >>> configuration = model.config\n    ```\"\"\"\n    model_type = \"moduleformer\"\n    attribute_map = {\n        \"max_position_embeddings\": \"n_positions\",\n        \"hidden_size\": \"n_embd\",\n        \"num_attention_heads\": \"n_head\",\n        \"num_hidden_layers\": \"n_layer\",\n    }\n\n    def __init__(\n        self,\n        vocab_size=50295,\n        block_size=512,\n        history_length=512,\n        n_embd=1024,\n        n_layer=24,\n        n_head=8,\n        att_hidden = 512,\n        ffd_hidden=2048,\n        activation_function=\"gelu_new\",\n        resid_pdrop=0.0,\n        embd_pdrop=0.0,\n        attn_pdrop=0.0,\n        moe_pdrop=0.0,\n        sample_topk = 0,\n        gating_size = 256,\n        n_att_experts = 32,\n        k_att = 2,\n        n_mlp_experts = 32,\n        k_mlp = 2,\n        layer_norm_epsilon=1e-5,\n        initializer_range=0.02,\n        use_cache=True,\n        bos_token_id=50256,\n        eos_token_id=50256,\n        tie_word_embeddings=False,\n        aux_loss_type = 'mi',\n        aux_loss_weight=0,\n        gate_type = \"mlp\",\n        **kwargs,\n    ):\n        self.vocab_size = vocab_size\n        self.history_length = history_length\n        self.block_size = block_size\n        self.n_embd = n_embd\n        self.n_layer = n_layer\n        self.n_head = n_head\n        self.att_hidden = att_hidden\n        self.ffd_hidden = ffd_hidden\n        self.activation_function = activation_function\n        self.resid_pdrop = resid_pdrop\n        self.embd_pdrop = embd_pdrop\n        self.attn_pdrop = attn_pdrop\n        self.moe_pdrop = moe_pdrop\n        self.layer_norm_epsilon = layer_norm_epsilon\n        self.initializer_range = initializer_range\n        self.use_cache = use_cache\n        self.sample_topk = sample_topk\n        self.gating_size = gating_size\n        self.n_att_experts = n_att_experts\n        self.k_att = k_att\n        self.n_mlp_experts = n_mlp_experts\n        self.k_mlp = k_mlp\n        self.aux_loss_type = aux_loss_type\n        
self.aux_loss_weight = aux_loss_weight\n        self.gate_type = gate_type\n        self.n_ctx = history_length * n_layer\n\n        self.bos_token_id = bos_token_id\n        self.eos_token_id = eos_token_id\n\n        super().__init__(\n            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs\n        )\n\n\nclass ModuleFormerOnnxConfig(OnnxConfigWithPast):\n    def __init__(\n        self,\n        config: PretrainedConfig,\n        task: str = \"default\",\n        patching_specs: List[PatchingSpec] = None,\n        use_past: bool = False,\n    ):\n        \"\"\"\n        Initialize the ModuleFormerOnnxConfig.\n\n        Args:\n            config (PretrainedConfig): Pretrained model configuration.\n            task (str): Task description.\n            patching_specs (List[PatchingSpec]): List of patching specifications.\n            use_past (bool): Whether to use past tokens in the configuration.\n        \"\"\"\n        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)\n        if not getattr(self._config, \"pad_token_id\", None):\n            # TODO: how to do that better?\n            self._config.pad_token_id = 0\n\n    @property\n    def inputs(self) -> Mapping[str, Mapping[int, str]]:\n        \"\"\"\n        Define the input mappings.\n\n        Returns:\n            Mapping[str, Mapping[int, str]]: Input mappings.\n        \"\"\"\n        common_inputs = OrderedDict({\"input_ids\": {0: \"batch\", 1: \"sequence\"}})\n        if self.use_past:\n            self.fill_with_past_key_values_(common_inputs, direction=\"inputs\")\n            common_inputs[\"attention_mask\"] = {0: \"batch\", 1: \"past_sequence + sequence\"}\n        else:\n            common_inputs[\"attention_mask\"] = {0: \"batch\", 1: \"sequence\"}\n\n        return common_inputs\n\n    @property\n    def num_layers(self) -> int:\n        \"\"\"\n        Get the number of layers.\n\n        Returns:\n            int: Number of layers.\n        \"\"\"\n        return self._config.n_layer\n\n    @property\n    def num_attention_heads(self) -> int:\n        \"\"\"\n        Get the number of attention heads.\n\n        Returns:\n            int: Number of attention heads.\n        \"\"\"\n        return self._config.n_head\n\n    def generate_dummy_inputs(\n        self,\n        tokenizer: PreTrainedTokenizer,\n        batch_size: int = -1,\n        seq_length: int = -1,\n        is_pair: bool = False,\n        framework: Optional[TensorType] = None,\n    ) -> Mapping[str, Any]:\n        \"\"\"\n        Generate dummy inputs for testing.\n\n        Args:\n            tokenizer (PreTrainedTokenizer): Pretrained tokenizer.\n            batch_size (int): Batch size.\n            seq_length (int): Sequence length.\n            is_pair (bool): Whether the input is a pair.\n            framework (Optional[TensorType]): Tensor framework.\n\n        Returns:\n            Mapping[str, Any]: Dummy inputs.\n        \"\"\"\n        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(\n            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework\n        )\n\n        # We need to order the input in the way they appears in the forward()\n        ordered_inputs = OrderedDict({\"input_ids\": common_inputs[\"input_ids\"]})\n\n        # Need to add the past_keys\n        if self.use_past:\n            if not is_torch_available():\n                raise ValueError(\"Cannot 
generate dummy past_keys inputs without PyTorch installed.\")\n            else:\n                import torch\n\n                batch, seqlen = common_inputs[\"input_ids\"].shape\n                # Not using the same length for past_key_values\n                past_key_values_length = seqlen + 2\n                past_shape = (\n                    batch,\n                    self.num_attention_heads,\n                    past_key_values_length,\n                    self._config.hidden_size // self.num_attention_heads,\n                )\n                ordered_inputs[\"past_key_values\"] = [\n                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)\n                ]\n\n        ordered_inputs[\"attention_mask\"] = common_inputs[\"attention_mask\"]\n        if self.use_past:\n            mask_dtype = ordered_inputs[\"attention_mask\"].dtype\n            ordered_inputs[\"attention_mask\"] = torch.cat(\n                [ordered_inputs[\"attention_mask\"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1\n            )\n\n        return ordered_inputs\n\n    @property\n    def default_onnx_opset(self) -> int:\n        \"\"\"\n        Get the default ONNX opset version.\n\n        Returns:\n            int: Default ONNX opset version.\n        \"\"\"\n        return 13","source_hash":"45c91a63cd6e012b20106990eefa95f91fa295a8b07f6798547d633d80167f8c","truncated":false}
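The configuration module above mostly defines hyperparameters plus the `attribute_map` aliases used by the rest of the Hugging Face stack. A small sketch, assuming the `moduleformer` package from this repo is installed, showing how the aliases and the derived `n_ctx` behave:

```python
from moduleformer.configuration_moduleformer import ModuleFormerConfig

config = ModuleFormerConfig(
    n_embd=512, n_layer=4, n_head=8,
    n_att_experts=8, k_att=2,
    n_mlp_experts=8, k_mlp=2,
)

# attribute_map resolves the generic HF names onto ModuleFormer's own fields.
assert config.hidden_size == config.n_embd
assert config.num_hidden_layers == config.n_layer
assert config.num_attention_heads == config.n_head

# n_ctx is derived in __init__ as history_length * n_layer (512 * 4 here).
print(config.n_ctx)  # 2048
```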
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.configuration_moduleformer.ModuleFormerConfig","uri":"program://ModuleFormer/class/moduleformer.configuration_moduleformer.ModuleFormerConfig#L20-L147","kind":"class","name":"ModuleFormerConfig","path":"moduleformer/configuration_moduleformer.py","language":"python","start_line":20,"end_line":147,"context_start_line":1,"context_end_line":167,"code":"\"\"\" ModuleFormer model configuration\"\"\"\nfrom collections import OrderedDict\nfrom typing import Any, List, Mapping, Optional\n\nfrom transformers import PreTrainedTokenizer, TensorType, is_torch_available\nfrom transformers.configuration_utils import PretrainedConfig\nfrom transformers.onnx import OnnxConfigWithPast, PatchingSpec\nfrom transformers.utils import logging\n\n\nlogger = logging.get_logger(__name__)\n\n\n# SPARSEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {\n#     \"moduleformer-small\": \"https://huggingface.co/moduleformer-small/resolve/main/config.json\",\n# }\n\n\n\nclass ModuleFormerConfig(PretrainedConfig):\n    r\"\"\"\n    This is the configuration class to store the configuration of a [`ModuleFormerModel`]. It is used to instantiate a\n    ModuleFormer model according to the specified arguments, defining the model architecture. Instantiating a configuration\n    with the defaults will yield a similar configuration to that of the ModuleFormer\n    [moduleformer-small](https://huggingface.co/moduleformer-small) architecture. Configuration objects\n    inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from\n    [`PretrainedConfig`] for more information.\n\n    Args:\n        vocab_size (`int`, *optional*, defaults to 50400):\n            Vocabulary size of the ModuleFormer model. Defines the number of different tokens that can be represented by the\n            `inputs_ids` passed when calling [`ModuleFormerModel`].\n        n_positions (`int`, *optional*, defaults to 2048):\n            The maximum sequence length that this model might ever be used with. Typically set this to something large\n            just in case (e.g., 512 or 1024 or 2048).\n        n_embd (`int`, *optional*, defaults to 4096):\n            Dimensionality of the embeddings and hidden states.\n        n_layer (`int`, *optional*, defaults to 28):\n            Number of hidden layers in the Transformer encoder.\n        n_head (`int`, *optional*, defaults to 16):\n            Number of attention heads for each attention layer in the Transformer encoder.\n        rotary_dim (`int`, *optional*, defaults to 64):\n            Number of dimensions in the embedding that Rotary Position Embedding is applied to.\n        n_inner (`int`, *optional*, defaults to None):\n            Dimensionality of the inner feed-forward layers. 
`None` will set it to 4 times n_embd\n        activation_function (`str`, *optional*, defaults to `\"gelu_new\"`):\n            Activation function, to be selected in the list `[\"relu\", \"silu\", \"gelu\", \"tanh\", \"gelu_new\"]`.\n        resid_pdrop (`float`, *optional*, defaults to 0.1):\n            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.\n        embd_pdrop (`int`, *optional*, defaults to 0.1):\n            The dropout ratio for the embeddings.\n        attn_pdrop (`float`, *optional*, defaults to 0.1):\n            The dropout ratio for the attention.\n        layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):\n            The epsilon to use in the layer normalization layers.\n        initializer_range (`float`, *optional*, defaults to 0.02):\n            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n        use_cache (`bool`, *optional*, defaults to `True`):\n            Whether or not the model should return the last key/values attentions (not used by all models).\n\n    Example:\n\n    ```python\n    >>> from transformers import ModuleFormerConfig, ModuleFormerModel\n\n    >>> # Initializing a ModuleFormer 6B configuration\n    >>> configuration = ModuleFormerConfig()\n\n    >>> # Initializing a model (with random weights) from the configuration\n    >>> model = ModuleFormerModel(configuration)\n\n    >>> # Accessing the model configuration\n    >>> configuration = model.config\n    ```\"\"\"\n    model_type = \"moduleformer\"\n    attribute_map = {\n        \"max_position_embeddings\": \"n_positions\",\n        \"hidden_size\": \"n_embd\",\n        \"num_attention_heads\": \"n_head\",\n        \"num_hidden_layers\": \"n_layer\",\n    }\n\n    def __init__(\n        self,\n        vocab_size=50295,\n        block_size=512,\n        history_length=512,\n        n_embd=1024,\n        n_layer=24,\n        n_head=8,\n        att_hidden = 512,\n        ffd_hidden=2048,\n        activation_function=\"gelu_new\",\n        resid_pdrop=0.0,\n        embd_pdrop=0.0,\n        attn_pdrop=0.0,\n        moe_pdrop=0.0,\n        sample_topk = 0,\n        gating_size = 256,\n        n_att_experts = 32,\n        k_att = 2,\n        n_mlp_experts = 32,\n        k_mlp = 2,\n        layer_norm_epsilon=1e-5,\n        initializer_range=0.02,\n        use_cache=True,\n        bos_token_id=50256,\n        eos_token_id=50256,\n        tie_word_embeddings=False,\n        aux_loss_type = 'mi',\n        aux_loss_weight=0,\n        gate_type = \"mlp\",\n        **kwargs,\n    ):\n        self.vocab_size = vocab_size\n        self.history_length = history_length\n        self.block_size = block_size\n        self.n_embd = n_embd\n        self.n_layer = n_layer\n        self.n_head = n_head\n        self.att_hidden = att_hidden\n        self.ffd_hidden = ffd_hidden\n        self.activation_function = activation_function\n        self.resid_pdrop = resid_pdrop\n        self.embd_pdrop = embd_pdrop\n        self.attn_pdrop = attn_pdrop\n        self.moe_pdrop = moe_pdrop\n        self.layer_norm_epsilon = layer_norm_epsilon\n        self.initializer_range = initializer_range\n        self.use_cache = use_cache\n        self.sample_topk = sample_topk\n        self.gating_size = gating_size\n        self.n_att_experts = n_att_experts\n        self.k_att = k_att\n        self.n_mlp_experts = n_mlp_experts\n        self.k_mlp = k_mlp\n        self.aux_loss_type = aux_loss_type\n        
self.aux_loss_weight = aux_loss_weight\n        self.gate_type = gate_type\n        self.n_ctx = history_length * n_layer\n\n        self.bos_token_id = bos_token_id\n        self.eos_token_id = eos_token_id\n\n        super().__init__(\n            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs\n        )\n\n\nclass ModuleFormerOnnxConfig(OnnxConfigWithPast):\n    def __init__(\n        self,\n        config: PretrainedConfig,\n        task: str = \"default\",\n        patching_specs: List[PatchingSpec] = None,\n        use_past: bool = False,\n    ):\n        \"\"\"\n        Initialize the ModuleFormerOnnxConfig.\n\n        Args:\n            config (PretrainedConfig): Pretrained model configuration.\n            task (str): Task description.\n            patching_specs (List[PatchingSpec]): List of patching specifications.\n            use_past (bool): Whether to use past tokens in the configuration.\n        \"\"\"\n        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)","source_hash":"45c91a63cd6e012b20106990eefa95f91fa295a8b07f6798547d633d80167f8c","truncated":false}
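Because `ModuleFormerConfig` inherits from `PretrainedConfig`, the standard serialization helpers apply. A sketch of a save/load round trip, assuming the package is installed; the temporary directory is arbitrary:

```python
import tempfile
from moduleformer.configuration_moduleformer import ModuleFormerConfig

config = ModuleFormerConfig(n_embd=256, n_layer=2, gate_type="gmm", aux_loss_type="switch")

with tempfile.TemporaryDirectory() as tmp_dir:
    config.save_pretrained(tmp_dir)                       # writes config.json
    reloaded = ModuleFormerConfig.from_pretrained(tmp_dir)

assert reloaded.model_type == "moduleformer"
assert reloaded.gate_type == "gmm"
assert reloaded.aux_loss_type == "switch"
```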
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.configuration_moduleformer.ModuleFormerOnnxConfig","uri":"program://ModuleFormer/class/moduleformer.configuration_moduleformer.ModuleFormerOnnxConfig#L150-L274","kind":"class","name":"ModuleFormerOnnxConfig","path":"moduleformer/configuration_moduleformer.py","language":"python","start_line":150,"end_line":274,"context_start_line":130,"context_end_line":274,"code":"        self.use_cache = use_cache\n        self.sample_topk = sample_topk\n        self.gating_size = gating_size\n        self.n_att_experts = n_att_experts\n        self.k_att = k_att\n        self.n_mlp_experts = n_mlp_experts\n        self.k_mlp = k_mlp\n        self.aux_loss_type = aux_loss_type\n        self.aux_loss_weight = aux_loss_weight\n        self.gate_type = gate_type\n        self.n_ctx = history_length * n_layer\n\n        self.bos_token_id = bos_token_id\n        self.eos_token_id = eos_token_id\n\n        super().__init__(\n            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs\n        )\n\n\nclass ModuleFormerOnnxConfig(OnnxConfigWithPast):\n    def __init__(\n        self,\n        config: PretrainedConfig,\n        task: str = \"default\",\n        patching_specs: List[PatchingSpec] = None,\n        use_past: bool = False,\n    ):\n        \"\"\"\n        Initialize the ModuleFormerOnnxConfig.\n\n        Args:\n            config (PretrainedConfig): Pretrained model configuration.\n            task (str): Task description.\n            patching_specs (List[PatchingSpec]): List of patching specifications.\n            use_past (bool): Whether to use past tokens in the configuration.\n        \"\"\"\n        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)\n        if not getattr(self._config, \"pad_token_id\", None):\n            # TODO: how to do that better?\n            self._config.pad_token_id = 0\n\n    @property\n    def inputs(self) -> Mapping[str, Mapping[int, str]]:\n        \"\"\"\n        Define the input mappings.\n\n        Returns:\n            Mapping[str, Mapping[int, str]]: Input mappings.\n        \"\"\"\n        common_inputs = OrderedDict({\"input_ids\": {0: \"batch\", 1: \"sequence\"}})\n        if self.use_past:\n            self.fill_with_past_key_values_(common_inputs, direction=\"inputs\")\n            common_inputs[\"attention_mask\"] = {0: \"batch\", 1: \"past_sequence + sequence\"}\n        else:\n            common_inputs[\"attention_mask\"] = {0: \"batch\", 1: \"sequence\"}\n\n        return common_inputs\n\n    @property\n    def num_layers(self) -> int:\n        \"\"\"\n        Get the number of layers.\n\n        Returns:\n            int: Number of layers.\n        \"\"\"\n        return self._config.n_layer\n\n    @property\n    def num_attention_heads(self) -> int:\n        \"\"\"\n        Get the number of attention heads.\n\n        Returns:\n            int: Number of attention heads.\n        \"\"\"\n        return self._config.n_head\n\n    def generate_dummy_inputs(\n        self,\n        tokenizer: PreTrainedTokenizer,\n        batch_size: int = -1,\n        seq_length: int = -1,\n        is_pair: bool = False,\n        framework: Optional[TensorType] = None,\n    ) -> Mapping[str, Any]:\n        \"\"\"\n        Generate dummy inputs for testing.\n\n        Args:\n            tokenizer (PreTrainedTokenizer): Pretrained tokenizer.\n            batch_size (int): Batch size.\n            seq_length (int): Sequence 
length.\n            is_pair (bool): Whether the input is a pair.\n            framework (Optional[TensorType]): Tensor framework.\n\n        Returns:\n            Mapping[str, Any]: Dummy inputs.\n        \"\"\"\n        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(\n            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework\n        )\n\n        # We need to order the input in the way they appears in the forward()\n        ordered_inputs = OrderedDict({\"input_ids\": common_inputs[\"input_ids\"]})\n\n        # Need to add the past_keys\n        if self.use_past:\n            if not is_torch_available():\n                raise ValueError(\"Cannot generate dummy past_keys inputs without PyTorch installed.\")\n            else:\n                import torch\n\n                batch, seqlen = common_inputs[\"input_ids\"].shape\n                # Not using the same length for past_key_values\n                past_key_values_length = seqlen + 2\n                past_shape = (\n                    batch,\n                    self.num_attention_heads,\n                    past_key_values_length,\n                    self._config.hidden_size // self.num_attention_heads,\n                )\n                ordered_inputs[\"past_key_values\"] = [\n                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)\n                ]\n\n        ordered_inputs[\"attention_mask\"] = common_inputs[\"attention_mask\"]\n        if self.use_past:\n            mask_dtype = ordered_inputs[\"attention_mask\"].dtype\n            ordered_inputs[\"attention_mask\"] = torch.cat(\n                [ordered_inputs[\"attention_mask\"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1\n            )\n\n        return ordered_inputs\n\n    @property\n    def default_onnx_opset(self) -> int:\n        \"\"\"\n        Get the default ONNX opset version.\n\n        Returns:\n            int: Default ONNX opset version.\n        \"\"\"\n        return 13","source_hash":"45c91a63cd6e012b20106990eefa95f91fa295a8b07f6798547d633d80167f8c","truncated":false}
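A sketch of how `ModuleFormerOnnxConfig` exposes its dynamic axes, assuming the package (and `transformers.onnx`) is importable; with `use_past=True` the attention mask is declared over past plus current positions:

```python
from moduleformer.configuration_moduleformer import ModuleFormerConfig, ModuleFormerOnnxConfig

config = ModuleFormerConfig(n_layer=2, n_head=4)

onnx_cfg = ModuleFormerOnnxConfig(config, task="default", use_past=False)
print(onnx_cfg.inputs)
# OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#              ('attention_mask', {0: 'batch', 1: 'sequence'})])

onnx_cfg_past = ModuleFormerOnnxConfig(config, use_past=True)
print(onnx_cfg_past.num_layers, onnx_cfg_past.num_attention_heads)  # 2 4
print(onnx_cfg_past.inputs["attention_mask"])  # {0: 'batch', 1: 'past_sequence + sequence'}
```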
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.configuration_moduleformer.__init__","uri":"program://ModuleFormer/function/moduleformer.configuration_moduleformer.__init__#L151-L170","kind":"function","name":"__init__","path":"moduleformer/configuration_moduleformer.py","language":"python","start_line":151,"end_line":170,"context_start_line":131,"context_end_line":190,"code":"        self.sample_topk = sample_topk\n        self.gating_size = gating_size\n        self.n_att_experts = n_att_experts\n        self.k_att = k_att\n        self.n_mlp_experts = n_mlp_experts\n        self.k_mlp = k_mlp\n        self.aux_loss_type = aux_loss_type\n        self.aux_loss_weight = aux_loss_weight\n        self.gate_type = gate_type\n        self.n_ctx = history_length * n_layer\n\n        self.bos_token_id = bos_token_id\n        self.eos_token_id = eos_token_id\n\n        super().__init__(\n            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs\n        )\n\n\nclass ModuleFormerOnnxConfig(OnnxConfigWithPast):\n    def __init__(\n        self,\n        config: PretrainedConfig,\n        task: str = \"default\",\n        patching_specs: List[PatchingSpec] = None,\n        use_past: bool = False,\n    ):\n        \"\"\"\n        Initialize the ModuleFormerOnnxConfig.\n\n        Args:\n            config (PretrainedConfig): Pretrained model configuration.\n            task (str): Task description.\n            patching_specs (List[PatchingSpec]): List of patching specifications.\n            use_past (bool): Whether to use past tokens in the configuration.\n        \"\"\"\n        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)\n        if not getattr(self._config, \"pad_token_id\", None):\n            # TODO: how to do that better?\n            self._config.pad_token_id = 0\n\n    @property\n    def inputs(self) -> Mapping[str, Mapping[int, str]]:\n        \"\"\"\n        Define the input mappings.\n\n        Returns:\n            Mapping[str, Mapping[int, str]]: Input mappings.\n        \"\"\"\n        common_inputs = OrderedDict({\"input_ids\": {0: \"batch\", 1: \"sequence\"}})\n        if self.use_past:\n            self.fill_with_past_key_values_(common_inputs, direction=\"inputs\")\n            common_inputs[\"attention_mask\"] = {0: \"batch\", 1: \"past_sequence + sequence\"}\n        else:\n            common_inputs[\"attention_mask\"] = {0: \"batch\", 1: \"sequence\"}\n\n        return common_inputs\n\n    @property\n    def num_layers(self) -> int:","source_hash":"45c91a63cd6e012b20106990eefa95f91fa295a8b07f6798547d633d80167f8c","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.configuration_moduleformer.inputs","uri":"program://ModuleFormer/function/moduleformer.configuration_moduleformer.inputs#L173-L187","kind":"function","name":"inputs","path":"moduleformer/configuration_moduleformer.py","language":"python","start_line":173,"end_line":187,"context_start_line":153,"context_end_line":207,"code":"        config: PretrainedConfig,\n        task: str = \"default\",\n        patching_specs: List[PatchingSpec] = None,\n        use_past: bool = False,\n    ):\n        \"\"\"\n        Initialize the ModuleFormerOnnxConfig.\n\n        Args:\n            config (PretrainedConfig): Pretrained model configuration.\n            task (str): Task description.\n            patching_specs (List[PatchingSpec]): List of patching specifications.\n            use_past (bool): Whether to use past tokens in the configuration.\n        \"\"\"\n        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)\n        if not getattr(self._config, \"pad_token_id\", None):\n            # TODO: how to do that better?\n            self._config.pad_token_id = 0\n\n    @property\n    def inputs(self) -> Mapping[str, Mapping[int, str]]:\n        \"\"\"\n        Define the input mappings.\n\n        Returns:\n            Mapping[str, Mapping[int, str]]: Input mappings.\n        \"\"\"\n        common_inputs = OrderedDict({\"input_ids\": {0: \"batch\", 1: \"sequence\"}})\n        if self.use_past:\n            self.fill_with_past_key_values_(common_inputs, direction=\"inputs\")\n            common_inputs[\"attention_mask\"] = {0: \"batch\", 1: \"past_sequence + sequence\"}\n        else:\n            common_inputs[\"attention_mask\"] = {0: \"batch\", 1: \"sequence\"}\n\n        return common_inputs\n\n    @property\n    def num_layers(self) -> int:\n        \"\"\"\n        Get the number of layers.\n\n        Returns:\n            int: Number of layers.\n        \"\"\"\n        return self._config.n_layer\n\n    @property\n    def num_attention_heads(self) -> int:\n        \"\"\"\n        Get the number of attention heads.\n\n        Returns:\n            int: Number of attention heads.\n        \"\"\"\n        return self._config.n_head","source_hash":"45c91a63cd6e012b20106990eefa95f91fa295a8b07f6798547d633d80167f8c","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.configuration_moduleformer.num_layers","uri":"program://ModuleFormer/function/moduleformer.configuration_moduleformer.num_layers#L190-L197","kind":"function","name":"num_layers","path":"moduleformer/configuration_moduleformer.py","language":"python","start_line":190,"end_line":197,"context_start_line":170,"context_end_line":217,"code":"            self._config.pad_token_id = 0\n\n    @property\n    def inputs(self) -> Mapping[str, Mapping[int, str]]:\n        \"\"\"\n        Define the input mappings.\n\n        Returns:\n            Mapping[str, Mapping[int, str]]: Input mappings.\n        \"\"\"\n        common_inputs = OrderedDict({\"input_ids\": {0: \"batch\", 1: \"sequence\"}})\n        if self.use_past:\n            self.fill_with_past_key_values_(common_inputs, direction=\"inputs\")\n            common_inputs[\"attention_mask\"] = {0: \"batch\", 1: \"past_sequence + sequence\"}\n        else:\n            common_inputs[\"attention_mask\"] = {0: \"batch\", 1: \"sequence\"}\n\n        return common_inputs\n\n    @property\n    def num_layers(self) -> int:\n        \"\"\"\n        Get the number of layers.\n\n        Returns:\n            int: Number of layers.\n        \"\"\"\n        return self._config.n_layer\n\n    @property\n    def num_attention_heads(self) -> int:\n        \"\"\"\n        Get the number of attention heads.\n\n        Returns:\n            int: Number of attention heads.\n        \"\"\"\n        return self._config.n_head\n\n    def generate_dummy_inputs(\n        self,\n        tokenizer: PreTrainedTokenizer,\n        batch_size: int = -1,\n        seq_length: int = -1,\n        is_pair: bool = False,\n        framework: Optional[TensorType] = None,\n    ) -> Mapping[str, Any]:\n        \"\"\"","source_hash":"45c91a63cd6e012b20106990eefa95f91fa295a8b07f6798547d633d80167f8c","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.configuration_moduleformer.num_attention_heads","uri":"program://ModuleFormer/function/moduleformer.configuration_moduleformer.num_attention_heads#L200-L207","kind":"function","name":"num_attention_heads","path":"moduleformer/configuration_moduleformer.py","language":"python","start_line":200,"end_line":207,"context_start_line":180,"context_end_line":227,"code":"        common_inputs = OrderedDict({\"input_ids\": {0: \"batch\", 1: \"sequence\"}})\n        if self.use_past:\n            self.fill_with_past_key_values_(common_inputs, direction=\"inputs\")\n            common_inputs[\"attention_mask\"] = {0: \"batch\", 1: \"past_sequence + sequence\"}\n        else:\n            common_inputs[\"attention_mask\"] = {0: \"batch\", 1: \"sequence\"}\n\n        return common_inputs\n\n    @property\n    def num_layers(self) -> int:\n        \"\"\"\n        Get the number of layers.\n\n        Returns:\n            int: Number of layers.\n        \"\"\"\n        return self._config.n_layer\n\n    @property\n    def num_attention_heads(self) -> int:\n        \"\"\"\n        Get the number of attention heads.\n\n        Returns:\n            int: Number of attention heads.\n        \"\"\"\n        return self._config.n_head\n\n    def generate_dummy_inputs(\n        self,\n        tokenizer: PreTrainedTokenizer,\n        batch_size: int = -1,\n        seq_length: int = -1,\n        is_pair: bool = False,\n        framework: Optional[TensorType] = None,\n    ) -> Mapping[str, Any]:\n        \"\"\"\n        Generate dummy inputs for testing.\n\n        Args:\n            tokenizer (PreTrainedTokenizer): Pretrained tokenizer.\n            batch_size (int): Batch size.\n            seq_length (int): Sequence length.\n            is_pair (bool): Whether the input is a pair.\n            framework (Optional[TensorType]): Tensor framework.\n\n        Returns:","source_hash":"45c91a63cd6e012b20106990eefa95f91fa295a8b07f6798547d633d80167f8c","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.configuration_moduleformer.generate_dummy_inputs","uri":"program://ModuleFormer/function/moduleformer.configuration_moduleformer.generate_dummy_inputs#L209-L264","kind":"function","name":"generate_dummy_inputs","path":"moduleformer/configuration_moduleformer.py","language":"python","start_line":209,"end_line":264,"context_start_line":189,"context_end_line":274,"code":"    @property\n    def num_layers(self) -> int:\n        \"\"\"\n        Get the number of layers.\n\n        Returns:\n            int: Number of layers.\n        \"\"\"\n        return self._config.n_layer\n\n    @property\n    def num_attention_heads(self) -> int:\n        \"\"\"\n        Get the number of attention heads.\n\n        Returns:\n            int: Number of attention heads.\n        \"\"\"\n        return self._config.n_head\n\n    def generate_dummy_inputs(\n        self,\n        tokenizer: PreTrainedTokenizer,\n        batch_size: int = -1,\n        seq_length: int = -1,\n        is_pair: bool = False,\n        framework: Optional[TensorType] = None,\n    ) -> Mapping[str, Any]:\n        \"\"\"\n        Generate dummy inputs for testing.\n\n        Args:\n            tokenizer (PreTrainedTokenizer): Pretrained tokenizer.\n            batch_size (int): Batch size.\n            seq_length (int): Sequence length.\n            is_pair (bool): Whether the input is a pair.\n            framework (Optional[TensorType]): Tensor framework.\n\n        Returns:\n            Mapping[str, Any]: Dummy inputs.\n        \"\"\"\n        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(\n            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework\n        )\n\n        # We need to order the input in the way they appears in the forward()\n        ordered_inputs = OrderedDict({\"input_ids\": common_inputs[\"input_ids\"]})\n\n        # Need to add the past_keys\n        if self.use_past:\n            if not is_torch_available():\n                raise ValueError(\"Cannot generate dummy past_keys inputs without PyTorch installed.\")\n            else:\n                import torch\n\n                batch, seqlen = common_inputs[\"input_ids\"].shape\n                # Not using the same length for past_key_values\n                past_key_values_length = seqlen + 2\n                past_shape = (\n                    batch,\n                    self.num_attention_heads,\n                    past_key_values_length,\n                    self._config.hidden_size // self.num_attention_heads,\n                )\n                ordered_inputs[\"past_key_values\"] = [\n                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)\n                ]\n\n        ordered_inputs[\"attention_mask\"] = common_inputs[\"attention_mask\"]\n        if self.use_past:\n            mask_dtype = ordered_inputs[\"attention_mask\"].dtype\n            ordered_inputs[\"attention_mask\"] = torch.cat(\n                [ordered_inputs[\"attention_mask\"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1\n            )\n\n        return ordered_inputs\n\n    @property\n    def default_onnx_opset(self) -> int:\n        \"\"\"\n        Get the default ONNX opset version.\n\n        Returns:\n            int: Default ONNX opset version.\n        \"\"\"\n        return 13","source_hash":"45c91a63cd6e012b20106990eefa95f91fa295a8b07f6798547d633d80167f8c","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.configuration_moduleformer.default_onnx_opset","uri":"program://ModuleFormer/function/moduleformer.configuration_moduleformer.default_onnx_opset#L267-L274","kind":"function","name":"default_onnx_opset","path":"moduleformer/configuration_moduleformer.py","language":"python","start_line":267,"end_line":274,"context_start_line":247,"context_end_line":274,"code":"                past_shape = (\n                    batch,\n                    self.num_attention_heads,\n                    past_key_values_length,\n                    self._config.hidden_size // self.num_attention_heads,\n                )\n                ordered_inputs[\"past_key_values\"] = [\n                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)\n                ]\n\n        ordered_inputs[\"attention_mask\"] = common_inputs[\"attention_mask\"]\n        if self.use_past:\n            mask_dtype = ordered_inputs[\"attention_mask\"].dtype\n            ordered_inputs[\"attention_mask\"] = torch.cat(\n                [ordered_inputs[\"attention_mask\"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1\n            )\n\n        return ordered_inputs\n\n    @property\n    def default_onnx_opset(self) -> int:\n        \"\"\"\n        Get the default ONNX opset version.\n\n        Returns:\n            int: Default ONNX opset version.\n        \"\"\"\n        return 13","source_hash":"45c91a63cd6e012b20106990eefa95f91fa295a8b07f6798547d633d80167f8c","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.utils.gate","uri":"program://ModuleFormer/module/moduleformer.utils.gate#L1-L251","kind":"module","name":"moduleformer.utils.gate","path":"moduleformer/utils/gate.py","language":"python","start_line":1,"end_line":251,"context_start_line":1,"context_end_line":251,"code":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom typing import Any, Dict, List, Optional\n\n# @torch.jit.script\ndef log_gmm_posterior(z, expert_centroids):\n     \"\"\"\n    Compute the log posterior probabilities of data points z belonging to Gaussian mixture components defined by centroids.\n\n    Args:\n        z (torch.Tensor): Data points (batch_size x feature_dim).\n        expert_centroids (torch.Tensor): Centroids of Gaussian mixture components (num_experts x feature_dim).\n\n    Returns:\n        torch.Tensor: Log posterior probabilities for each data point (batch_size x num_experts).\n    \"\"\"\n     return (\n        torch.matmul(z, expert_centroids.t())\n        # - 0.5 * (\n        #     torch.einsum('ni,ni->n', z, z)[:, None] +\n        #     torch.einsum('ni,ni->n', expert_centroids, expert_centroids)[None, :]\n        # )\n     )\n\n\n@torch.jit.script\ndef compute_gating(k: int, probs: torch.Tensor, top_k_gates: torch.Tensor, top_k_indices: torch.Tensor):\n    \"\"\"\n    Compute gating values for the mixture of experts based on probabilities and top-k indices.\n\n    Args:\n        k (int): Number of experts to select.\n        probs (torch.Tensor): Probability values for each expert (batch_size x num_experts).\n        top_k_gates (torch.Tensor): Gating values for top-k experts (batch_size x k).\n        top_k_indices (torch.Tensor): Indices of top-k experts (batch_size x k).\n\n    Returns:\n        torch.Tensor: Batch-level gating values.\n        torch.Tensor: Batch-level expert indices.\n        torch.Tensor: Expert size for each expert.\n        torch.Tensor: Sorted indices of top-k experts.\n    \"\"\"\n    zeros = torch.zeros_like(probs)\n    gates = zeros.scatter(1, top_k_indices, 1)\n    expert_size = gates.long().sum(0)\n    top_k_gates = top_k_gates.flatten()\n    top_k_experts = top_k_indices.flatten()\n    _, index_sorted_experts = top_k_experts.sort(0)\n    batch_index = index_sorted_experts.div(k, rounding_mode='trunc')\n    batch_gates = top_k_gates[index_sorted_experts]\n    return batch_gates, batch_index, expert_size, index_sorted_experts\n\n\nclass top_k_gating(nn.Module):\n    def __init__(\n        self,\n        input_size, \n        num_experts, \n        top_k,\n        acc_aux_loss=False, \n        dropout=0.1,\n        hidden_size=256,\n        sample_topk=0,\n        aux_loss='mi',\n        gate_type='mlp',\n    ):\n        \"\"\"\n        Initialize the top-k gating mechanism.\n\n        Args:\n            input_size (int): Size of the input.\n            num_experts (int): Number of experts.\n            top_k (int): Number of top experts to select.\n            acc_aux_loss (bool): Whether to accumulate auxiliary loss statistics.\n            dropout (float): Dropout rate for gating network.\n            hidden_size (int): Hidden size of the gating network.\n            sample_topk (int): Number of top-k experts to sample during training.\n            aux_loss (str): Type of auxiliary loss ('mi' or 'switch').\n            gate_type (str): Type of gating mechanism ('mlp', 'linear', or 'gmm').\n        \"\"\"\n        super().__init__()\n\n        self.num_experts = num_experts\n        
self.input_size = input_size\n        assert top_k <= num_experts\n        self.top_k = top_k\n        assert sample_topk <= top_k\n        self.sample_topk = sample_topk\n\n        self.acc_aux_loss = acc_aux_loss\n        self.aux_loss = aux_loss\n        self.init_aux_statistics()\n\n        self.gate_type = gate_type\n        if gate_type == 'mlp':\n            self.w_gate = nn.Sequential(\n                nn.Linear(input_size, hidden_size),\n                nn.GELU(),\n                nn.Dropout(dropout),\n                nn.Linear(hidden_size, num_experts, bias=False)\n            )\n        elif gate_type == 'linear':\n            self.w_gate = nn.Sequential(\n                nn.Linear(input_size, num_experts, bias=False)\n            )\n        elif gate_type == 'gmm':\n            self.w_gate = nn.Linear(input_size, hidden_size, bias=False)\n            self.expert_centroids = nn.Parameter(torch.empty(num_experts, hidden_size))\n            nn.init.normal_(self.expert_centroids)\n            self.temperature = nn.Parameter(torch.zeros(1))\n        else:\n            print(gate_type)\n            raise NotImplementedError\n\n    def extra_repr(self):\n        \"\"\"\n        Return extra representation string for the module.\n        \"\"\"\n        return 'k={}, num_experts={}, aux_loss={}'.format(\n            self.top_k, self.num_experts, self.aux_loss)\n\n    def init_aux_statistics(self):\n        \"\"\"\n        Initialize auxiliary statistics based on the chosen auxiliary loss type.\n        \"\"\"\n        if self.aux_loss == 'mi':\n            self.p_e = 0.\n            self.neg_H_e_given_x = 0.\n            self.count_layers = 0\n        else:\n            self.acc_probs = 0.\n            self.acc_freq = 0.\n            self.acc_lsesq = 0.\n            self.acc_count = 0\n\n    def update_aux_statistics(self, probs, logits, gates, skip_mask=None):\n        \"\"\"\n        Update auxiliary statistics based on the current batch.\n\n        Args:\n            probs (torch.Tensor): Probability values for each expert.\n            logits (torch.Tensor): Logits values for each expert.\n            gates (torch.Tensor): Gating values for each expert.\n            skip_mask (torch.Tensor): Skip mask tensor.\n\n        \"\"\"\n        if self.aux_loss == 'mi':\n            log_prob = torch.log_softmax(logits, dim=-1)\n            self.p_e = self.p_e + probs.mean(0)\n            self.neg_H_e_given_x = self.neg_H_e_given_x + (probs * log_prob).sum() / probs.size(0)\n            self.count_layers += 1\n        else:\n            self.acc_count = self.acc_count + logits.size(0)\n            self.acc_probs = self.acc_probs + probs.sum(0)\n            self.acc_freq = self.acc_freq + (gates > 0).float().sum(0)\n            lsesq = torch.log(torch.exp(logits).sum(dim=-1)) ** 2\n            self.acc_lsesq = self.acc_lsesq + lsesq.sum()\n\n    def get_aux_loss_and_clear(self, eps=1e-8):\n        \"\"\"\n        Calculate and return the auxiliary loss based on the accumulated statistics.\n\n        Args:\n            eps (float): Small epsilon value for numerical stability.\n\n        Returns:\n            torch.Tensor: The calculated auxiliary loss.\n        \"\"\"\n        if self.aux_loss == 'mi':\n            denominator = self.count_layers \n            p_e = self.p_e / denominator\n            H_e = -(p_e * (p_e + eps).log()).sum()\n            neg_H_e_given_x = self.neg_H_e_given_x / denominator\n            miloss = -(neg_H_e_given_x + H_e)\n            loss = miloss\n        else:\n 
           switchloss =  self.num_experts * (\n                F.normalize(self.acc_probs, p=1, dim=0) *\n                F.normalize(self.acc_freq, p=1, dim=0)\n            ).sum()\n            zloss = self.acc_lsesq / self.acc_count\n            loss = switchloss + 0.1 * zloss\n\n        self.init_aux_statistics()\n        return loss\n\n    def forward(self, x, skip_mask=None):\n        \"\"\"\n        Compute the top-k gating for the input.\n\n        See paper: https://arxiv.org/abs/1701.06538.\n\n        Args:\n            x (torch.Tensor): Input tensor with shape [batch_size, input_size].\n            skip_mask (torch.Tensor): Skip mask tensor (binary) with the same shape as `x`.\n            x: input Tensor with shape [batch_size, input_size]\n            train: a boolean - we only add noise at training time.\n            noise_epsilon: a float\n\n        Returns:\n            torch.Tensor: Top-k indices.\n            torch.Tensor: Top-k gating values.\n            torch.Tensor: Probability values for each expert.\n            gates: a Tensor with shape [batch_size, num_experts]\n            load: a Tensor with shape [num_experts]\n        \"\"\"\n\n        if self.gate_type in ['linear', 'mlp']:\n            logits = self.w_gate(x)\n        elif self.gate_type == 'gmm':\n            z = self.w_gate(x)\n            logits = log_gmm_posterior(F.normalize(z, p=2, dim=-1), F.normalize(self.expert_centroids, p=2, dim=-1)) * self.temperature.exp()\n\n        probs = torch.softmax(logits, dim=1)\n        if skip_mask is not None:\n            probs = torch.masked_fill(probs, (skip_mask == 0), 0)\n            logits = torch.masked_fill(logits, (skip_mask == 0), 0)\n\n        if self.training and (self.sample_topk > 0):\n            _, top_km1_indices = probs.topk(self.top_k - self.sample_topk, dim=1)\n            masked_probs = probs + 1e-6\n            masked_probs[torch.arange(probs.size(0)).unsqueeze(\n                1), top_km1_indices] = 0\n            k_indices = torch.multinomial(masked_probs, self.sample_topk)\n            top_k_indices = torch.cat([top_km1_indices, k_indices], dim=-1)\n            top_k_gates = torch.gather(probs, 1, top_k_indices)\n        else:\n            top_k_gates, top_k_indices = probs.topk(self.top_k, dim=1)\n\n        # if self.top_k > 1:\n        #     top_k_gates = top_k_gates / (top_k_gates.sum(dim=1, keepdim=True) + 1e-6)\n        \n        # gate = torch.zeros_like(top_k_gates)\n        # gate[:, 0] = 1\n        # top_k_gates = (gate - top_k_gates).detach() + top_k_gates\n\n        zeros = torch.zeros_like(probs)\n        gates = zeros.scatter(1, top_k_indices, top_k_gates)\n        self.update_aux_statistics(probs, logits, gates, skip_mask)\n        if not self.acc_aux_loss:\n            self.loss = self.get_aux_loss_and_clear()\n        else:\n            self.loss = 0\n\n        return top_k_indices, top_k_gates, probs\n\n        # batch_gates, batch_index, expert_size, gates, index_sorted_experts = \\\n        #     compute_gating(self.top_k, probs, top_k_gates, top_k_indices)\n\n        # return batch_gates, batch_index, expert_size.tolist(), gates, index_sorted_experts","source_hash":"50c6c515957442e85e32668bcf9dd95bbc6014fedc19bd913fc452d74e2c953d","truncated":false}
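A sketch of a forward pass through the `top_k_gating` module defined above, assuming `moduleformer.utils.gate` is importable: it returns per-token expert indices, the corresponding gate values, and the full softmax distribution, and stores the auxiliary loss on `gate.loss`.

```python
import torch
from moduleformer.utils.gate import top_k_gating

gate = top_k_gating(input_size=32, num_experts=8, top_k=2, aux_loss='mi', gate_type='mlp')
gate.eval()  # disables gating dropout and the sampled-top-k training branch

x = torch.randn(10, 32)  # 10 tokens with feature size 32
top_k_indices, top_k_gates, probs = gate(x)

print(top_k_indices.shape)  # torch.Size([10, 2])
print(top_k_gates.shape)    # torch.Size([10, 2])
print(probs.shape)          # torch.Size([10, 8])
print(gate.loss)            # mutual-information auxiliary loss for this batch
```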
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.utils.gate.log_gmm_posterior","uri":"program://ModuleFormer/function/moduleformer.utils.gate.log_gmm_posterior#L7-L24","kind":"function","name":"log_gmm_posterior","path":"moduleformer/utils/gate.py","language":"python","start_line":7,"end_line":24,"context_start_line":1,"context_end_line":44,"code":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom typing import Any, Dict, List, Optional\n\n# @torch.jit.script\ndef log_gmm_posterior(z, expert_centroids):\n     \"\"\"\n    Compute the log posterior probabilities of data points z belonging to Gaussian mixture components defined by centroids.\n\n    Args:\n        z (torch.Tensor): Data points (batch_size x feature_dim).\n        expert_centroids (torch.Tensor): Centroids of Gaussian mixture components (num_experts x feature_dim).\n\n    Returns:\n        torch.Tensor: Log posterior probabilities for each data point (batch_size x num_experts).\n    \"\"\"\n     return (\n        torch.matmul(z, expert_centroids.t())\n        # - 0.5 * (\n        #     torch.einsum('ni,ni->n', z, z)[:, None] +\n        #     torch.einsum('ni,ni->n', expert_centroids, expert_centroids)[None, :]\n        # )\n     )\n\n\n@torch.jit.script\ndef compute_gating(k: int, probs: torch.Tensor, top_k_gates: torch.Tensor, top_k_indices: torch.Tensor):\n    \"\"\"\n    Compute gating values for the mixture of experts based on probabilities and top-k indices.\n\n    Args:\n        k (int): Number of experts to select.\n        probs (torch.Tensor): Probability values for each expert (batch_size x num_experts).\n        top_k_gates (torch.Tensor): Gating values for top-k experts (batch_size x k).\n        top_k_indices (torch.Tensor): Indices of top-k experts (batch_size x k).\n\n    Returns:\n        torch.Tensor: Batch-level gating values.\n        torch.Tensor: Batch-level expert indices.\n        torch.Tensor: Expert size for each expert.\n        torch.Tensor: Sorted indices of top-k experts.\n    \"\"\"\n    zeros = torch.zeros_like(probs)","source_hash":"50c6c515957442e85e32668bcf9dd95bbc6014fedc19bd913fc452d74e2c953d","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.utils.gate.compute_gating","uri":"program://ModuleFormer/function/moduleformer.utils.gate.compute_gating#L28-L52","kind":"function","name":"compute_gating","path":"moduleformer/utils/gate.py","language":"python","start_line":28,"end_line":52,"context_start_line":8,"context_end_line":72,"code":"     \"\"\"\n    Compute the log posterior probabilities of data points z belonging to Gaussian mixture components defined by centroids.\n\n    Args:\n        z (torch.Tensor): Data points (batch_size x feature_dim).\n        expert_centroids (torch.Tensor): Centroids of Gaussian mixture components (num_experts x feature_dim).\n\n    Returns:\n        torch.Tensor: Log posterior probabilities for each data point (batch_size x num_experts).\n    \"\"\"\n     return (\n        torch.matmul(z, expert_centroids.t())\n        # - 0.5 * (\n        #     torch.einsum('ni,ni->n', z, z)[:, None] +\n        #     torch.einsum('ni,ni->n', expert_centroids, expert_centroids)[None, :]\n        # )\n     )\n\n\n@torch.jit.script\ndef compute_gating(k: int, probs: torch.Tensor, top_k_gates: torch.Tensor, top_k_indices: torch.Tensor):\n    \"\"\"\n    Compute gating values for the mixture of experts based on probabilities and top-k indices.\n\n    Args:\n        k (int): Number of experts to select.\n        probs (torch.Tensor): Probability values for each expert (batch_size x num_experts).\n        top_k_gates (torch.Tensor): Gating values for top-k experts (batch_size x k).\n        top_k_indices (torch.Tensor): Indices of top-k experts (batch_size x k).\n\n    Returns:\n        torch.Tensor: Batch-level gating values.\n        torch.Tensor: Batch-level expert indices.\n        torch.Tensor: Expert size for each expert.\n        torch.Tensor: Sorted indices of top-k experts.\n    \"\"\"\n    zeros = torch.zeros_like(probs)\n    gates = zeros.scatter(1, top_k_indices, 1)\n    expert_size = gates.long().sum(0)\n    top_k_gates = top_k_gates.flatten()\n    top_k_experts = top_k_indices.flatten()\n    _, index_sorted_experts = top_k_experts.sort(0)\n    batch_index = index_sorted_experts.div(k, rounding_mode='trunc')\n    batch_gates = top_k_gates[index_sorted_experts]\n    return batch_gates, batch_index, expert_size, index_sorted_experts\n\n\nclass top_k_gating(nn.Module):\n    def __init__(\n        self,\n        input_size, \n        num_experts, \n        top_k,\n        acc_aux_loss=False, \n        dropout=0.1,\n        hidden_size=256,\n        sample_topk=0,\n        aux_loss='mi',\n        gate_type='mlp',\n    ):\n        \"\"\"\n        Initialize the top-k gating mechanism.\n\n        Args:\n            input_size (int): Size of the input.","source_hash":"50c6c515957442e85e32668bcf9dd95bbc6014fedc19bd913fc452d74e2c953d","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.utils.gate.top_k_gating","uri":"program://ModuleFormer/class/moduleformer.utils.gate.top_k_gating#L55-L246","kind":"class","name":"top_k_gating","path":"moduleformer/utils/gate.py","language":"python","start_line":55,"end_line":246,"context_start_line":35,"context_end_line":251,"code":"        top_k_gates (torch.Tensor): Gating values for top-k experts (batch_size x k).\n        top_k_indices (torch.Tensor): Indices of top-k experts (batch_size x k).\n\n    Returns:\n        torch.Tensor: Batch-level gating values.\n        torch.Tensor: Batch-level expert indices.\n        torch.Tensor: Expert size for each expert.\n        torch.Tensor: Sorted indices of top-k experts.\n    \"\"\"\n    zeros = torch.zeros_like(probs)\n    gates = zeros.scatter(1, top_k_indices, 1)\n    expert_size = gates.long().sum(0)\n    top_k_gates = top_k_gates.flatten()\n    top_k_experts = top_k_indices.flatten()\n    _, index_sorted_experts = top_k_experts.sort(0)\n    batch_index = index_sorted_experts.div(k, rounding_mode='trunc')\n    batch_gates = top_k_gates[index_sorted_experts]\n    return batch_gates, batch_index, expert_size, index_sorted_experts\n\n\nclass top_k_gating(nn.Module):\n    def __init__(\n        self,\n        input_size, \n        num_experts, \n        top_k,\n        acc_aux_loss=False, \n        dropout=0.1,\n        hidden_size=256,\n        sample_topk=0,\n        aux_loss='mi',\n        gate_type='mlp',\n    ):\n        \"\"\"\n        Initialize the top-k gating mechanism.\n\n        Args:\n            input_size (int): Size of the input.\n            num_experts (int): Number of experts.\n            top_k (int): Number of top experts to select.\n            acc_aux_loss (bool): Whether to accumulate auxiliary loss statistics.\n            dropout (float): Dropout rate for gating network.\n            hidden_size (int): Hidden size of the gating network.\n            sample_topk (int): Number of top-k experts to sample during training.\n            aux_loss (str): Type of auxiliary loss ('mi' or 'switch').\n            gate_type (str): Type of gating mechanism ('mlp', 'linear', or 'gmm').\n        \"\"\"\n        super().__init__()\n\n        self.num_experts = num_experts\n        self.input_size = input_size\n        assert top_k <= num_experts\n        self.top_k = top_k\n        assert sample_topk <= top_k\n        self.sample_topk = sample_topk\n\n        self.acc_aux_loss = acc_aux_loss\n        self.aux_loss = aux_loss\n        self.init_aux_statistics()\n\n        self.gate_type = gate_type\n        if gate_type == 'mlp':\n            self.w_gate = nn.Sequential(\n                nn.Linear(input_size, hidden_size),\n                nn.GELU(),\n                nn.Dropout(dropout),\n                nn.Linear(hidden_size, num_experts, bias=False)\n            )\n        elif gate_type == 'linear':\n            self.w_gate = nn.Sequential(\n                nn.Linear(input_size, num_experts, bias=False)\n            )\n        elif gate_type == 'gmm':\n            self.w_gate = nn.Linear(input_size, hidden_size, bias=False)\n            self.expert_centroids = nn.Parameter(torch.empty(num_experts, hidden_size))\n            nn.init.normal_(self.expert_centroids)\n            self.temperature = nn.Parameter(torch.zeros(1))\n        else:\n            print(gate_type)\n            raise NotImplementedError\n\n    def extra_repr(self):\n        \"\"\"\n        Return extra representation string for the module.\n        \"\"\"\n 
       return 'k={}, num_experts={}, aux_loss={}'.format(\n            self.top_k, self.num_experts, self.aux_loss)\n\n    def init_aux_statistics(self):\n        \"\"\"\n        Initialize auxiliary statistics based on the chosen auxiliary loss type.\n        \"\"\"\n        if self.aux_loss == 'mi':\n            self.p_e = 0.\n            self.neg_H_e_given_x = 0.\n            self.count_layers = 0\n        else:\n            self.acc_probs = 0.\n            self.acc_freq = 0.\n            self.acc_lsesq = 0.\n            self.acc_count = 0\n\n    def update_aux_statistics(self, probs, logits, gates, skip_mask=None):\n        \"\"\"\n        Update auxiliary statistics based on the current batch.\n\n        Args:\n            probs (torch.Tensor): Probability values for each expert.\n            logits (torch.Tensor): Logits values for each expert.\n            gates (torch.Tensor): Gating values for each expert.\n            skip_mask (torch.Tensor): Skip mask tensor.\n\n        \"\"\"\n        if self.aux_loss == 'mi':\n            log_prob = torch.log_softmax(logits, dim=-1)\n            self.p_e = self.p_e + probs.mean(0)\n            self.neg_H_e_given_x = self.neg_H_e_given_x + (probs * log_prob).sum() / probs.size(0)\n            self.count_layers += 1\n        else:\n            self.acc_count = self.acc_count + logits.size(0)\n            self.acc_probs = self.acc_probs + probs.sum(0)\n            self.acc_freq = self.acc_freq + (gates > 0).float().sum(0)\n            lsesq = torch.log(torch.exp(logits).sum(dim=-1)) ** 2\n            self.acc_lsesq = self.acc_lsesq + lsesq.sum()\n\n    def get_aux_loss_and_clear(self, eps=1e-8):\n        \"\"\"\n        Calculate and return the auxiliary loss based on the accumulated statistics.\n\n        Args:\n            eps (float): Small epsilon value for numerical stability.\n\n        Returns:\n            torch.Tensor: The calculated auxiliary loss.\n        \"\"\"\n        if self.aux_loss == 'mi':\n            denominator = self.count_layers \n            p_e = self.p_e / denominator\n            H_e = -(p_e * (p_e + eps).log()).sum()\n            neg_H_e_given_x = self.neg_H_e_given_x / denominator\n            miloss = -(neg_H_e_given_x + H_e)\n            loss = miloss\n        else:\n            switchloss =  self.num_experts * (\n                F.normalize(self.acc_probs, p=1, dim=0) *\n                F.normalize(self.acc_freq, p=1, dim=0)\n            ).sum()\n            zloss = self.acc_lsesq / self.acc_count\n            loss = switchloss + 0.1 * zloss\n\n        self.init_aux_statistics()\n        return loss\n\n    def forward(self, x, skip_mask=None):\n        \"\"\"\n        Compute the top-k gating for the input.\n\n        See paper: https://arxiv.org/abs/1701.06538.\n\n        Args:\n            x (torch.Tensor): Input tensor with shape [batch_size, input_size].\n            skip_mask (torch.Tensor): Skip mask tensor (binary) with the same shape as `x`.\n            x: input Tensor with shape [batch_size, input_size]\n            train: a boolean - we only add noise at training time.\n            noise_epsilon: a float\n\n        Returns:\n            torch.Tensor: Top-k indices.\n            torch.Tensor: Top-k gating values.\n            torch.Tensor: Probability values for each expert.\n            gates: a Tensor with shape [batch_size, num_experts]\n            load: a Tensor with shape [num_experts]\n        \"\"\"\n\n        if self.gate_type in ['linear', 'mlp']:\n            logits = self.w_gate(x)\n        
elif self.gate_type == 'gmm':\n            z = self.w_gate(x)\n            logits = log_gmm_posterior(F.normalize(z, p=2, dim=-1), F.normalize(self.expert_centroids, p=2, dim=-1)) * self.temperature.exp()\n\n        probs = torch.softmax(logits, dim=1)\n        if skip_mask is not None:\n            probs = torch.masked_fill(probs, (skip_mask == 0), 0)\n            logits = torch.masked_fill(logits, (skip_mask == 0), 0)\n\n        if self.training and (self.sample_topk > 0):\n            _, top_km1_indices = probs.topk(self.top_k - self.sample_topk, dim=1)\n            masked_probs = probs + 1e-6\n            masked_probs[torch.arange(probs.size(0)).unsqueeze(\n                1), top_km1_indices] = 0\n            k_indices = torch.multinomial(masked_probs, self.sample_topk)\n            top_k_indices = torch.cat([top_km1_indices, k_indices], dim=-1)\n            top_k_gates = torch.gather(probs, 1, top_k_indices)\n        else:\n            top_k_gates, top_k_indices = probs.topk(self.top_k, dim=1)\n\n        # if self.top_k > 1:\n        #     top_k_gates = top_k_gates / (top_k_gates.sum(dim=1, keepdim=True) + 1e-6)\n        \n        # gate = torch.zeros_like(top_k_gates)\n        # gate[:, 0] = 1\n        # top_k_gates = (gate - top_k_gates).detach() + top_k_gates\n\n        zeros = torch.zeros_like(probs)\n        gates = zeros.scatter(1, top_k_indices, top_k_gates)\n        self.update_aux_statistics(probs, logits, gates, skip_mask)\n        if not self.acc_aux_loss:\n            self.loss = self.get_aux_loss_and_clear()\n        else:\n            self.loss = 0\n\n        return top_k_indices, top_k_gates, probs\n\n        # batch_gates, batch_index, expert_size, gates, index_sorted_experts = \\\n        #     compute_gating(self.top_k, probs, top_k_gates, top_k_indices)\n\n        # return batch_gates, batch_index, expert_size.tolist(), gates, index_sorted_experts","source_hash":"50c6c515957442e85e32668bcf9dd95bbc6014fedc19bd913fc452d74e2c953d","truncated":false}
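A minimal sketch of the dense gate construction performed at the end of the top_k_gating forward pass above, using random logits and illustrative sizes (a batch of 4 tokens, 8 experts, k=2):

    import torch

    batch_size, num_experts, top_k = 4, 8, 2
    logits = torch.randn(batch_size, num_experts)

    probs = torch.softmax(logits, dim=1)                   # per-token expert probabilities
    top_k_gates, top_k_indices = probs.topk(top_k, dim=1)  # greedy selection of k experts

    # Scatter the selected gate values back into a dense [batch_size, num_experts]
    # matrix, leaving unselected experts at zero.
    gates = torch.zeros_like(probs).scatter(1, top_k_indices, top_k_gates)
    print(gates)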
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.utils.gate.__init__","uri":"program://ModuleFormer/function/moduleformer.utils.gate.__init__#L56-L114","kind":"function","name":"__init__","path":"moduleformer/utils/gate.py","language":"python","start_line":56,"end_line":114,"context_start_line":36,"context_end_line":134,"code":"        top_k_indices (torch.Tensor): Indices of top-k experts (batch_size x k).\n\n    Returns:\n        torch.Tensor: Batch-level gating values.\n        torch.Tensor: Batch-level expert indices.\n        torch.Tensor: Expert size for each expert.\n        torch.Tensor: Sorted indices of top-k experts.\n    \"\"\"\n    zeros = torch.zeros_like(probs)\n    gates = zeros.scatter(1, top_k_indices, 1)\n    expert_size = gates.long().sum(0)\n    top_k_gates = top_k_gates.flatten()\n    top_k_experts = top_k_indices.flatten()\n    _, index_sorted_experts = top_k_experts.sort(0)\n    batch_index = index_sorted_experts.div(k, rounding_mode='trunc')\n    batch_gates = top_k_gates[index_sorted_experts]\n    return batch_gates, batch_index, expert_size, index_sorted_experts\n\n\nclass top_k_gating(nn.Module):\n    def __init__(\n        self,\n        input_size, \n        num_experts, \n        top_k,\n        acc_aux_loss=False, \n        dropout=0.1,\n        hidden_size=256,\n        sample_topk=0,\n        aux_loss='mi',\n        gate_type='mlp',\n    ):\n        \"\"\"\n        Initialize the top-k gating mechanism.\n\n        Args:\n            input_size (int): Size of the input.\n            num_experts (int): Number of experts.\n            top_k (int): Number of top experts to select.\n            acc_aux_loss (bool): Whether to accumulate auxiliary loss statistics.\n            dropout (float): Dropout rate for gating network.\n            hidden_size (int): Hidden size of the gating network.\n            sample_topk (int): Number of top-k experts to sample during training.\n            aux_loss (str): Type of auxiliary loss ('mi' or 'switch').\n            gate_type (str): Type of gating mechanism ('mlp', 'linear', or 'gmm').\n        \"\"\"\n        super().__init__()\n\n        self.num_experts = num_experts\n        self.input_size = input_size\n        assert top_k <= num_experts\n        self.top_k = top_k\n        assert sample_topk <= top_k\n        self.sample_topk = sample_topk\n\n        self.acc_aux_loss = acc_aux_loss\n        self.aux_loss = aux_loss\n        self.init_aux_statistics()\n\n        self.gate_type = gate_type\n        if gate_type == 'mlp':\n            self.w_gate = nn.Sequential(\n                nn.Linear(input_size, hidden_size),\n                nn.GELU(),\n                nn.Dropout(dropout),\n                nn.Linear(hidden_size, num_experts, bias=False)\n            )\n        elif gate_type == 'linear':\n            self.w_gate = nn.Sequential(\n                nn.Linear(input_size, num_experts, bias=False)\n            )\n        elif gate_type == 'gmm':\n            self.w_gate = nn.Linear(input_size, hidden_size, bias=False)\n            self.expert_centroids = nn.Parameter(torch.empty(num_experts, hidden_size))\n            nn.init.normal_(self.expert_centroids)\n            self.temperature = nn.Parameter(torch.zeros(1))\n        else:\n            print(gate_type)\n            raise NotImplementedError\n\n    def extra_repr(self):\n        \"\"\"\n        Return extra representation string for the module.\n        \"\"\"\n        return 'k={}, num_experts={}, aux_loss={}'.format(\n            self.top_k, 
self.num_experts, self.aux_loss)\n\n    def init_aux_statistics(self):\n        \"\"\"\n        Initialize auxiliary statistics based on the chosen auxiliary loss type.\n        \"\"\"\n        if self.aux_loss == 'mi':\n            self.p_e = 0.\n            self.neg_H_e_given_x = 0.\n            self.count_layers = 0\n        else:\n            self.acc_probs = 0.\n            self.acc_freq = 0.\n            self.acc_lsesq = 0.","source_hash":"50c6c515957442e85e32668bcf9dd95bbc6014fedc19bd913fc452d74e2c953d","truncated":false}
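The compute_gating helper shown in the context above can be exercised in isolation; the following sketch uses hand-written top-k assignments for 3 tokens and 4 experts to show how the expert-sorted bookkeeping tensors are derived:

    import torch

    k = 2
    top_k_indices = torch.tensor([[1, 3], [0, 3], [1, 0]])            # chosen expert ids per token
    top_k_gates = torch.tensor([[0.6, 0.4], [0.7, 0.3], [0.5, 0.5]])

    gates = torch.zeros(3, 4).scatter(1, top_k_indices, 1)
    expert_size = gates.long().sum(0)                                 # tokens routed to each expert
    top_k_experts = top_k_indices.flatten()
    _, index_sorted_experts = top_k_experts.sort(0)
    batch_index = index_sorted_experts.div(k, rounding_mode='trunc')  # originating token per sorted slot
    batch_gates = top_k_gates.flatten()[index_sorted_experts]

    print(expert_size)   # tensor([2, 2, 0, 2])
    print(batch_index)   # e.g. tensor([1, 2, 0, 2, 0, 1]); tie order may vary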
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.utils.gate.extra_repr","uri":"program://ModuleFormer/function/moduleformer.utils.gate.extra_repr#L116-L121","kind":"function","name":"extra_repr","path":"moduleformer/utils/gate.py","language":"python","start_line":116,"end_line":121,"context_start_line":96,"context_end_line":141,"code":"        if gate_type == 'mlp':\n            self.w_gate = nn.Sequential(\n                nn.Linear(input_size, hidden_size),\n                nn.GELU(),\n                nn.Dropout(dropout),\n                nn.Linear(hidden_size, num_experts, bias=False)\n            )\n        elif gate_type == 'linear':\n            self.w_gate = nn.Sequential(\n                nn.Linear(input_size, num_experts, bias=False)\n            )\n        elif gate_type == 'gmm':\n            self.w_gate = nn.Linear(input_size, hidden_size, bias=False)\n            self.expert_centroids = nn.Parameter(torch.empty(num_experts, hidden_size))\n            nn.init.normal_(self.expert_centroids)\n            self.temperature = nn.Parameter(torch.zeros(1))\n        else:\n            print(gate_type)\n            raise NotImplementedError\n\n    def extra_repr(self):\n        \"\"\"\n        Return extra representation string for the module.\n        \"\"\"\n        return 'k={}, num_experts={}, aux_loss={}'.format(\n            self.top_k, self.num_experts, self.aux_loss)\n\n    def init_aux_statistics(self):\n        \"\"\"\n        Initialize auxiliary statistics based on the chosen auxiliary loss type.\n        \"\"\"\n        if self.aux_loss == 'mi':\n            self.p_e = 0.\n            self.neg_H_e_given_x = 0.\n            self.count_layers = 0\n        else:\n            self.acc_probs = 0.\n            self.acc_freq = 0.\n            self.acc_lsesq = 0.\n            self.acc_count = 0\n\n    def update_aux_statistics(self, probs, logits, gates, skip_mask=None):\n        \"\"\"\n        Update auxiliary statistics based on the current batch.\n\n        Args:","source_hash":"50c6c515957442e85e32668bcf9dd95bbc6014fedc19bd913fc452d74e2c953d","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.utils.gate.init_aux_statistics","uri":"program://ModuleFormer/function/moduleformer.utils.gate.init_aux_statistics#L123-L135","kind":"function","name":"init_aux_statistics","path":"moduleformer/utils/gate.py","language":"python","start_line":123,"end_line":135,"context_start_line":103,"context_end_line":155,"code":"        elif gate_type == 'linear':\n            self.w_gate = nn.Sequential(\n                nn.Linear(input_size, num_experts, bias=False)\n            )\n        elif gate_type == 'gmm':\n            self.w_gate = nn.Linear(input_size, hidden_size, bias=False)\n            self.expert_centroids = nn.Parameter(torch.empty(num_experts, hidden_size))\n            nn.init.normal_(self.expert_centroids)\n            self.temperature = nn.Parameter(torch.zeros(1))\n        else:\n            print(gate_type)\n            raise NotImplementedError\n\n    def extra_repr(self):\n        \"\"\"\n        Return extra representation string for the module.\n        \"\"\"\n        return 'k={}, num_experts={}, aux_loss={}'.format(\n            self.top_k, self.num_experts, self.aux_loss)\n\n    def init_aux_statistics(self):\n        \"\"\"\n        Initialize auxiliary statistics based on the chosen auxiliary loss type.\n        \"\"\"\n        if self.aux_loss == 'mi':\n            self.p_e = 0.\n            self.neg_H_e_given_x = 0.\n            self.count_layers = 0\n        else:\n            self.acc_probs = 0.\n            self.acc_freq = 0.\n            self.acc_lsesq = 0.\n            self.acc_count = 0\n\n    def update_aux_statistics(self, probs, logits, gates, skip_mask=None):\n        \"\"\"\n        Update auxiliary statistics based on the current batch.\n\n        Args:\n            probs (torch.Tensor): Probability values for each expert.\n            logits (torch.Tensor): Logits values for each expert.\n            gates (torch.Tensor): Gating values for each expert.\n            skip_mask (torch.Tensor): Skip mask tensor.\n\n        \"\"\"\n        if self.aux_loss == 'mi':\n            log_prob = torch.log_softmax(logits, dim=-1)\n            self.p_e = self.p_e + probs.mean(0)\n            self.neg_H_e_given_x = self.neg_H_e_given_x + (probs * log_prob).sum() / probs.size(0)\n            self.count_layers += 1\n        else:\n            self.acc_count = self.acc_count + logits.size(0)\n            self.acc_probs = self.acc_probs + probs.sum(0)","source_hash":"50c6c515957442e85e32668bcf9dd95bbc6014fedc19bd913fc452d74e2c953d","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.utils.gate.update_aux_statistics","uri":"program://ModuleFormer/function/moduleformer.utils.gate.update_aux_statistics#L137-L158","kind":"function","name":"update_aux_statistics","path":"moduleformer/utils/gate.py","language":"python","start_line":137,"end_line":158,"context_start_line":117,"context_end_line":178,"code":"        \"\"\"\n        Return extra representation string for the module.\n        \"\"\"\n        return 'k={}, num_experts={}, aux_loss={}'.format(\n            self.top_k, self.num_experts, self.aux_loss)\n\n    def init_aux_statistics(self):\n        \"\"\"\n        Initialize auxiliary statistics based on the chosen auxiliary loss type.\n        \"\"\"\n        if self.aux_loss == 'mi':\n            self.p_e = 0.\n            self.neg_H_e_given_x = 0.\n            self.count_layers = 0\n        else:\n            self.acc_probs = 0.\n            self.acc_freq = 0.\n            self.acc_lsesq = 0.\n            self.acc_count = 0\n\n    def update_aux_statistics(self, probs, logits, gates, skip_mask=None):\n        \"\"\"\n        Update auxiliary statistics based on the current batch.\n\n        Args:\n            probs (torch.Tensor): Probability values for each expert.\n            logits (torch.Tensor): Logits values for each expert.\n            gates (torch.Tensor): Gating values for each expert.\n            skip_mask (torch.Tensor): Skip mask tensor.\n\n        \"\"\"\n        if self.aux_loss == 'mi':\n            log_prob = torch.log_softmax(logits, dim=-1)\n            self.p_e = self.p_e + probs.mean(0)\n            self.neg_H_e_given_x = self.neg_H_e_given_x + (probs * log_prob).sum() / probs.size(0)\n            self.count_layers += 1\n        else:\n            self.acc_count = self.acc_count + logits.size(0)\n            self.acc_probs = self.acc_probs + probs.sum(0)\n            self.acc_freq = self.acc_freq + (gates > 0).float().sum(0)\n            lsesq = torch.log(torch.exp(logits).sum(dim=-1)) ** 2\n            self.acc_lsesq = self.acc_lsesq + lsesq.sum()\n\n    def get_aux_loss_and_clear(self, eps=1e-8):\n        \"\"\"\n        Calculate and return the auxiliary loss based on the accumulated statistics.\n\n        Args:\n            eps (float): Small epsilon value for numerical stability.\n\n        Returns:\n            torch.Tensor: The calculated auxiliary loss.\n        \"\"\"\n        if self.aux_loss == 'mi':\n            denominator = self.count_layers \n            p_e = self.p_e / denominator\n            H_e = -(p_e * (p_e + eps).log()).sum()\n            neg_H_e_given_x = self.neg_H_e_given_x / denominator\n            miloss = -(neg_H_e_given_x + H_e)\n            loss = miloss\n        else:\n            switchloss =  self.num_experts * (","source_hash":"50c6c515957442e85e32668bcf9dd95bbc6014fedc19bd913fc452d74e2c953d","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.utils.gate.get_aux_loss_and_clear","uri":"program://ModuleFormer/function/moduleformer.utils.gate.get_aux_loss_and_clear#L160-L186","kind":"function","name":"get_aux_loss_and_clear","path":"moduleformer/utils/gate.py","language":"python","start_line":160,"end_line":186,"context_start_line":140,"context_end_line":206,"code":"\n        Args:\n            probs (torch.Tensor): Probability values for each expert.\n            logits (torch.Tensor): Logits values for each expert.\n            gates (torch.Tensor): Gating values for each expert.\n            skip_mask (torch.Tensor): Skip mask tensor.\n\n        \"\"\"\n        if self.aux_loss == 'mi':\n            log_prob = torch.log_softmax(logits, dim=-1)\n            self.p_e = self.p_e + probs.mean(0)\n            self.neg_H_e_given_x = self.neg_H_e_given_x + (probs * log_prob).sum() / probs.size(0)\n            self.count_layers += 1\n        else:\n            self.acc_count = self.acc_count + logits.size(0)\n            self.acc_probs = self.acc_probs + probs.sum(0)\n            self.acc_freq = self.acc_freq + (gates > 0).float().sum(0)\n            lsesq = torch.log(torch.exp(logits).sum(dim=-1)) ** 2\n            self.acc_lsesq = self.acc_lsesq + lsesq.sum()\n\n    def get_aux_loss_and_clear(self, eps=1e-8):\n        \"\"\"\n        Calculate and return the auxiliary loss based on the accumulated statistics.\n\n        Args:\n            eps (float): Small epsilon value for numerical stability.\n\n        Returns:\n            torch.Tensor: The calculated auxiliary loss.\n        \"\"\"\n        if self.aux_loss == 'mi':\n            denominator = self.count_layers \n            p_e = self.p_e / denominator\n            H_e = -(p_e * (p_e + eps).log()).sum()\n            neg_H_e_given_x = self.neg_H_e_given_x / denominator\n            miloss = -(neg_H_e_given_x + H_e)\n            loss = miloss\n        else:\n            switchloss =  self.num_experts * (\n                F.normalize(self.acc_probs, p=1, dim=0) *\n                F.normalize(self.acc_freq, p=1, dim=0)\n            ).sum()\n            zloss = self.acc_lsesq / self.acc_count\n            loss = switchloss + 0.1 * zloss\n\n        self.init_aux_statistics()\n        return loss\n\n    def forward(self, x, skip_mask=None):\n        \"\"\"\n        Compute the top-k gating for the input.\n\n        See paper: https://arxiv.org/abs/1701.06538.\n\n        Args:\n            x (torch.Tensor): Input tensor with shape [batch_size, input_size].\n            skip_mask (torch.Tensor): Skip mask tensor (binary) with the same shape as `x`.\n            x: input Tensor with shape [batch_size, input_size]\n            train: a boolean - we only add noise at training time.\n            noise_epsilon: a float\n\n        Returns:\n            torch.Tensor: Top-k indices.\n            torch.Tensor: Top-k gating values.\n            torch.Tensor: Probability values for each expert.\n            gates: a Tensor with shape [batch_size, num_experts]\n            load: a Tensor with shape [num_experts]","source_hash":"50c6c515957442e85e32668bcf9dd95bbc6014fedc19bd913fc452d74e2c953d","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.utils.gate.forward","uri":"program://ModuleFormer/function/moduleformer.utils.gate.forward#L188-L246","kind":"function","name":"forward","path":"moduleformer/utils/gate.py","language":"python","start_line":188,"end_line":246,"context_start_line":168,"context_end_line":251,"code":"            torch.Tensor: The calculated auxiliary loss.\n        \"\"\"\n        if self.aux_loss == 'mi':\n            denominator = self.count_layers \n            p_e = self.p_e / denominator\n            H_e = -(p_e * (p_e + eps).log()).sum()\n            neg_H_e_given_x = self.neg_H_e_given_x / denominator\n            miloss = -(neg_H_e_given_x + H_e)\n            loss = miloss\n        else:\n            switchloss =  self.num_experts * (\n                F.normalize(self.acc_probs, p=1, dim=0) *\n                F.normalize(self.acc_freq, p=1, dim=0)\n            ).sum()\n            zloss = self.acc_lsesq / self.acc_count\n            loss = switchloss + 0.1 * zloss\n\n        self.init_aux_statistics()\n        return loss\n\n    def forward(self, x, skip_mask=None):\n        \"\"\"\n        Compute the top-k gating for the input.\n\n        See paper: https://arxiv.org/abs/1701.06538.\n\n        Args:\n            x (torch.Tensor): Input tensor with shape [batch_size, input_size].\n            skip_mask (torch.Tensor): Skip mask tensor (binary) with the same shape as `x`.\n            x: input Tensor with shape [batch_size, input_size]\n            train: a boolean - we only add noise at training time.\n            noise_epsilon: a float\n\n        Returns:\n            torch.Tensor: Top-k indices.\n            torch.Tensor: Top-k gating values.\n            torch.Tensor: Probability values for each expert.\n            gates: a Tensor with shape [batch_size, num_experts]\n            load: a Tensor with shape [num_experts]\n        \"\"\"\n\n        if self.gate_type in ['linear', 'mlp']:\n            logits = self.w_gate(x)\n        elif self.gate_type == 'gmm':\n            z = self.w_gate(x)\n            logits = log_gmm_posterior(F.normalize(z, p=2, dim=-1), F.normalize(self.expert_centroids, p=2, dim=-1)) * self.temperature.exp()\n\n        probs = torch.softmax(logits, dim=1)\n        if skip_mask is not None:\n            probs = torch.masked_fill(probs, (skip_mask == 0), 0)\n            logits = torch.masked_fill(logits, (skip_mask == 0), 0)\n\n        if self.training and (self.sample_topk > 0):\n            _, top_km1_indices = probs.topk(self.top_k - self.sample_topk, dim=1)\n            masked_probs = probs + 1e-6\n            masked_probs[torch.arange(probs.size(0)).unsqueeze(\n                1), top_km1_indices] = 0\n            k_indices = torch.multinomial(masked_probs, self.sample_topk)\n            top_k_indices = torch.cat([top_km1_indices, k_indices], dim=-1)\n            top_k_gates = torch.gather(probs, 1, top_k_indices)\n        else:\n            top_k_gates, top_k_indices = probs.topk(self.top_k, dim=1)\n\n        # if self.top_k > 1:\n        #     top_k_gates = top_k_gates / (top_k_gates.sum(dim=1, keepdim=True) + 1e-6)\n        \n        # gate = torch.zeros_like(top_k_gates)\n        # gate[:, 0] = 1\n        # top_k_gates = (gate - top_k_gates).detach() + top_k_gates\n\n        zeros = torch.zeros_like(probs)\n        gates = zeros.scatter(1, top_k_indices, top_k_gates)\n        self.update_aux_statistics(probs, logits, gates, skip_mask)\n        if not self.acc_aux_loss:\n            self.loss = 
self.get_aux_loss_and_clear()\n        else:\n            self.loss = 0\n\n        return top_k_indices, top_k_gates, probs\n\n        # batch_gates, batch_index, expert_size, gates, index_sorted_experts = \\\n        #     compute_gating(self.top_k, probs, top_k_gates, top_k_indices)\n\n        # return batch_gates, batch_index, expert_size.tolist(), gates, index_sorted_experts","source_hash":"50c6c515957442e85e32668bcf9dd95bbc6014fedc19bd913fc452d74e2c953d","truncated":false}
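When sample_topk > 0 and the gate is in training mode, the forward pass above mixes greedy and sampled expert selection; a standalone sketch of that path with toy values:

    import torch

    top_k, sample_topk = 2, 1
    probs = torch.softmax(torch.randn(4, 8), dim=1)

    # Take the first (top_k - sample_topk) experts greedily ...
    _, top_km1_indices = probs.topk(top_k - sample_topk, dim=1)

    # ... then zero them out and sample the remaining slots from what is left.
    masked_probs = probs + 1e-6
    masked_probs[torch.arange(probs.size(0)).unsqueeze(1), top_km1_indices] = 0
    k_indices = torch.multinomial(masked_probs, sample_topk)

    top_k_indices = torch.cat([top_km1_indices, k_indices], dim=-1)
    top_k_gates = torch.gather(probs, 1, top_k_indices)
    print(top_k_indices, top_k_gates)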
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.utils.moe","uri":"program://ModuleFormer/module/moduleformer.utils.moe#L1-L214","kind":"module","name":"moduleformer.utils.moe","path":"moduleformer/utils/moe.py","language":"python","start_line":1,"end_line":214,"context_start_line":1,"context_end_line":214,"code":"import math\nfrom typing import List\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom .parallel_experts import ParallelExperts\nfrom .gate import top_k_gating, compute_gating\n\n\nclass MoE(nn.Module):\n    \"\"\"\n    A Sparsely gated mixture of experts layer with 1-layer Feed-Forward networks as experts.\n    \n\n    Args:\n        input_size: integer - size of the input\n        head_size: integer - size of the expert's hidden layer\n        num_experts: an integer - number of experts\n        top_k: an integer - how many experts to use for each batch element\n        bias: a boolean - whether to include bias in linear layers\n        activation: an activation function to apply to expert's outputs\n        acc_aux_loss: a boolean - whether to accumulate auxiliary loss\n        hidden_size: an integer - hidden size of the experts\n        gating_dropout: a float - dropout rate for gating network\n        sample_topk: an integer - how many experts to sample during training\n        gating_size: an integer - size of the gating network\n        aux_loss: a string - type of auxiliary loss ('mi' or 'sparse')\n        gate_type: a string - type of gating mechanism ('mlp' or 'topk')\n    \"\"\"\n\n    def __init__(\n        self, \n        input_size, \n        head_size, \n        num_experts, \n        top_k,\n        bias=False, \n        activation=None, \n        acc_aux_loss=False,\n        hidden_size=None,\n        gating_dropout=0.0,\n        sample_topk=0,\n        gating_size=256,\n        aux_loss='mi',\n        gate_type='mlp',\n        ):\n        super(MoE, self).__init__()\n\n        self.num_experts = num_experts\n        self.input_size = input_size\n        self.head_size = head_size\n        self.bias = bias\n        self.experts = ParallelExperts(num_experts, input_size, head_size, bias)\n        if hidden_size is None:\n            hidden_size = head_size\n        self.output_experts = ParallelExperts(num_experts, hidden_size, input_size, bias)\n        self.top_k = min(top_k, self.num_experts)\n        self.activation = activation\n\n        self.gate = top_k_gating(\n            input_size=input_size, \n            num_experts=num_experts, \n            top_k=top_k, \n            acc_aux_loss=acc_aux_loss, \n            dropout=gating_dropout,\n            sample_topk=sample_topk,\n            hidden_size=gating_size,\n            aux_loss=aux_loss,\n            gate_type=gate_type,\n            )\n\n    def extra_repr(self):\n        return 'k={}'.format(\n            self.top_k)\n\n    def get_aux_loss_and_clear(self):\n        \"\"\"\n        Get the accumulated auxiliary loss and clear it.\n\n        Returns:\n            float: Accumulated auxiliary loss.\n        \"\"\"\n\n        return self.gate.get_aux_loss_and_clear()\n\n    def compute_gate(self, moe_inp, skip_mask=None):\n        \"\"\"\n        Compute gating for the mixture of experts.\n\n        Args:\n            moe_inp (Tensor): Input tensor.\n            skip_mask (Tensor): Skip mask tensor.\n\n        Returns:\n            float: Gating loss.\n        \"\"\"\n\n        top_k_indices, top_k_gates, probs = self.gate(moe_inp, skip_mask=skip_mask)\n        
self.batch_gates, self.batch_index, expert_size, self.index_sorted_experts =\\\n            compute_gating(self.top_k, probs, top_k_gates, top_k_indices)\n        self.expert_size = expert_size.tolist()\n        return self.gate.loss\n\n    def forward(self, x, skip_mask=None, sample_topk=0, multiply_by_gates=True):\n        \"\"\"\n        Forward pass of the mixture of experts layer.\n\n        Args:\n            x (Tensor): Input tensor.\n            skip_mask (Tensor): Skip mask tensor.\n            sample_topk (int): Number of experts to sample during training.\n            multiply_by_gates (bool): Whether to multiply outputs by gating values.\n\n        Returns:\n            Tensor: Output tensor.\n            float: Gating loss.\n        \"\"\"\n        bsz, length, emb_size = x.size()\n        if skip_mask is not None:\n            assert x.size()[:-1] == skip_mask.size(), \\\n                    \"Skip mask should be same shape as `x`\"\n            skip_mask = skip_mask.flatten()[:, None]\n        x = x.reshape(-1, emb_size)\n        loss = self.compute_gate(x, skip_mask)\n\n        expert_inputs = x[self.batch_index]\n        h = self.experts(expert_inputs, self.expert_size)\n        h = self.activation(h)\n        expert_outputs = self.output_experts(h, self.expert_size)\n\n        if multiply_by_gates:\n            expert_outputs = expert_outputs * self.batch_gates[:, None]\n\n        zeros = torch.zeros(\n            (bsz * length, self.input_size),\n            dtype=expert_outputs.dtype, device=expert_outputs.device)\n        y = zeros.index_add(0, self.batch_index, expert_outputs)\n        y = y.view(bsz, length, self.input_size)\n        # assert torch.allclose(y, y_)\n        return y, loss\n\n    def map(self, x, skip_mask=None, sample_topk=0, return_indices=False):\n        \"\"\"\n        \n        Args:\n            x: tensor shape [batch_size, input_size]\n            train: a boolean scalar.\n            loss_coef: a scalar - multiplier on load-balancing losses\n\n        Returns:\n            y: a tensor with shape [batch_size, output_size].\n            extra_training_loss: a scalar.  This should be added into the overall\n            training loss of the model.  
The backpropagation of this loss\n            encourages all experts to be approximately equally used across a batch.\n        \"\"\"\n        \"\"\"\n        Map input through the mixture of experts layer.\n\n        Args:\n            x (Tensor): Input tensor.\n            skip_mask (Tensor): Skip mask tensor.\n            sample_topk (int): Number of experts to sample during training.\n            return_indices (bool): Whether to return expert indices.\n\n        Returns:\n            Tensor: Output tensor.\n            float: Gating loss.\n        \"\"\"\n        if skip_mask is not None:\n            assert x.size()[:-1] == skip_mask.size(), \\\n                    \"Skip mask should be same shape as `x`\"\n        bsz, length, emb_size = x.size()\n        x = x.reshape(-1, emb_size)\n        if skip_mask is not None:\n            skip_mask = skip_mask.view(-1, 1)\n        loss = self.compute_gate(x, skip_mask)\n\n        expert_inputs = x[self.batch_index]\n        expert_outputs = self.experts(expert_inputs, self.expert_size)\n\n        zeros = torch.zeros((bsz * length * self.top_k, self.head_size), \n            dtype=expert_outputs.dtype, device=expert_outputs.device)\n        y = zeros.index_add(0, self.index_sorted_experts, expert_outputs)\n        y = y.view(bsz, length, self.top_k, -1)\n        return y, loss\n\n    def reduce(self, x, multiply_by_gates=True):\n        \"\"\"\n        Reduce the mapped output.\n\n        Args:\n            x (Tensor): Mapped output tensor.\n            multiply_by_gates (bool): Whether to multiply outputs by gating values.\n\n        Returns:\n            Tensor: Reduced output tensor.\n        \"\"\"\n        \n        bsz, length, k, emb_size = x.size()\n        x = x.reshape(-1, emb_size)\n\n        expert_inputs = x[self.index_sorted_experts]\n        expert_outputs = self.output_experts(expert_inputs, self.expert_size)\n\n        if multiply_by_gates:\n            expert_outputs = expert_outputs * self.batch_gates[:, None]\n\n        zeros = torch.zeros((bsz * length, self.input_size), \n            dtype=expert_outputs.dtype, device=expert_outputs.device)\n        y = zeros.index_add(0, self.batch_index, expert_outputs)\n        y = y.view(bsz, length, self.input_size)\n        return y","source_hash":"67a659ec7f4870a47dcebcf18016b9390ed09a97d58f2b0ec4ad2ab626cf8633","truncated":false}
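A usage sketch for the MoE layer defined in this module, assuming the package is installed so that moduleformer.utils.moe is importable; all sizes below are illustrative:

    import torch
    from moduleformer.utils.moe import MoE

    layer = MoE(
        input_size=64,               # token embedding size
        head_size=128,               # expert hidden size
        num_experts=8,
        top_k=2,
        activation=torch.nn.GELU(),  # applied between the two expert projections in forward()
    )

    x = torch.randn(2, 10, 64)       # [batch, sequence, embedding]
    y, aux_loss = layer(x)           # output plus the gating auxiliary loss
    print(y.shape, float(aux_loss))  # torch.Size([2, 10, 64])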
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.utils.moe.MoE","uri":"program://ModuleFormer/class/moduleformer.utils.moe.MoE#L12-L214","kind":"class","name":"MoE","path":"moduleformer/utils/moe.py","language":"python","start_line":12,"end_line":214,"context_start_line":1,"context_end_line":214,"code":"import math\nfrom typing import List\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom .parallel_experts import ParallelExperts\nfrom .gate import top_k_gating, compute_gating\n\n\nclass MoE(nn.Module):\n    \"\"\"\n    A Sparsely gated mixture of experts layer with 1-layer Feed-Forward networks as experts.\n    \n\n    Args:\n        input_size: integer - size of the input\n        head_size: integer - size of the expert's hidden layer\n        num_experts: an integer - number of experts\n        top_k: an integer - how many experts to use for each batch element\n        bias: a boolean - whether to include bias in linear layers\n        activation: an activation function to apply to expert's outputs\n        acc_aux_loss: a boolean - whether to accumulate auxiliary loss\n        hidden_size: an integer - hidden size of the experts\n        gating_dropout: a float - dropout rate for gating network\n        sample_topk: an integer - how many experts to sample during training\n        gating_size: an integer - size of the gating network\n        aux_loss: a string - type of auxiliary loss ('mi' or 'sparse')\n        gate_type: a string - type of gating mechanism ('mlp' or 'topk')\n    \"\"\"\n\n    def __init__(\n        self, \n        input_size, \n        head_size, \n        num_experts, \n        top_k,\n        bias=False, \n        activation=None, \n        acc_aux_loss=False,\n        hidden_size=None,\n        gating_dropout=0.0,\n        sample_topk=0,\n        gating_size=256,\n        aux_loss='mi',\n        gate_type='mlp',\n        ):\n        super(MoE, self).__init__()\n\n        self.num_experts = num_experts\n        self.input_size = input_size\n        self.head_size = head_size\n        self.bias = bias\n        self.experts = ParallelExperts(num_experts, input_size, head_size, bias)\n        if hidden_size is None:\n            hidden_size = head_size\n        self.output_experts = ParallelExperts(num_experts, hidden_size, input_size, bias)\n        self.top_k = min(top_k, self.num_experts)\n        self.activation = activation\n\n        self.gate = top_k_gating(\n            input_size=input_size, \n            num_experts=num_experts, \n            top_k=top_k, \n            acc_aux_loss=acc_aux_loss, \n            dropout=gating_dropout,\n            sample_topk=sample_topk,\n            hidden_size=gating_size,\n            aux_loss=aux_loss,\n            gate_type=gate_type,\n            )\n\n    def extra_repr(self):\n        return 'k={}'.format(\n            self.top_k)\n\n    def get_aux_loss_and_clear(self):\n        \"\"\"\n        Get the accumulated auxiliary loss and clear it.\n\n        Returns:\n            float: Accumulated auxiliary loss.\n        \"\"\"\n\n        return self.gate.get_aux_loss_and_clear()\n\n    def compute_gate(self, moe_inp, skip_mask=None):\n        \"\"\"\n        Compute gating for the mixture of experts.\n\n        Args:\n            moe_inp (Tensor): Input tensor.\n            skip_mask (Tensor): Skip mask tensor.\n\n        Returns:\n            float: Gating loss.\n        \"\"\"\n\n        top_k_indices, top_k_gates, probs = self.gate(moe_inp, skip_mask=skip_mask)\n        
self.batch_gates, self.batch_index, expert_size, self.index_sorted_experts =\\\n            compute_gating(self.top_k, probs, top_k_gates, top_k_indices)\n        self.expert_size = expert_size.tolist()\n        return self.gate.loss\n\n    def forward(self, x, skip_mask=None, sample_topk=0, multiply_by_gates=True):\n        \"\"\"\n        Forward pass of the mixture of experts layer.\n\n        Args:\n            x (Tensor): Input tensor.\n            skip_mask (Tensor): Skip mask tensor.\n            sample_topk (int): Number of experts to sample during training.\n            multiply_by_gates (bool): Whether to multiply outputs by gating values.\n\n        Returns:\n            Tensor: Output tensor.\n            float: Gating loss.\n        \"\"\"\n        bsz, length, emb_size = x.size()\n        if skip_mask is not None:\n            assert x.size()[:-1] == skip_mask.size(), \\\n                    \"Skip mask should be same shape as `x`\"\n            skip_mask = skip_mask.flatten()[:, None]\n        x = x.reshape(-1, emb_size)\n        loss = self.compute_gate(x, skip_mask)\n\n        expert_inputs = x[self.batch_index]\n        h = self.experts(expert_inputs, self.expert_size)\n        h = self.activation(h)\n        expert_outputs = self.output_experts(h, self.expert_size)\n\n        if multiply_by_gates:\n            expert_outputs = expert_outputs * self.batch_gates[:, None]\n\n        zeros = torch.zeros(\n            (bsz * length, self.input_size),\n            dtype=expert_outputs.dtype, device=expert_outputs.device)\n        y = zeros.index_add(0, self.batch_index, expert_outputs)\n        y = y.view(bsz, length, self.input_size)\n        # assert torch.allclose(y, y_)\n        return y, loss\n\n    def map(self, x, skip_mask=None, sample_topk=0, return_indices=False):\n        \"\"\"\n        \n        Args:\n            x: tensor shape [batch_size, input_size]\n            train: a boolean scalar.\n            loss_coef: a scalar - multiplier on load-balancing losses\n\n        Returns:\n            y: a tensor with shape [batch_size, output_size].\n            extra_training_loss: a scalar.  This should be added into the overall\n            training loss of the model.  
The backpropagation of this loss\n            encourages all experts to be approximately equally used across a batch.\n        \"\"\"\n        \"\"\"\n        Map input through the mixture of experts layer.\n\n        Args:\n            x (Tensor): Input tensor.\n            skip_mask (Tensor): Skip mask tensor.\n            sample_topk (int): Number of experts to sample during training.\n            return_indices (bool): Whether to return expert indices.\n\n        Returns:\n            Tensor: Output tensor.\n            float: Gating loss.\n        \"\"\"\n        if skip_mask is not None:\n            assert x.size()[:-1] == skip_mask.size(), \\\n                    \"Skip mask should be same shape as `x`\"\n        bsz, length, emb_size = x.size()\n        x = x.reshape(-1, emb_size)\n        if skip_mask is not None:\n            skip_mask = skip_mask.view(-1, 1)\n        loss = self.compute_gate(x, skip_mask)\n\n        expert_inputs = x[self.batch_index]\n        expert_outputs = self.experts(expert_inputs, self.expert_size)\n\n        zeros = torch.zeros((bsz * length * self.top_k, self.head_size), \n            dtype=expert_outputs.dtype, device=expert_outputs.device)\n        y = zeros.index_add(0, self.index_sorted_experts, expert_outputs)\n        y = y.view(bsz, length, self.top_k, -1)\n        return y, loss\n\n    def reduce(self, x, multiply_by_gates=True):\n        \"\"\"\n        Reduce the mapped output.\n\n        Args:\n            x (Tensor): Mapped output tensor.\n            multiply_by_gates (bool): Whether to multiply outputs by gating values.\n\n        Returns:\n            Tensor: Reduced output tensor.\n        \"\"\"\n        \n        bsz, length, k, emb_size = x.size()\n        x = x.reshape(-1, emb_size)\n\n        expert_inputs = x[self.index_sorted_experts]\n        expert_outputs = self.output_experts(expert_inputs, self.expert_size)\n\n        if multiply_by_gates:\n            expert_outputs = expert_outputs * self.batch_gates[:, None]\n\n        zeros = torch.zeros((bsz * length, self.input_size), \n            dtype=expert_outputs.dtype, device=expert_outputs.device)\n        y = zeros.index_add(0, self.batch_index, expert_outputs)\n        y = y.view(bsz, length, self.input_size)\n        return y","source_hash":"67a659ec7f4870a47dcebcf18016b9390ed09a97d58f2b0ec4ad2ab626cf8633","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.utils.moe.__init__","uri":"program://ModuleFormer/function/moduleformer.utils.moe.__init__#L33-L72","kind":"function","name":"__init__","path":"moduleformer/utils/moe.py","language":"python","start_line":33,"end_line":72,"context_start_line":13,"context_end_line":92,"code":"    \"\"\"\n    A Sparsely gated mixture of experts layer with 1-layer Feed-Forward networks as experts.\n    \n\n    Args:\n        input_size: integer - size of the input\n        head_size: integer - size of the expert's hidden layer\n        num_experts: an integer - number of experts\n        top_k: an integer - how many experts to use for each batch element\n        bias: a boolean - whether to include bias in linear layers\n        activation: an activation function to apply to expert's outputs\n        acc_aux_loss: a boolean - whether to accumulate auxiliary loss\n        hidden_size: an integer - hidden size of the experts\n        gating_dropout: a float - dropout rate for gating network\n        sample_topk: an integer - how many experts to sample during training\n        gating_size: an integer - size of the gating network\n        aux_loss: a string - type of auxiliary loss ('mi' or 'sparse')\n        gate_type: a string - type of gating mechanism ('mlp' or 'topk')\n    \"\"\"\n\n    def __init__(\n        self, \n        input_size, \n        head_size, \n        num_experts, \n        top_k,\n        bias=False, \n        activation=None, \n        acc_aux_loss=False,\n        hidden_size=None,\n        gating_dropout=0.0,\n        sample_topk=0,\n        gating_size=256,\n        aux_loss='mi',\n        gate_type='mlp',\n        ):\n        super(MoE, self).__init__()\n\n        self.num_experts = num_experts\n        self.input_size = input_size\n        self.head_size = head_size\n        self.bias = bias\n        self.experts = ParallelExperts(num_experts, input_size, head_size, bias)\n        if hidden_size is None:\n            hidden_size = head_size\n        self.output_experts = ParallelExperts(num_experts, hidden_size, input_size, bias)\n        self.top_k = min(top_k, self.num_experts)\n        self.activation = activation\n\n        self.gate = top_k_gating(\n            input_size=input_size, \n            num_experts=num_experts, \n            top_k=top_k, \n            acc_aux_loss=acc_aux_loss, \n            dropout=gating_dropout,\n            sample_topk=sample_topk,\n            hidden_size=gating_size,\n            aux_loss=aux_loss,\n            gate_type=gate_type,\n            )\n\n    def extra_repr(self):\n        return 'k={}'.format(\n            self.top_k)\n\n    def get_aux_loss_and_clear(self):\n        \"\"\"\n        Get the accumulated auxiliary loss and clear it.\n\n        Returns:\n            float: Accumulated auxiliary loss.\n        \"\"\"\n\n        return self.gate.get_aux_loss_and_clear()\n\n    def compute_gate(self, moe_inp, skip_mask=None):\n        \"\"\"\n        Compute gating for the mixture of experts.\n\n        Args:","source_hash":"67a659ec7f4870a47dcebcf18016b9390ed09a97d58f2b0ec4ad2ab626cf8633","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.utils.moe.extra_repr","uri":"program://ModuleFormer/function/moduleformer.utils.moe.extra_repr#L74-L76","kind":"function","name":"extra_repr","path":"moduleformer/utils/moe.py","language":"python","start_line":74,"end_line":76,"context_start_line":54,"context_end_line":96,"code":"        self.bias = bias\n        self.experts = ParallelExperts(num_experts, input_size, head_size, bias)\n        if hidden_size is None:\n            hidden_size = head_size\n        self.output_experts = ParallelExperts(num_experts, hidden_size, input_size, bias)\n        self.top_k = min(top_k, self.num_experts)\n        self.activation = activation\n\n        self.gate = top_k_gating(\n            input_size=input_size, \n            num_experts=num_experts, \n            top_k=top_k, \n            acc_aux_loss=acc_aux_loss, \n            dropout=gating_dropout,\n            sample_topk=sample_topk,\n            hidden_size=gating_size,\n            aux_loss=aux_loss,\n            gate_type=gate_type,\n            )\n\n    def extra_repr(self):\n        return 'k={}'.format(\n            self.top_k)\n\n    def get_aux_loss_and_clear(self):\n        \"\"\"\n        Get the accumulated auxiliary loss and clear it.\n\n        Returns:\n            float: Accumulated auxiliary loss.\n        \"\"\"\n\n        return self.gate.get_aux_loss_and_clear()\n\n    def compute_gate(self, moe_inp, skip_mask=None):\n        \"\"\"\n        Compute gating for the mixture of experts.\n\n        Args:\n            moe_inp (Tensor): Input tensor.\n            skip_mask (Tensor): Skip mask tensor.\n\n        Returns:","source_hash":"67a659ec7f4870a47dcebcf18016b9390ed09a97d58f2b0ec4ad2ab626cf8633","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.utils.moe.get_aux_loss_and_clear","uri":"program://ModuleFormer/function/moduleformer.utils.moe.get_aux_loss_and_clear#L78-L86","kind":"function","name":"get_aux_loss_and_clear","path":"moduleformer/utils/moe.py","language":"python","start_line":78,"end_line":86,"context_start_line":58,"context_end_line":106,"code":"        self.output_experts = ParallelExperts(num_experts, hidden_size, input_size, bias)\n        self.top_k = min(top_k, self.num_experts)\n        self.activation = activation\n\n        self.gate = top_k_gating(\n            input_size=input_size, \n            num_experts=num_experts, \n            top_k=top_k, \n            acc_aux_loss=acc_aux_loss, \n            dropout=gating_dropout,\n            sample_topk=sample_topk,\n            hidden_size=gating_size,\n            aux_loss=aux_loss,\n            gate_type=gate_type,\n            )\n\n    def extra_repr(self):\n        return 'k={}'.format(\n            self.top_k)\n\n    def get_aux_loss_and_clear(self):\n        \"\"\"\n        Get the accumulated auxiliary loss and clear it.\n\n        Returns:\n            float: Accumulated auxiliary loss.\n        \"\"\"\n\n        return self.gate.get_aux_loss_and_clear()\n\n    def compute_gate(self, moe_inp, skip_mask=None):\n        \"\"\"\n        Compute gating for the mixture of experts.\n\n        Args:\n            moe_inp (Tensor): Input tensor.\n            skip_mask (Tensor): Skip mask tensor.\n\n        Returns:\n            float: Gating loss.\n        \"\"\"\n\n        top_k_indices, top_k_gates, probs = self.gate(moe_inp, skip_mask=skip_mask)\n        self.batch_gates, self.batch_index, expert_size, self.index_sorted_experts =\\\n            compute_gating(self.top_k, probs, top_k_gates, top_k_indices)\n        self.expert_size = expert_size.tolist()\n        return self.gate.loss\n\n    def forward(self, x, skip_mask=None, sample_topk=0, multiply_by_gates=True):","source_hash":"67a659ec7f4870a47dcebcf18016b9390ed09a97d58f2b0ec4ad2ab626cf8633","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.utils.moe.compute_gate","uri":"program://ModuleFormer/function/moduleformer.utils.moe.compute_gate#L88-L104","kind":"function","name":"compute_gate","path":"moduleformer/utils/moe.py","language":"python","start_line":88,"end_line":104,"context_start_line":68,"context_end_line":124,"code":"            sample_topk=sample_topk,\n            hidden_size=gating_size,\n            aux_loss=aux_loss,\n            gate_type=gate_type,\n            )\n\n    def extra_repr(self):\n        return 'k={}'.format(\n            self.top_k)\n\n    def get_aux_loss_and_clear(self):\n        \"\"\"\n        Get the accumulated auxiliary loss and clear it.\n\n        Returns:\n            float: Accumulated auxiliary loss.\n        \"\"\"\n\n        return self.gate.get_aux_loss_and_clear()\n\n    def compute_gate(self, moe_inp, skip_mask=None):\n        \"\"\"\n        Compute gating for the mixture of experts.\n\n        Args:\n            moe_inp (Tensor): Input tensor.\n            skip_mask (Tensor): Skip mask tensor.\n\n        Returns:\n            float: Gating loss.\n        \"\"\"\n\n        top_k_indices, top_k_gates, probs = self.gate(moe_inp, skip_mask=skip_mask)\n        self.batch_gates, self.batch_index, expert_size, self.index_sorted_experts =\\\n            compute_gating(self.top_k, probs, top_k_gates, top_k_indices)\n        self.expert_size = expert_size.tolist()\n        return self.gate.loss\n\n    def forward(self, x, skip_mask=None, sample_topk=0, multiply_by_gates=True):\n        \"\"\"\n        Forward pass of the mixture of experts layer.\n\n        Args:\n            x (Tensor): Input tensor.\n            skip_mask (Tensor): Skip mask tensor.\n            sample_topk (int): Number of experts to sample during training.\n            multiply_by_gates (bool): Whether to multiply outputs by gating values.\n\n        Returns:\n            Tensor: Output tensor.\n            float: Gating loss.\n        \"\"\"\n        bsz, length, emb_size = x.size()\n        if skip_mask is not None:\n            assert x.size()[:-1] == skip_mask.size(), \\\n                    \"Skip mask should be same shape as `x`\"\n            skip_mask = skip_mask.flatten()[:, None]","source_hash":"67a659ec7f4870a47dcebcf18016b9390ed09a97d58f2b0ec4ad2ab626cf8633","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.utils.moe.forward","uri":"program://ModuleFormer/function/moduleformer.utils.moe.forward#L106-L142","kind":"function","name":"forward","path":"moduleformer/utils/moe.py","language":"python","start_line":106,"end_line":142,"context_start_line":86,"context_end_line":162,"code":"        return self.gate.get_aux_loss_and_clear()\n\n    def compute_gate(self, moe_inp, skip_mask=None):\n        \"\"\"\n        Compute gating for the mixture of experts.\n\n        Args:\n            moe_inp (Tensor): Input tensor.\n            skip_mask (Tensor): Skip mask tensor.\n\n        Returns:\n            float: Gating loss.\n        \"\"\"\n\n        top_k_indices, top_k_gates, probs = self.gate(moe_inp, skip_mask=skip_mask)\n        self.batch_gates, self.batch_index, expert_size, self.index_sorted_experts =\\\n            compute_gating(self.top_k, probs, top_k_gates, top_k_indices)\n        self.expert_size = expert_size.tolist()\n        return self.gate.loss\n\n    def forward(self, x, skip_mask=None, sample_topk=0, multiply_by_gates=True):\n        \"\"\"\n        Forward pass of the mixture of experts layer.\n\n        Args:\n            x (Tensor): Input tensor.\n            skip_mask (Tensor): Skip mask tensor.\n            sample_topk (int): Number of experts to sample during training.\n            multiply_by_gates (bool): Whether to multiply outputs by gating values.\n\n        Returns:\n            Tensor: Output tensor.\n            float: Gating loss.\n        \"\"\"\n        bsz, length, emb_size = x.size()\n        if skip_mask is not None:\n            assert x.size()[:-1] == skip_mask.size(), \\\n                    \"Skip mask should be same shape as `x`\"\n            skip_mask = skip_mask.flatten()[:, None]\n        x = x.reshape(-1, emb_size)\n        loss = self.compute_gate(x, skip_mask)\n\n        expert_inputs = x[self.batch_index]\n        h = self.experts(expert_inputs, self.expert_size)\n        h = self.activation(h)\n        expert_outputs = self.output_experts(h, self.expert_size)\n\n        if multiply_by_gates:\n            expert_outputs = expert_outputs * self.batch_gates[:, None]\n\n        zeros = torch.zeros(\n            (bsz * length, self.input_size),\n            dtype=expert_outputs.dtype, device=expert_outputs.device)\n        y = zeros.index_add(0, self.batch_index, expert_outputs)\n        y = y.view(bsz, length, self.input_size)\n        # assert torch.allclose(y, y_)\n        return y, loss\n\n    def map(self, x, skip_mask=None, sample_topk=0, return_indices=False):\n        \"\"\"\n        \n        Args:\n            x: tensor shape [batch_size, input_size]\n            train: a boolean scalar.\n            loss_coef: a scalar - multiplier on load-balancing losses\n\n        Returns:\n            y: a tensor with shape [batch_size, output_size].\n            extra_training_loss: a scalar.  This should be added into the overall\n            training loss of the model.  The backpropagation of this loss\n            encourages all experts to be approximately equally used across a batch.\n        \"\"\"\n        \"\"\"\n        Map input through the mixture of experts layer.\n\n        Args:\n            x (Tensor): Input tensor.","source_hash":"67a659ec7f4870a47dcebcf18016b9390ed09a97d58f2b0ec4ad2ab626cf8633","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.utils.moe.map","uri":"program://ModuleFormer/function/moduleformer.utils.moe.map#L144-L187","kind":"function","name":"map","path":"moduleformer/utils/moe.py","language":"python","start_line":144,"end_line":187,"context_start_line":124,"context_end_line":207,"code":"            skip_mask = skip_mask.flatten()[:, None]\n        x = x.reshape(-1, emb_size)\n        loss = self.compute_gate(x, skip_mask)\n\n        expert_inputs = x[self.batch_index]\n        h = self.experts(expert_inputs, self.expert_size)\n        h = self.activation(h)\n        expert_outputs = self.output_experts(h, self.expert_size)\n\n        if multiply_by_gates:\n            expert_outputs = expert_outputs * self.batch_gates[:, None]\n\n        zeros = torch.zeros(\n            (bsz * length, self.input_size),\n            dtype=expert_outputs.dtype, device=expert_outputs.device)\n        y = zeros.index_add(0, self.batch_index, expert_outputs)\n        y = y.view(bsz, length, self.input_size)\n        # assert torch.allclose(y, y_)\n        return y, loss\n\n    def map(self, x, skip_mask=None, sample_topk=0, return_indices=False):\n        \"\"\"\n        \n        Args:\n            x: tensor shape [batch_size, input_size]\n            train: a boolean scalar.\n            loss_coef: a scalar - multiplier on load-balancing losses\n\n        Returns:\n            y: a tensor with shape [batch_size, output_size].\n            extra_training_loss: a scalar.  This should be added into the overall\n            training loss of the model.  The backpropagation of this loss\n            encourages all experts to be approximately equally used across a batch.\n        \"\"\"\n        \"\"\"\n        Map input through the mixture of experts layer.\n\n        Args:\n            x (Tensor): Input tensor.\n            skip_mask (Tensor): Skip mask tensor.\n            sample_topk (int): Number of experts to sample during training.\n            return_indices (bool): Whether to return expert indices.\n\n        Returns:\n            Tensor: Output tensor.\n            float: Gating loss.\n        \"\"\"\n        if skip_mask is not None:\n            assert x.size()[:-1] == skip_mask.size(), \\\n                    \"Skip mask should be same shape as `x`\"\n        bsz, length, emb_size = x.size()\n        x = x.reshape(-1, emb_size)\n        if skip_mask is not None:\n            skip_mask = skip_mask.view(-1, 1)\n        loss = self.compute_gate(x, skip_mask)\n\n        expert_inputs = x[self.batch_index]\n        expert_outputs = self.experts(expert_inputs, self.expert_size)\n\n        zeros = torch.zeros((bsz * length * self.top_k, self.head_size), \n            dtype=expert_outputs.dtype, device=expert_outputs.device)\n        y = zeros.index_add(0, self.index_sorted_experts, expert_outputs)\n        y = y.view(bsz, length, self.top_k, -1)\n        return y, loss\n\n    def reduce(self, x, multiply_by_gates=True):\n        \"\"\"\n        Reduce the mapped output.\n\n        Args:\n            x (Tensor): Mapped output tensor.\n            multiply_by_gates (bool): Whether to multiply outputs by gating values.\n\n        Returns:\n            Tensor: Reduced output tensor.\n        \"\"\"\n        \n        bsz, length, k, emb_size = x.size()\n        x = x.reshape(-1, emb_size)\n\n        expert_inputs = x[self.index_sorted_experts]\n        expert_outputs = self.output_experts(expert_inputs, self.expert_size)\n\n        if 
multiply_by_gates:","source_hash":"67a659ec7f4870a47dcebcf18016b9390ed09a97d58f2b0ec4ad2ab626cf8633","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.utils.moe.reduce","uri":"program://ModuleFormer/function/moduleformer.utils.moe.reduce#L189-L214","kind":"function","name":"reduce","path":"moduleformer/utils/moe.py","language":"python","start_line":189,"end_line":214,"context_start_line":169,"context_end_line":214,"code":"            float: Gating loss.\n        \"\"\"\n        if skip_mask is not None:\n            assert x.size()[:-1] == skip_mask.size(), \\\n                    \"Skip mask should be same shape as `x`\"\n        bsz, length, emb_size = x.size()\n        x = x.reshape(-1, emb_size)\n        if skip_mask is not None:\n            skip_mask = skip_mask.view(-1, 1)\n        loss = self.compute_gate(x, skip_mask)\n\n        expert_inputs = x[self.batch_index]\n        expert_outputs = self.experts(expert_inputs, self.expert_size)\n\n        zeros = torch.zeros((bsz * length * self.top_k, self.head_size), \n            dtype=expert_outputs.dtype, device=expert_outputs.device)\n        y = zeros.index_add(0, self.index_sorted_experts, expert_outputs)\n        y = y.view(bsz, length, self.top_k, -1)\n        return y, loss\n\n    def reduce(self, x, multiply_by_gates=True):\n        \"\"\"\n        Reduce the mapped output.\n\n        Args:\n            x (Tensor): Mapped output tensor.\n            multiply_by_gates (bool): Whether to multiply outputs by gating values.\n\n        Returns:\n            Tensor: Reduced output tensor.\n        \"\"\"\n        \n        bsz, length, k, emb_size = x.size()\n        x = x.reshape(-1, emb_size)\n\n        expert_inputs = x[self.index_sorted_experts]\n        expert_outputs = self.output_experts(expert_inputs, self.expert_size)\n\n        if multiply_by_gates:\n            expert_outputs = expert_outputs * self.batch_gates[:, None]\n\n        zeros = torch.zeros((bsz * length, self.input_size), \n            dtype=expert_outputs.dtype, device=expert_outputs.device)\n        y = zeros.index_add(0, self.batch_index, expert_outputs)\n        y = y.view(bsz, length, self.input_size)\n        return y","source_hash":"67a659ec7f4870a47dcebcf18016b9390ed09a97d58f2b0ec4ad2ab626cf8633","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.utils.parallel_experts","uri":"program://ModuleFormer/module/moduleformer.utils.parallel_experts#L1-L196","kind":"module","name":"moduleformer.utils.parallel_experts","path":"moduleformer/utils/parallel_experts.py","language":"python","start_line":1,"end_line":196,"context_start_line":1,"context_end_line":196,"code":"import math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.cuda.amp import custom_fwd, custom_bwd\nfrom typing import Any, Dict, List, Optional\nfrom torch import Tensor\n\n\nclass ParallelLinear(torch.autograd.Function):\n    \"\"\"\n    A custom autograd function for Parallel Linear operation.\n    \"\"\"\n\n    @staticmethod\n    @custom_fwd\n    def forward(ctx, input, expert_size_list, weight, bias=None):\n        \"\"\"\n        Forward pass of the ParallelLinear operation.\n\n        Args:\n            ctx: Context object.\n            input (Tensor): Input tensor.\n            expert_size_list (List[int]): List of expert sizes.\n            weight (Tensor): Weight tensor.\n            bias (Optional[Tensor]): Bias tensor.\n\n        Returns:\n            Tensor: Output tensor.\n        \"\"\"\n        # expert_size_list: List[int] = expert_size.tolist()\n        output = ParallelLinear.forward_scriptable(input, expert_size_list, weight, bias)\n        # assert torch.allclose(ParallelLinear._forward_scriptable(input, expert_size, weight, bias),  output)\n        ctx.save_for_backward(input, weight, bias)\n        ctx.expert_size_list = expert_size_list\n        return output\n\n    @staticmethod\n    @torch.jit.script\n    def forward_scriptable(input: Tensor, expert_size_list: List[int],\n                           weight: Tensor, bias: Optional[Tensor]):\n        \"\"\"\n        Scriptable forward pass of the ParallelLinear operation.\n\n        Args:\n            input (Tensor): Input tensor.\n            expert_size_list (List[int]): List of expert sizes.\n            weight (Tensor): Weight tensor.\n            bias (Optional[Tensor]): Bias tensor.\n\n        Returns:\n            Tensor: Output tensor.\n        \"\"\"\n        output_buf: Tensor = torch.empty((input.size(0), weight.size(2)),\n                                         device=input.device, dtype=input.dtype)\n        num_linears = weight.size(0)\n\n        input_list = input.split(expert_size_list, dim=0)\n        output_buf_list = output_buf.split(expert_size_list)\n\n        for i in range(num_linears):\n            torch.mm(input_list[i], weight[i], out=output_buf_list[i])\n\n        if bias is not None:\n            for i in range(num_linears):\n                output_buf_list[i].add_(bias[i])\n\n        output = output_buf\n        return output\n\n    @staticmethod\n    @custom_bwd\n    def backward(ctx, grad_out):\n        \"\"\"\n        Backward pass of the ParallelLinear operation.\n\n        Args:\n            ctx: Context object.\n            grad_out (Tensor): Gradient of the output.\n\n        Returns:\n            Tuple of Tensors: Gradients with respect to input, weight, and bias.\n        \"\"\"\n        input, weight, bias = ctx.saved_tensors\n        expert_size_list = ctx.expert_size_list\n        return ParallelLinear.backward_scriptable(\n            grad_out, input, expert_size_list,\n            weight, bias\n        )\n\n    @staticmethod\n    @torch.jit.script\n    def backward_scriptable(grad_out: Tensor,\n                 input: Tensor, expert_size_list: List[int],\n                 
weight: Tensor, bias: Optional[Tensor]):\n        \"\"\"\n        Scriptable backward pass of the ParallelLinear operation.\n\n        Args:\n            grad_out (Tensor): Gradient of the output.\n            input (Tensor): Input tensor.\n            expert_size_list (List[int]): List of expert sizes.\n            weight (Tensor): Weight tensor.\n            bias (Optional[Tensor]): Bias tensor.\n\n        Returns:\n            Tuple of Tensors: Gradients with respect to input, weight, and bias.\n        \"\"\"\n        num_linears = weight.size(0)\n        input_list = input.t().split(expert_size_list, dim=1)\n        grad_list = grad_out.split(expert_size_list, dim=0)\n\n        d_input_buf = torch.empty_like(input)\n        d_input_buf_list = d_input_buf.split(expert_size_list, dim=0)\n        d_weight_buf = torch.empty_like(weight)\n\n        weight_t = weight.permute(0, 2, 1)\n\n        for i in range(num_linears):\n            torch.mm(grad_list[i], weight_t[i], out=d_input_buf_list[i])\n            torch.mm(input_list[i], grad_list[i], out=d_weight_buf[i])\n\n        d_input = d_input_buf\n        d_weight = d_weight_buf\n\n        if bias is not None:\n            d_bias_buf = torch.empty_like(bias)\n            for i in range(num_linears):\n                torch.sum(grad_list[i], dim=0, keepdim=False, out=d_bias_buf[i])\n            d_bias = d_bias_buf\n        else:\n            d_bias = None\n\n        return d_input, None, d_weight, d_bias\n\n\nclass ParallelExperts(nn.Module):\n    def __init__(self, num_experts, input_size, output_size, bias=False) -> None:\n        \"\"\"\n        Initialize the ParallelExperts module.\n\n        Args:\n            num_experts (int): Number of experts.\n            input_size (int): Size of the input.\n            output_size (int): Size of the output.\n            bias (bool): Whether to include bias terms.\n        \"\"\"\n        super().__init__()\n        # self.input_experts = nn.ModuleList(\n        #     [nn.Linear(input_size, output_size, bias=bias) for _ in range(num_experts)]\n        # )\n        self.weight = nn.Parameter(torch.empty(num_experts, input_size, output_size))\n        if bias:\n            self.bias = nn.Parameter(torch.zeros(num_experts, output_size))\n        else:\n            self.bias = None\n        self.reset_parameters()\n        self.num_experts = num_experts\n        self.input_size = input_size\n        self.output_size = output_size\n\n    def extra_repr(self):\n        return 'num_experts={}, input_size={}, output_size={}'.format(\n            self.num_experts, self.input_size, self.output_size)\n\n    def reset_parameters(self) -> None:\n        \"\"\"\n        Reset the parameters of the model.\n        \"\"\"\n        # std = math.sqrt(2.0 / float(self.weight.size(1) + self.weight.size(2)))\n        # a = math.sqrt(3.0) * std\n        nn.init.uniform_(self.weight, -1. / self.weight.size(1), 1. 
/ self.weight.size(1))\n        if self.bias is not None:\n            fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight[0])\n            bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0\n            nn.init.uniform_(self.bias, -bound, bound)\n\n    def forward(self, inputs, expert_size):\n        \"\"\"\n        Forward pass of the ParallelExperts module.\n\n        Args:\n            inputs (Tensor): Input tensor.\n            expert_size: Expert size information.\n\n        Returns:\n            Tensor: Output tensor.\n        \"\"\"\n        results = ParallelLinear.apply(inputs, expert_size, self.weight, self.bias)\n        # expert_size_list: List[int] = expert_size.tolist()\n        # input_list = inputs.split(expert_size_list, dim=0)\n        # output_list = []\n        # for i in range(self.num_experts):\n        #     output_list.append(self.input_experts[i](input_list[i]))\n        # results = torch.cat(output_list, dim=0)\n        return results","source_hash":"38af1a8a770463b6f044bf3f49cdee78c1e479375bd6ceefddf2191b71cf5340","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.utils.parallel_experts.ParallelLinear","uri":"program://ModuleFormer/class/moduleformer.utils.parallel_experts.ParallelLinear#L10-L134","kind":"class","name":"ParallelLinear","path":"moduleformer/utils/parallel_experts.py","language":"python","start_line":10,"end_line":134,"context_start_line":1,"context_end_line":154,"code":"import math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.cuda.amp import custom_fwd, custom_bwd\nfrom typing import Any, Dict, List, Optional\nfrom torch import Tensor\n\n\nclass ParallelLinear(torch.autograd.Function):\n    \"\"\"\n    A custom autograd function for Parallel Linear operation.\n    \"\"\"\n\n    @staticmethod\n    @custom_fwd\n    def forward(ctx, input, expert_size_list, weight, bias=None):\n        \"\"\"\n        Forward pass of the ParallelLinear operation.\n\n        Args:\n            ctx: Context object.\n            input (Tensor): Input tensor.\n            expert_size_list (List[int]): List of expert sizes.\n            weight (Tensor): Weight tensor.\n            bias (Optional[Tensor]): Bias tensor.\n\n        Returns:\n            Tensor: Output tensor.\n        \"\"\"\n        # expert_size_list: List[int] = expert_size.tolist()\n        output = ParallelLinear.forward_scriptable(input, expert_size_list, weight, bias)\n        # assert torch.allclose(ParallelLinear._forward_scriptable(input, expert_size, weight, bias),  output)\n        ctx.save_for_backward(input, weight, bias)\n        ctx.expert_size_list = expert_size_list\n        return output\n\n    @staticmethod\n    @torch.jit.script\n    def forward_scriptable(input: Tensor, expert_size_list: List[int],\n                           weight: Tensor, bias: Optional[Tensor]):\n        \"\"\"\n        Scriptable forward pass of the ParallelLinear operation.\n\n        Args:\n            input (Tensor): Input tensor.\n            expert_size_list (List[int]): List of expert sizes.\n            weight (Tensor): Weight tensor.\n            bias (Optional[Tensor]): Bias tensor.\n\n        Returns:\n            Tensor: Output tensor.\n        \"\"\"\n        output_buf: Tensor = torch.empty((input.size(0), weight.size(2)),\n                                         device=input.device, dtype=input.dtype)\n        num_linears = weight.size(0)\n\n        input_list = input.split(expert_size_list, dim=0)\n        output_buf_list = output_buf.split(expert_size_list)\n\n        for i in range(num_linears):\n            torch.mm(input_list[i], weight[i], out=output_buf_list[i])\n\n        if bias is not None:\n            for i in range(num_linears):\n                output_buf_list[i].add_(bias[i])\n\n        output = output_buf\n        return output\n\n    @staticmethod\n    @custom_bwd\n    def backward(ctx, grad_out):\n        \"\"\"\n        Backward pass of the ParallelLinear operation.\n\n        Args:\n            ctx: Context object.\n            grad_out (Tensor): Gradient of the output.\n\n        Returns:\n            Tuple of Tensors: Gradients with respect to input, weight, and bias.\n        \"\"\"\n        input, weight, bias = ctx.saved_tensors\n        expert_size_list = ctx.expert_size_list\n        return ParallelLinear.backward_scriptable(\n            grad_out, input, expert_size_list,\n            weight, bias\n        )\n\n    @staticmethod\n    @torch.jit.script\n    def backward_scriptable(grad_out: Tensor,\n                 input: Tensor, expert_size_list: List[int],\n          
       weight: Tensor, bias: Optional[Tensor]):\n        \"\"\"\n        Scriptable backward pass of the ParallelLinear operation.\n\n        Args:\n            grad_out (Tensor): Gradient of the output.\n            input (Tensor): Input tensor.\n            expert_size_list (List[int]): List of expert sizes.\n            weight (Tensor): Weight tensor.\n            bias (Optional[Tensor]): Bias tensor.\n\n        Returns:\n            Tuple of Tensors: Gradients with respect to input, weight, and bias.\n        \"\"\"\n        num_linears = weight.size(0)\n        input_list = input.t().split(expert_size_list, dim=1)\n        grad_list = grad_out.split(expert_size_list, dim=0)\n\n        d_input_buf = torch.empty_like(input)\n        d_input_buf_list = d_input_buf.split(expert_size_list, dim=0)\n        d_weight_buf = torch.empty_like(weight)\n\n        weight_t = weight.permute(0, 2, 1)\n\n        for i in range(num_linears):\n            torch.mm(grad_list[i], weight_t[i], out=d_input_buf_list[i])\n            torch.mm(input_list[i], grad_list[i], out=d_weight_buf[i])\n\n        d_input = d_input_buf\n        d_weight = d_weight_buf\n\n        if bias is not None:\n            d_bias_buf = torch.empty_like(bias)\n            for i in range(num_linears):\n                torch.sum(grad_list[i], dim=0, keepdim=False, out=d_bias_buf[i])\n            d_bias = d_bias_buf\n        else:\n            d_bias = None\n\n        return d_input, None, d_weight, d_bias\n\n\nclass ParallelExperts(nn.Module):\n    def __init__(self, num_experts, input_size, output_size, bias=False) -> None:\n        \"\"\"\n        Initialize the ParallelExperts module.\n\n        Args:\n            num_experts (int): Number of experts.\n            input_size (int): Size of the input.\n            output_size (int): Size of the output.\n            bias (bool): Whether to include bias terms.\n        \"\"\"\n        super().__init__()\n        # self.input_experts = nn.ModuleList(\n        #     [nn.Linear(input_size, output_size, bias=bias) for _ in range(num_experts)]\n        # )\n        self.weight = nn.Parameter(torch.empty(num_experts, input_size, output_size))\n        if bias:\n            self.bias = nn.Parameter(torch.zeros(num_experts, output_size))","source_hash":"38af1a8a770463b6f044bf3f49cdee78c1e479375bd6ceefddf2191b71cf5340","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.utils.parallel_experts.ParallelExperts","uri":"program://ModuleFormer/class/moduleformer.utils.parallel_experts.ParallelExperts#L137-L196","kind":"class","name":"ParallelExperts","path":"moduleformer/utils/parallel_experts.py","language":"python","start_line":137,"end_line":196,"context_start_line":117,"context_end_line":196,"code":"        weight_t = weight.permute(0, 2, 1)\n\n        for i in range(num_linears):\n            torch.mm(grad_list[i], weight_t[i], out=d_input_buf_list[i])\n            torch.mm(input_list[i], grad_list[i], out=d_weight_buf[i])\n\n        d_input = d_input_buf\n        d_weight = d_weight_buf\n\n        if bias is not None:\n            d_bias_buf = torch.empty_like(bias)\n            for i in range(num_linears):\n                torch.sum(grad_list[i], dim=0, keepdim=False, out=d_bias_buf[i])\n            d_bias = d_bias_buf\n        else:\n            d_bias = None\n\n        return d_input, None, d_weight, d_bias\n\n\nclass ParallelExperts(nn.Module):\n    def __init__(self, num_experts, input_size, output_size, bias=False) -> None:\n        \"\"\"\n        Initialize the ParallelExperts module.\n\n        Args:\n            num_experts (int): Number of experts.\n            input_size (int): Size of the input.\n            output_size (int): Size of the output.\n            bias (bool): Whether to include bias terms.\n        \"\"\"\n        super().__init__()\n        # self.input_experts = nn.ModuleList(\n        #     [nn.Linear(input_size, output_size, bias=bias) for _ in range(num_experts)]\n        # )\n        self.weight = nn.Parameter(torch.empty(num_experts, input_size, output_size))\n        if bias:\n            self.bias = nn.Parameter(torch.zeros(num_experts, output_size))\n        else:\n            self.bias = None\n        self.reset_parameters()\n        self.num_experts = num_experts\n        self.input_size = input_size\n        self.output_size = output_size\n\n    def extra_repr(self):\n        return 'num_experts={}, input_size={}, output_size={}'.format(\n            self.num_experts, self.input_size, self.output_size)\n\n    def reset_parameters(self) -> None:\n        \"\"\"\n        Reset the parameters of the model.\n        \"\"\"\n        # std = math.sqrt(2.0 / float(self.weight.size(1) + self.weight.size(2)))\n        # a = math.sqrt(3.0) * std\n        nn.init.uniform_(self.weight, -1. / self.weight.size(1), 1. 
/ self.weight.size(1))\n        if self.bias is not None:\n            fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight[0])\n            bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0\n            nn.init.uniform_(self.bias, -bound, bound)\n\n    def forward(self, inputs, expert_size):\n        \"\"\"\n        Forward pass of the ParallelExperts module.\n\n        Args:\n            inputs (Tensor): Input tensor.\n            expert_size: Expert size information.\n\n        Returns:\n            Tensor: Output tensor.\n        \"\"\"\n        results = ParallelLinear.apply(inputs, expert_size, self.weight, self.bias)\n        # expert_size_list: List[int] = expert_size.tolist()\n        # input_list = inputs.split(expert_size_list, dim=0)\n        # output_list = []\n        # for i in range(self.num_experts):\n        #     output_list.append(self.input_experts[i](input_list[i]))\n        # results = torch.cat(output_list, dim=0)\n        return results","source_hash":"38af1a8a770463b6f044bf3f49cdee78c1e479375bd6ceefddf2191b71cf5340","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.utils.parallel_experts.forward","uri":"program://ModuleFormer/function/moduleformer.utils.parallel_experts.forward#L178-L196","kind":"function","name":"forward","path":"moduleformer/utils/parallel_experts.py","language":"python","start_line":178,"end_line":196,"context_start_line":158,"context_end_line":196,"code":"        self.num_experts = num_experts\n        self.input_size = input_size\n        self.output_size = output_size\n\n    def extra_repr(self):\n        return 'num_experts={}, input_size={}, output_size={}'.format(\n            self.num_experts, self.input_size, self.output_size)\n\n    def reset_parameters(self) -> None:\n        \"\"\"\n        Reset the parameters of the model.\n        \"\"\"\n        # std = math.sqrt(2.0 / float(self.weight.size(1) + self.weight.size(2)))\n        # a = math.sqrt(3.0) * std\n        nn.init.uniform_(self.weight, -1. / self.weight.size(1), 1. / self.weight.size(1))\n        if self.bias is not None:\n            fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight[0])\n            bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0\n            nn.init.uniform_(self.bias, -bound, bound)\n\n    def forward(self, inputs, expert_size):\n        \"\"\"\n        Forward pass of the ParallelExperts module.\n\n        Args:\n            inputs (Tensor): Input tensor.\n            expert_size: Expert size information.\n\n        Returns:\n            Tensor: Output tensor.\n        \"\"\"\n        results = ParallelLinear.apply(inputs, expert_size, self.weight, self.bias)\n        # expert_size_list: List[int] = expert_size.tolist()\n        # input_list = inputs.split(expert_size_list, dim=0)\n        # output_list = []\n        # for i in range(self.num_experts):\n        #     output_list.append(self.input_experts[i](input_list[i]))\n        # results = torch.cat(output_list, dim=0)\n        return results","source_hash":"38af1a8a770463b6f044bf3f49cdee78c1e479375bd6ceefddf2191b71cf5340","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.utils.parallel_experts.forward_scriptable","uri":"program://ModuleFormer/function/moduleformer.utils.parallel_experts.forward_scriptable#L40-L69","kind":"function","name":"forward_scriptable","path":"moduleformer/utils/parallel_experts.py","language":"python","start_line":40,"end_line":69,"context_start_line":20,"context_end_line":89,"code":"\n        Args:\n            ctx: Context object.\n            input (Tensor): Input tensor.\n            expert_size_list (List[int]): List of expert sizes.\n            weight (Tensor): Weight tensor.\n            bias (Optional[Tensor]): Bias tensor.\n\n        Returns:\n            Tensor: Output tensor.\n        \"\"\"\n        # expert_size_list: List[int] = expert_size.tolist()\n        output = ParallelLinear.forward_scriptable(input, expert_size_list, weight, bias)\n        # assert torch.allclose(ParallelLinear._forward_scriptable(input, expert_size, weight, bias),  output)\n        ctx.save_for_backward(input, weight, bias)\n        ctx.expert_size_list = expert_size_list\n        return output\n\n    @staticmethod\n    @torch.jit.script\n    def forward_scriptable(input: Tensor, expert_size_list: List[int],\n                           weight: Tensor, bias: Optional[Tensor]):\n        \"\"\"\n        Scriptable forward pass of the ParallelLinear operation.\n\n        Args:\n            input (Tensor): Input tensor.\n            expert_size_list (List[int]): List of expert sizes.\n            weight (Tensor): Weight tensor.\n            bias (Optional[Tensor]): Bias tensor.\n\n        Returns:\n            Tensor: Output tensor.\n        \"\"\"\n        output_buf: Tensor = torch.empty((input.size(0), weight.size(2)),\n                                         device=input.device, dtype=input.dtype)\n        num_linears = weight.size(0)\n\n        input_list = input.split(expert_size_list, dim=0)\n        output_buf_list = output_buf.split(expert_size_list)\n\n        for i in range(num_linears):\n            torch.mm(input_list[i], weight[i], out=output_buf_list[i])\n\n        if bias is not None:\n            for i in range(num_linears):\n                output_buf_list[i].add_(bias[i])\n\n        output = output_buf\n        return output\n\n    @staticmethod\n    @custom_bwd\n    def backward(ctx, grad_out):\n        \"\"\"\n        Backward pass of the ParallelLinear operation.\n\n        Args:\n            ctx: Context object.\n            grad_out (Tensor): Gradient of the output.\n\n        Returns:\n            Tuple of Tensors: Gradients with respect to input, weight, and bias.\n        \"\"\"\n        input, weight, bias = ctx.saved_tensors\n        expert_size_list = ctx.expert_size_list\n        return ParallelLinear.backward_scriptable(\n            grad_out, input, expert_size_list,\n            weight, bias\n        )","source_hash":"38af1a8a770463b6f044bf3f49cdee78c1e479375bd6ceefddf2191b71cf5340","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.utils.parallel_experts.backward","uri":"program://ModuleFormer/function/moduleformer.utils.parallel_experts.backward#L73-L89","kind":"function","name":"backward","path":"moduleformer/utils/parallel_experts.py","language":"python","start_line":73,"end_line":89,"context_start_line":53,"context_end_line":109,"code":"        \"\"\"\n        output_buf: Tensor = torch.empty((input.size(0), weight.size(2)),\n                                         device=input.device, dtype=input.dtype)\n        num_linears = weight.size(0)\n\n        input_list = input.split(expert_size_list, dim=0)\n        output_buf_list = output_buf.split(expert_size_list)\n\n        for i in range(num_linears):\n            torch.mm(input_list[i], weight[i], out=output_buf_list[i])\n\n        if bias is not None:\n            for i in range(num_linears):\n                output_buf_list[i].add_(bias[i])\n\n        output = output_buf\n        return output\n\n    @staticmethod\n    @custom_bwd\n    def backward(ctx, grad_out):\n        \"\"\"\n        Backward pass of the ParallelLinear operation.\n\n        Args:\n            ctx: Context object.\n            grad_out (Tensor): Gradient of the output.\n\n        Returns:\n            Tuple of Tensors: Gradients with respect to input, weight, and bias.\n        \"\"\"\n        input, weight, bias = ctx.saved_tensors\n        expert_size_list = ctx.expert_size_list\n        return ParallelLinear.backward_scriptable(\n            grad_out, input, expert_size_list,\n            weight, bias\n        )\n\n    @staticmethod\n    @torch.jit.script\n    def backward_scriptable(grad_out: Tensor,\n                 input: Tensor, expert_size_list: List[int],\n                 weight: Tensor, bias: Optional[Tensor]):\n        \"\"\"\n        Scriptable backward pass of the ParallelLinear operation.\n\n        Args:\n            grad_out (Tensor): Gradient of the output.\n            input (Tensor): Input tensor.\n            expert_size_list (List[int]): List of expert sizes.\n            weight (Tensor): Weight tensor.\n            bias (Optional[Tensor]): Bias tensor.\n\n        Returns:\n            Tuple of Tensors: Gradients with respect to input, weight, and bias.\n        \"\"\"\n        num_linears = weight.size(0)","source_hash":"38af1a8a770463b6f044bf3f49cdee78c1e479375bd6ceefddf2191b71cf5340","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.utils.parallel_experts.backward_scriptable","uri":"program://ModuleFormer/function/moduleformer.utils.parallel_experts.backward_scriptable#L93-L134","kind":"function","name":"backward_scriptable","path":"moduleformer/utils/parallel_experts.py","language":"python","start_line":93,"end_line":134,"context_start_line":73,"context_end_line":154,"code":"    def backward(ctx, grad_out):\n        \"\"\"\n        Backward pass of the ParallelLinear operation.\n\n        Args:\n            ctx: Context object.\n            grad_out (Tensor): Gradient of the output.\n\n        Returns:\n            Tuple of Tensors: Gradients with respect to input, weight, and bias.\n        \"\"\"\n        input, weight, bias = ctx.saved_tensors\n        expert_size_list = ctx.expert_size_list\n        return ParallelLinear.backward_scriptable(\n            grad_out, input, expert_size_list,\n            weight, bias\n        )\n\n    @staticmethod\n    @torch.jit.script\n    def backward_scriptable(grad_out: Tensor,\n                 input: Tensor, expert_size_list: List[int],\n                 weight: Tensor, bias: Optional[Tensor]):\n        \"\"\"\n        Scriptable backward pass of the ParallelLinear operation.\n\n        Args:\n            grad_out (Tensor): Gradient of the output.\n            input (Tensor): Input tensor.\n            expert_size_list (List[int]): List of expert sizes.\n            weight (Tensor): Weight tensor.\n            bias (Optional[Tensor]): Bias tensor.\n\n        Returns:\n            Tuple of Tensors: Gradients with respect to input, weight, and bias.\n        \"\"\"\n        num_linears = weight.size(0)\n        input_list = input.t().split(expert_size_list, dim=1)\n        grad_list = grad_out.split(expert_size_list, dim=0)\n\n        d_input_buf = torch.empty_like(input)\n        d_input_buf_list = d_input_buf.split(expert_size_list, dim=0)\n        d_weight_buf = torch.empty_like(weight)\n\n        weight_t = weight.permute(0, 2, 1)\n\n        for i in range(num_linears):\n            torch.mm(grad_list[i], weight_t[i], out=d_input_buf_list[i])\n            torch.mm(input_list[i], grad_list[i], out=d_weight_buf[i])\n\n        d_input = d_input_buf\n        d_weight = d_weight_buf\n\n        if bias is not None:\n            d_bias_buf = torch.empty_like(bias)\n            for i in range(num_linears):\n                torch.sum(grad_list[i], dim=0, keepdim=False, out=d_bias_buf[i])\n            d_bias = d_bias_buf\n        else:\n            d_bias = None\n\n        return d_input, None, d_weight, d_bias\n\n\nclass ParallelExperts(nn.Module):\n    def __init__(self, num_experts, input_size, output_size, bias=False) -> None:\n        \"\"\"\n        Initialize the ParallelExperts module.\n\n        Args:\n            num_experts (int): Number of experts.\n            input_size (int): Size of the input.\n            output_size (int): Size of the output.\n            bias (bool): Whether to include bias terms.\n        \"\"\"\n        super().__init__()\n        # self.input_experts = nn.ModuleList(\n        #     [nn.Linear(input_size, output_size, bias=bias) for _ in range(num_experts)]\n        # )\n        self.weight = nn.Parameter(torch.empty(num_experts, input_size, output_size))\n        if bias:\n            self.bias = nn.Parameter(torch.zeros(num_experts, output_size))","source_hash":"38af1a8a770463b6f044bf3f49cdee78c1e479375bd6ceefddf2191b71cf5340","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.utils.parallel_experts.__init__","uri":"program://ModuleFormer/function/moduleformer.utils.parallel_experts.__init__#L138-L160","kind":"function","name":"__init__","path":"moduleformer/utils/parallel_experts.py","language":"python","start_line":138,"end_line":160,"context_start_line":118,"context_end_line":180,"code":"\n        for i in range(num_linears):\n            torch.mm(grad_list[i], weight_t[i], out=d_input_buf_list[i])\n            torch.mm(input_list[i], grad_list[i], out=d_weight_buf[i])\n\n        d_input = d_input_buf\n        d_weight = d_weight_buf\n\n        if bias is not None:\n            d_bias_buf = torch.empty_like(bias)\n            for i in range(num_linears):\n                torch.sum(grad_list[i], dim=0, keepdim=False, out=d_bias_buf[i])\n            d_bias = d_bias_buf\n        else:\n            d_bias = None\n\n        return d_input, None, d_weight, d_bias\n\n\nclass ParallelExperts(nn.Module):\n    def __init__(self, num_experts, input_size, output_size, bias=False) -> None:\n        \"\"\"\n        Initialize the ParallelExperts module.\n\n        Args:\n            num_experts (int): Number of experts.\n            input_size (int): Size of the input.\n            output_size (int): Size of the output.\n            bias (bool): Whether to include bias terms.\n        \"\"\"\n        super().__init__()\n        # self.input_experts = nn.ModuleList(\n        #     [nn.Linear(input_size, output_size, bias=bias) for _ in range(num_experts)]\n        # )\n        self.weight = nn.Parameter(torch.empty(num_experts, input_size, output_size))\n        if bias:\n            self.bias = nn.Parameter(torch.zeros(num_experts, output_size))\n        else:\n            self.bias = None\n        self.reset_parameters()\n        self.num_experts = num_experts\n        self.input_size = input_size\n        self.output_size = output_size\n\n    def extra_repr(self):\n        return 'num_experts={}, input_size={}, output_size={}'.format(\n            self.num_experts, self.input_size, self.output_size)\n\n    def reset_parameters(self) -> None:\n        \"\"\"\n        Reset the parameters of the model.\n        \"\"\"\n        # std = math.sqrt(2.0 / float(self.weight.size(1) + self.weight.size(2)))\n        # a = math.sqrt(3.0) * std\n        nn.init.uniform_(self.weight, -1. / self.weight.size(1), 1. / self.weight.size(1))\n        if self.bias is not None:\n            fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight[0])\n            bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0\n            nn.init.uniform_(self.bias, -bound, bound)\n\n    def forward(self, inputs, expert_size):\n        \"\"\"\n        Forward pass of the ParallelExperts module.","source_hash":"38af1a8a770463b6f044bf3f49cdee78c1e479375bd6ceefddf2191b71cf5340","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.utils.parallel_experts.extra_repr","uri":"program://ModuleFormer/function/moduleformer.utils.parallel_experts.extra_repr#L162-L164","kind":"function","name":"extra_repr","path":"moduleformer/utils/parallel_experts.py","language":"python","start_line":162,"end_line":164,"context_start_line":142,"context_end_line":184,"code":"        Args:\n            num_experts (int): Number of experts.\n            input_size (int): Size of the input.\n            output_size (int): Size of the output.\n            bias (bool): Whether to include bias terms.\n        \"\"\"\n        super().__init__()\n        # self.input_experts = nn.ModuleList(\n        #     [nn.Linear(input_size, output_size, bias=bias) for _ in range(num_experts)]\n        # )\n        self.weight = nn.Parameter(torch.empty(num_experts, input_size, output_size))\n        if bias:\n            self.bias = nn.Parameter(torch.zeros(num_experts, output_size))\n        else:\n            self.bias = None\n        self.reset_parameters()\n        self.num_experts = num_experts\n        self.input_size = input_size\n        self.output_size = output_size\n\n    def extra_repr(self):\n        return 'num_experts={}, input_size={}, output_size={}'.format(\n            self.num_experts, self.input_size, self.output_size)\n\n    def reset_parameters(self) -> None:\n        \"\"\"\n        Reset the parameters of the model.\n        \"\"\"\n        # std = math.sqrt(2.0 / float(self.weight.size(1) + self.weight.size(2)))\n        # a = math.sqrt(3.0) * std\n        nn.init.uniform_(self.weight, -1. / self.weight.size(1), 1. / self.weight.size(1))\n        if self.bias is not None:\n            fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight[0])\n            bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0\n            nn.init.uniform_(self.bias, -bound, bound)\n\n    def forward(self, inputs, expert_size):\n        \"\"\"\n        Forward pass of the ParallelExperts module.\n\n        Args:\n            inputs (Tensor): Input tensor.\n            expert_size: Expert size information.","source_hash":"38af1a8a770463b6f044bf3f49cdee78c1e479375bd6ceefddf2191b71cf5340","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"py:moduleformer.utils.parallel_experts.reset_parameters","uri":"program://ModuleFormer/function/moduleformer.utils.parallel_experts.reset_parameters#L166-L176","kind":"function","name":"reset_parameters","path":"moduleformer/utils/parallel_experts.py","language":"python","start_line":166,"end_line":176,"context_start_line":146,"context_end_line":196,"code":"            bias (bool): Whether to include bias terms.\n        \"\"\"\n        super().__init__()\n        # self.input_experts = nn.ModuleList(\n        #     [nn.Linear(input_size, output_size, bias=bias) for _ in range(num_experts)]\n        # )\n        self.weight = nn.Parameter(torch.empty(num_experts, input_size, output_size))\n        if bias:\n            self.bias = nn.Parameter(torch.zeros(num_experts, output_size))\n        else:\n            self.bias = None\n        self.reset_parameters()\n        self.num_experts = num_experts\n        self.input_size = input_size\n        self.output_size = output_size\n\n    def extra_repr(self):\n        return 'num_experts={}, input_size={}, output_size={}'.format(\n            self.num_experts, self.input_size, self.output_size)\n\n    def reset_parameters(self) -> None:\n        \"\"\"\n        Reset the parameters of the model.\n        \"\"\"\n        # std = math.sqrt(2.0 / float(self.weight.size(1) + self.weight.size(2)))\n        # a = math.sqrt(3.0) * std\n        nn.init.uniform_(self.weight, -1. / self.weight.size(1), 1. / self.weight.size(1))\n        if self.bias is not None:\n            fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight[0])\n            bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0\n            nn.init.uniform_(self.bias, -bound, bound)\n\n    def forward(self, inputs, expert_size):\n        \"\"\"\n        Forward pass of the ParallelExperts module.\n\n        Args:\n            inputs (Tensor): Input tensor.\n            expert_size: Expert size information.\n\n        Returns:\n            Tensor: Output tensor.\n        \"\"\"\n        results = ParallelLinear.apply(inputs, expert_size, self.weight, self.bias)\n        # expert_size_list: List[int] = expert_size.tolist()\n        # input_list = inputs.split(expert_size_list, dim=0)\n        # output_list = []\n        # for i in range(self.num_experts):\n        #     output_list.append(self.input_experts[i](input_list[i]))\n        # results = torch.cat(output_list, dim=0)\n        return results","source_hash":"38af1a8a770463b6f044bf3f49cdee78c1e479375bd6ceefddf2191b71cf5340","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"file:setup.py","uri":"program://ModuleFormer/file/setup.py","kind":"file","name":"setup.py","path":"setup.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":8,"code":"from setuptools import setup, find_packages\n\nsetup(name='moduleformer',\n      packages=find_packages(), \n      install_requires=[\n            'torch',\n            'transformers'\n      ])","source_hash":"f2728f0becd30c58953633136656272ca1265d87d0af7c399aa048edc941b2e8","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"file:test.py","uri":"program://ModuleFormer/file/test.py","kind":"file","name":"test.py","path":"test.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import torch\nfrom transformers import AutoTokenizer, AutoModelForCausalLM, AutoConfig, AutoModelForSequenceClassification\nfrom obsidian import SparseGPTForCausalLM, SparseGPTConfig, SparseGPTForSequenceClassification\nAutoConfig.register(\"sparsegpt\", SparseGPTConfig)\nAutoModelForCausalLM.register(SparseGPTConfig, SparseGPTForCausalLM)\nAutoModelForSequenceClassification.register(SparseGPTConfig, SparseGPTForSequenceClassification)\n\nmodel_path = \"/dccstor/codeai/yikang/pretrained_models/obsidian-8b-dolly\"\n\nmodel = AutoModelForSequenceClassification.from_pretrained(model_path)\n\nx = torch.randint(low=10, high=101, size=(5, 7))\n\n# 选择模型和tokenizer\ntokenizer = AutoTokenizer.from_pretrained(model_path)\n\n# 输入文本\ntext = \"The quick brown fox jumps over the lazy dog\"\n\n# 对文本进行 tokenization 和 padding\ninput_ids = tokenizer.encode(text, return_tensors=\"pt\")","source_hash":"5702c37d1db11fca9e03bf54f18a031e8c55a8eb91df120ec19b04d6ccd83d14","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"file:moduleformer/modeling_moduleformer.py","uri":"program://ModuleFormer/file/moduleformer/modeling_moduleformer.py","kind":"file","name":"moduleformer/modeling_moduleformer.py","path":"moduleformer/modeling_moduleformer.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"\"\"\" PyTorch ModuleFormer model.\"\"\"\n\nfrom typing import Optional, Tuple, Union\nimport math\n\nimport torch\nimport torch.utils.checkpoint\nfrom torch import nn\nfrom torch.nn import CrossEntropyLoss, MSELoss, BCEWithLogitsLoss\nfrom torch.nn import functional as F\n\nfrom transformers.activations import get_activation\nfrom transformers.modeling_outputs import (\n    BaseModelOutputWithPast, \n    CausalLMOutputWithPast,\n    SequenceClassifierOutputWithPast\n)\nfrom transformers.modeling_utils import PreTrainedModel\nfrom transformers.utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging\nfrom .configuration_moduleformer import ModuleFormerConfig\nfrom .utils.moe import MoE","source_hash":"3f879791c90afd36877381520b7b3cd97436a71176e1b55e6418cb46413338c3","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"file:moduleformer/__init__.py","uri":"program://ModuleFormer/file/moduleformer/__init__.py","kind":"file","name":"moduleformer/__init__.py","path":"moduleformer/__init__.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"# Copyright 2023 Salesforce authors, The EleutherAI, and HuggingFace Teams. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import TYPE_CHECKING\n\nfrom transformers.utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available\n\n\n_import_structure = {\n    \"configuration_moduleformer\": [\"SPARSEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP\", \"ModuleFormerConfig\", \"ModuleFormerOnnxConfig\"],\n}","source_hash":"e2bce92a43d6d64b37ba7f14eab24b05f151256cd56cfc4179dd81d60e93d183","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"file:moduleformer/configuration_moduleformer.py","uri":"program://ModuleFormer/file/moduleformer/configuration_moduleformer.py","kind":"file","name":"moduleformer/configuration_moduleformer.py","path":"moduleformer/configuration_moduleformer.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"\"\"\" ModuleFormer model configuration\"\"\"\nfrom collections import OrderedDict\nfrom typing import Any, List, Mapping, Optional\n\nfrom transformers import PreTrainedTokenizer, TensorType, is_torch_available\nfrom transformers.configuration_utils import PretrainedConfig\nfrom transformers.onnx import OnnxConfigWithPast, PatchingSpec\nfrom transformers.utils import logging\n\n\nlogger = logging.get_logger(__name__)\n\n\n# SPARSEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {\n#     \"moduleformer-small\": \"https://huggingface.co/moduleformer-small/resolve/main/config.json\",\n# }\n\n\n\nclass ModuleFormerConfig(PretrainedConfig):\n    r\"\"\"","source_hash":"45c91a63cd6e012b20106990eefa95f91fa295a8b07f6798547d633d80167f8c","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"file:moduleformer/utils/gate.py","uri":"program://ModuleFormer/file/moduleformer/utils/gate.py","kind":"file","name":"moduleformer/utils/gate.py","path":"moduleformer/utils/gate.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom typing import Any, Dict, List, Optional\n\n# @torch.jit.script\ndef log_gmm_posterior(z, expert_centroids):\n     \"\"\"\n    Compute the log posterior probabilities of data points z belonging to Gaussian mixture components defined by centroids.\n\n    Args:\n        z (torch.Tensor): Data points (batch_size x feature_dim).\n        expert_centroids (torch.Tensor): Centroids of Gaussian mixture components (num_experts x feature_dim).\n\n    Returns:\n        torch.Tensor: Log posterior probabilities for each data point (batch_size x num_experts).\n    \"\"\"\n     return (\n        torch.matmul(z, expert_centroids.t())\n        # - 0.5 * (\n        #     torch.einsum('ni,ni->n', z, z)[:, None] +","source_hash":"50c6c515957442e85e32668bcf9dd95bbc6014fedc19bd913fc452d74e2c953d","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"file:moduleformer/utils/moe.py","uri":"program://ModuleFormer/file/moduleformer/utils/moe.py","kind":"file","name":"moduleformer/utils/moe.py","path":"moduleformer/utils/moe.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import math\nfrom typing import List\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom .parallel_experts import ParallelExperts\nfrom .gate import top_k_gating, compute_gating\n\n\nclass MoE(nn.Module):\n    \"\"\"\n    A Sparsely gated mixture of experts layer with 1-layer Feed-Forward networks as experts.\n    \n\n    Args:\n        input_size: integer - size of the input\n        head_size: integer - size of the expert's hidden layer\n        num_experts: an integer - number of experts\n        top_k: an integer - how many experts to use for each batch element","source_hash":"67a659ec7f4870a47dcebcf18016b9390ed09a97d58f2b0ec4ad2ab626cf8633","truncated":false}
{"repo_id":"ModuleFormer","entity_id":"file:moduleformer/utils/parallel_experts.py","uri":"program://ModuleFormer/file/moduleformer/utils/parallel_experts.py","kind":"file","name":"moduleformer/utils/parallel_experts.py","path":"moduleformer/utils/parallel_experts.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.cuda.amp import custom_fwd, custom_bwd\nfrom typing import Any, Dict, List, Optional\nfrom torch import Tensor\n\n\nclass ParallelLinear(torch.autograd.Function):\n    \"\"\"\n    A custom autograd function for Parallel Linear operation.\n    \"\"\"\n\n    @staticmethod\n    @custom_fwd\n    def forward(ctx, input, expert_size_list, weight, bias=None):\n        \"\"\"\n        Forward pass of the ParallelLinear operation.\n\n        Args:","source_hash":"38af1a8a770463b6f044bf3f49cdee78c1e479375bd6ceefddf2191b71cf5340","truncated":false}