Pklett committed on
Commit
ac9e251
·
2 Parent(s): 90f88b7 c0a2dea

Merge branch 'main' of https://huggingface.co/normalcomputing/extended-mind-mpt-7b into main

Browse files
Files changed (3) hide show
  1. blocks.py +1 -1
  2. config.json +2 -2
  3. modeling_mpt.py +4 -4
blocks.py CHANGED
@@ -7,7 +7,7 @@
7
  from typing import Dict, Optional, Tuple
8
  import torch
9
  import torch.nn as nn
10
- from memorizing_transformers.mpt.attention import ATTN_CLASS_REGISTRY
11
  from llmfoundry.models.layers.norm import NORM_CLASS_REGISTRY
12
 
13
  class MPTMLP(nn.Module):
 
7
  from typing import Dict, Optional, Tuple
8
  import torch
9
  import torch.nn as nn
10
+ from attention import ATTN_CLASS_REGISTRY
11
  from llmfoundry.models.layers.norm import NORM_CLASS_REGISTRY
12
 
13
  class MPTMLP(nn.Module):
config.json CHANGED
@@ -21,8 +21,8 @@
21
  "use_active_externalism": true
22
  },
23
  "auto_map": {
24
- "AutoConfig": "mosaicml/mpt-7b--configuration_mpt.MPTConfig",
25
- "AutoModelForCausalLM": "mosaicml/mpt-7b--modeling_mpt.MPTForCausalLM"
26
  },
27
  "d_model": 4096,
28
  "emb_pdrop": 0,
 
21
  "use_active_externalism": true
22
  },
23
  "auto_map": {
24
+ "AutoConfig": "configuration.ExtendedMPTConfig",
25
+ "AutoModelForCausalLM": "modeling_mpt.ExtendedMPTForCausalLM"
26
  },
27
  "d_model": 4096,
28
  "emb_pdrop": 0,
modeling_mpt.py CHANGED
@@ -27,10 +27,10 @@ from llmfoundry.models.layers.custom_embedding import SharedEmbedding
27
  from llmfoundry.models.layers.norm import NORM_CLASS_REGISTRY
28
  from llmfoundry.models.utils.param_init_fns import MODEL_INIT_REGISTRY
29
 
30
- from memorizing_transformers.mpt.configuration import ExtendedMPTConfig
31
- from memorizing_transformers.mpt.attention import attn_bias_shape, build_attn_bias
32
- from memorizing_transformers.mpt.blocks import MPTBlock
33
- from memorizing_transformers.utils import instantiate_from_config
34
 
35
  Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
36
 
 
27
  from llmfoundry.models.layers.norm import NORM_CLASS_REGISTRY
28
  from llmfoundry.models.utils.param_init_fns import MODEL_INIT_REGISTRY
29
 
30
+ from configuration import ExtendedMPTConfig
31
+ from attention import attn_bias_shape, build_attn_bias
32
+ from blocks import MPTBlock
33
+ from utils import instantiate_from_config
34
 
35
  Tokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]
36