cbrownpinilla committed
Commit 5fc231f · 1 Parent(s): 79073a5

HF integration

config.json CHANGED
@@ -8,6 +8,10 @@
  "attention_dropout": 0.0,
  "attention_layer_norm": true,
  "attention_layer_norm_with_affine": true,
+ "auto_map": {
+   "AutoConfig": "configuration_olmo.OLMoConfig",
+   "AutoModelForCausalLM": "modeling_olmo.OLMoForCausalLM"
+ },
  "bias_for_layer_norm": false,
  "block_group_size": 1,
  "block_type": "sequential",
configuration_olmo.py CHANGED
@@ -1 +1,4 @@
+ from transformers import AutoConfig
  from hf_olmo import OLMoConfig
+
+ AutoConfig.register("hf_olmo", OLMoConfig)
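
Once this module has been imported (or executed as remote code), AutoConfig can dispatch on the registered "hf_olmo" model type directly. A quick sketch, assuming hf_olmo is installed and the checkpoint's config.json declares "model_type": "hf_olmo"; the path is a placeholder:

# Assumes the AutoConfig.register(...) call above has already run.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("path/to/olmo-checkpoint")  # placeholder path
print(type(config).__name__)  # OLMoConfig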
modeling_olmo.py CHANGED
@@ -1 +1,4 @@
- from hf_olmo import OLMoForCausalLM
+ from hf_olmo import OLMoForCausalLM, OLMoConfig
+ from transformers import AutoModelForCausalLM
+
+ AutoModelForCausalLM.register(OLMoConfig, OLMoForCausalLM)
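
Registering the (OLMoConfig, OLMoForCausalLM) pair lets the causal-LM Auto class dispatch on the config type, matching the AutoModelForCausalLM entry in auto_map. A sketch under the same assumptions, with a placeholder path:

# Assumes the register(...) call above has already run.
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("path/to/olmo-checkpoint")  # placeholder path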
tokenization_olmo_fast.py CHANGED
@@ -1 +1,5 @@
- from hf_olmo.tokenization_olmo_fast import OLMoTokenizerFast
+ from hf_olmo import OLMoConfig
+ from hf_olmo.tokenization_olmo_fast import OLMoTokenizerFast
+ from transformers import AutoTokenizer
+
+ AutoTokenizer.register(OLMoConfig, fast_tokenizer_class=OLMoTokenizerFast)
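
With the config, model, and tokenizer registrations all in place, the standard Auto pipeline works end to end. A hedged sketch; the checkpoint path and prompt are placeholders:

# Assumes all three registrations above have run and hf_olmo is installed.
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/olmo-checkpoint")
model = AutoModelForCausalLM.from_pretrained("path/to/olmo-checkpoint")

inputs = tokenizer("Language modeling is ", return_tensors="pt")
output_ids = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))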