gsaltintas committed on
Commit
9aa3c49
·
verified ·
1 Parent(s): 5cb8994

Upload model files

Browse files
Files changed (3) hide show
  1. .config.json.swp +0 -0
  2. config.json +1 -5
  3. modeling_llama_albert.py +2 -0
.config.json.swp ADDED
Binary file (12.3 kB). View file
 
config.json CHANGED
@@ -22,9 +22,5 @@
22
  "tie_word_embeddings": false,
23
  "transformers_version": "4.57.3",
24
  "use_cache": true,
25
- "vocab_size": 851586,
26
- "auto_map": {
27
- "AutoConfig": "modeling_llama_albert.LlamaAlbertConfig",
28
- "AutoModelForCausalLM": "modeling_llama_albert.LlamaAlbertForCausalLM"
29
- }
30
  }
 
22
  "tie_word_embeddings": false,
23
  "transformers_version": "4.57.3",
24
  "use_cache": true,
25
+ "vocab_size": 851586
 
 
 
 
26
  }
modeling_llama_albert.py CHANGED
@@ -13,7 +13,9 @@ class LlamaAlbertConfig(LlamaConfig):
13
  "AutoConfig": "modeling_llama_albert.LlamaAlbertConfig",
14
  "AutoModelForCausalLM": "modeling_llama_albert.LlamaAlbertForCausalLM"
15
  }
 
16
  def __init__(self, embedding_dim=128, **kwargs):
 
17
  super().__init__(**kwargs)
18
  self.embedding_dim = embedding_dim
19
 
 
13
  "AutoConfig": "modeling_llama_albert.LlamaAlbertConfig",
14
  "AutoModelForCausalLM": "modeling_llama_albert.LlamaAlbertForCausalLM"
15
  }
16
+ _auto_class: str = "modeling_llama_albert.LlamaAlbertForCausalLM"
17
  def __init__(self, embedding_dim=128, **kwargs):
18
+ print(kwargs)
19
  super().__init__(**kwargs)
20
  self.embedding_dim = embedding_dim
21