natankatz committed
Commit d28314e · 1 Parent(s): de4c049

Upload LlamaForCausalLM

config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "natankatz/solidity_1",
+  "_name_or_path": "codellama/CodeLlama-7b-hf",
   "architectures": [
     "LlamaForCausalLM"
   ],
@@ -22,5 +22,5 @@
   "torch_dtype": "float16",
   "transformers_version": "4.34.0.dev0",
   "use_cache": true,
-  "vocab_size": 42414
+  "vocab_size": 32016
 }
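This commit retargets _name_or_path to the CodeLlama base checkpoint and shrinks vocab_size from 42414 to 32016 (CodeLlama-7b's vocabulary size). A minimal sketch of sanity-checking the uploaded config against its tokenizer with transformers; the repo id below is an assumption, substitute the repository this commit lives in:

from transformers import AutoConfig, AutoTokenizer

repo_id = "natankatz/solidity_1"  # assumption: the target repo is not named on this page
config = AutoConfig.from_pretrained(repo_id)
tokenizer = AutoTokenizer.from_pretrained(repo_id)

print(config._name_or_path)  # "codellama/CodeLlama-7b-hf" after this commit
print(config.vocab_size)     # 32016
# The embedding table must cover every tokenizer id, or lookups go out of range.
assert len(tokenizer) <= config.vocab_size, "tokenizer exceeds embedding table"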
pytorch_model-00001-of-00002.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4cb15ec716b65a040e1e3deb89671d8f1a766aa6211b69db0e93100dea405904
-size 9971739657
+oid sha256:100f053e17a1f5ac4efaba5b58a5e7e17754dd80161464182d882dca41d09616
+size 9976754202
pytorch_model-00002-of-00002.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a86ccad7cfbfb57f3daff799973c1bc02690f95c1eee72e166b40739ae7190c0
-size 3675818233
+oid sha256:bf47d24595f15830b9b655c2f590afd19e4b96402cbeac615f860f6db143418a
+size 3500442883
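Both .bin shards are stored through Git LFS, so the diffs above change only the pointer files: the sha256 oid and byte size that address the real binaries. A minimal sketch of verifying a downloaded shard against its pointer (local file paths are assumptions):

import hashlib

def verify_lfs_object(path, expected_oid, expected_size):
    """Hash a file in chunks and compare against an LFS pointer's oid/size."""
    h = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB at a time
            h.update(chunk)
            size += len(chunk)
    return h.hexdigest() == expected_oid and size == expected_size

# Values copied from the new pointer for shard 1 of this commit.
assert verify_lfs_object(
    "pytorch_model-00001-of-00002.bin",
    "100f053e17a1f5ac4efaba5b58a5e7e17754dd80161464182d882dca41d09616",
    9976754202,
)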
pytorch_model.bin.index.json CHANGED
@@ -1,6 +1,6 @@
 {
   "metadata": {
-    "total_size": 13647454208
+    "total_size": 13477093376
   },
   "weight_map": {
     "lm_head.weight": "pytorch_model-00002-of-00002.bin",
@@ -149,11 +149,11 @@
     "model.layers.22.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
     "model.layers.22.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
     "model.layers.22.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
-    "model.layers.23.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
-    "model.layers.23.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin",
+    "model.layers.23.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+    "model.layers.23.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
     "model.layers.23.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
     "model.layers.23.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
-    "model.layers.23.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+    "model.layers.23.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
     "model.layers.23.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
     "model.layers.23.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
     "model.layers.23.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",