ataeff committed
Commit 39e40b3 · verified · 1 parent: b04d034

Upload 7 files

.gitattributes CHANGED
@@ -37,3 +37,5 @@ arianna_3b_2500_q4_k_m.gguf filter=lfs diff=lfs merge=lfs -text
  arianna_3b_3000_q4_k_m.gguf filter=lfs diff=lfs merge=lfs -text
  arianna_3b_2500_q4_0.gguf filter=lfs diff=lfs merge=lfs -text
  arianna_3b_3000_q4_0.gguf filter=lfs diff=lfs merge=lfs -text
+ llama1-1b/arianna_1b_1500_q4_0.gguf filter=lfs diff=lfs merge=lfs -text
+ llama1-1b/arianna_1b_3000_q4_0.gguf filter=lfs diff=lfs merge=lfs -text
llama1-1b/adapter_1500.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a1b9505673da544b8b5faaa7ad1578fa6380acf7865a4f501dd9ad2253a6f74a
+ size 201892112
llama1-1b/adapter_3000.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:474a4767ca52d6d53a839a01f45940673554efd38c7455fa60c881b885ac90e6
+ size 201892112
llama1-1b/adapter_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 128,
+   "lora_dropout": 0.05,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 64,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "down_proj",
+     "gate_proj",
+     "k_proj",
+     "q_proj",
+     "v_proj",
+     "o_proj",
+     "up_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_dora": false,
+   "use_rslora": false
+ }
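
The adapter_config.json above is a standard PEFT LoRA configuration (rank 64, alpha 128, dropout 0.05, targeting all attention and MLP projections) on top of TinyLlama/TinyLlama-1.1B-Chat-v1.0. A minimal sketch of attaching one of the uploaded adapter checkpoints with PEFT follows; the local directory layout and the renaming of adapter_1500.safetensors to PEFT's default adapter_model.safetensors are assumptions, not something this commit specifies.

```python
# Minimal sketch (not part of this repo): attach one of the uploaded LoRA
# adapters to its TinyLlama base model with PEFT.
# Assumption: "llama1-1b/" contains adapter_config.json plus the chosen
# checkpoint copied to PEFT's default filename, adapter_model.safetensors
# (e.g. from adapter_1500.safetensors).
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
base = AutoModelForCausalLM.from_pretrained(base_id)
tokenizer = AutoTokenizer.from_pretrained(base_id)

# Load the r=64 / alpha=128 adapter described by adapter_config.json.
model = PeftModel.from_pretrained(base, "llama1-1b")
model.eval()

inputs = tokenizer("Hello, ", return_tensors="pt")
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=32)[0]))
```

If a standalone model is wanted instead of base-plus-adapter, `model.merge_and_unload()` folds the LoRA weights into the base model before export.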
llama1-1b/arianna_1b_1500_q4_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:959ca9816ca442ac7c4f7c3405a48e1ff1cedce65486daaf94f61cd5f0c3a7a3
+ size 636726752
llama1-1b/arianna_1b_3000_q4_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:56570c473647225da60d379910f08d09997b64ef0ee9fe5ff33612403de7b82e
+ size 636726752
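
The two *_q4_0.gguf files are 4-bit (q4_0) quantized exports intended for llama.cpp-compatible runtimes. Below is a minimal sketch of running one of them with the llama-cpp-python bindings; the package choice, prompt, and context size are assumptions and are not prescribed by this commit.

```python
# Minimal sketch (not part of this repo): run the q4_0 GGUF export with the
# llama-cpp-python bindings (pip install llama-cpp-python).
from llama_cpp import Llama

# Assumption: the file has been pulled from Git LFS to this local path.
llm = Llama(model_path="llama1-1b/arianna_1b_1500_q4_0.gguf", n_ctx=2048)

out = llm("Hello, ", max_tokens=64)
print(out["choices"][0]["text"])
```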
llama1-1b/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
llama1-1b/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723