valteu committed (verified)
Commit c25d5ce · Parent: 748a96a

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+experiment_config.json filter=lfs diff=lfs merge=lfs -text
config.json ADDED
@@ -0,0 +1,40 @@
+{
+  "architectures": [
+    "LlamaForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "bos_token_id": 128000,
+  "eos_token_id": [
+    128001,
+    128008,
+    128009
+  ],
+  "head_dim": 64,
+  "hidden_act": "silu",
+  "hidden_size": 2048,
+  "initializer_range": 0.02,
+  "intermediate_size": 8192,
+  "max_position_embeddings": 131072,
+  "mlp_bias": false,
+  "model_type": "llama",
+  "num_attention_heads": 32,
+  "num_hidden_layers": 16,
+  "num_key_value_heads": 8,
+  "pad_token_id": 128004,
+  "pretraining_tp": 1,
+  "rms_norm_eps": 1e-05,
+  "rope_scaling": {
+    "factor": 32.0,
+    "high_freq_factor": 4.0,
+    "low_freq_factor": 1.0,
+    "original_max_position_embeddings": 8192,
+    "rope_type": "llama3"
+  },
+  "rope_theta": 500000.0,
+  "tie_word_embeddings": true,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.52.4",
+  "use_cache": true,
+  "vocab_size": 128256
+}
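
This config describes a Llama-architecture model: 16 hidden layers, hidden size 2048, 32 attention heads with 8 key/value heads (grouped-query attention), tied embeddings, and Llama 3-style RoPE scaling out to 131,072 positions. A minimal sketch of rebuilding the model skeleton from it with transformers; the local file path is an assumption, since this page does not show the repository id:

```python
# A minimal sketch (not part of the commit): rebuilding the model skeleton
# from this config with transformers. The local path "config.json" is an
# assumption; the Hub repository id is not shown on this page.
from transformers import LlamaConfig, LlamaForCausalLM

config = LlamaConfig.from_json_file("config.json")

# Instantiating from the config alone gives randomly initialized weights;
# the trained weights live in model.safetensors, added later in this commit.
model = LlamaForCausalLM(config)
print(f"{model.num_parameters() / 1e9:.2f}B parameters")  # ~1.24B for this shape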
experiment_config.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a6883313f4bcf230774d87cbdb27ea1c6bf0c2fe53ff061d16df9a3f4d55361b
+size 20258370
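
experiment_config.json is tracked via Git LFS (per the .gitattributes change above), so the repository stores only this three-line pointer: spec version, SHA-256 oid, and byte size (~20 MB here). model.safetensors below uses the same pointer format. A hedged sketch of fetching the resolved file via huggingface_hub, with a placeholder repo id since this page does not name the repository:

```python
# A minimal sketch, assuming a hypothetical repo id ("valteu/<repo>" is a
# placeholder; this page does not name the repository). hf_hub_download
# resolves the LFS pointer and returns a local path to the real payload.
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="valteu/<repo>",            # placeholder, not in the diff
    filename="experiment_config.json",
    revision="c25d5ce",                 # the commit shown above
)
print(path)  # local path to the 20,258,370-byte JSON file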
generation_config.json ADDED
@@ -0,0 +1,12 @@
+{
+  "bos_token_id": 128000,
+  "do_sample": true,
+  "eos_token_id": [
+    128001,
+    128008,
+    128009
+  ],
+  "temperature": 0.6,
+  "top_p": 0.9,
+  "transformers_version": "4.52.4"
+}
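
These are the repository's default sampling settings: nucleus sampling with temperature 0.6 and top-p 0.9. A hedged sketch of what they amount to at generation time (the repo id is again a placeholder):

```python
# A minimal sketch showing the defaults above applied explicitly. When this
# file ships alongside the model, model.generate() picks up do_sample /
# temperature / top_p on its own; passing them here just makes the sampling
# setup visible. The repo id is a placeholder.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("valteu/<repo>")
model = AutoModelForCausalLM.from_pretrained(
    "valteu/<repo>", torch_dtype=torch.bfloat16
)

inputs = tokenizer("The capital of France is", return_tensors="pt")
out = model.generate(
    **inputs,
    do_sample=True,       # nucleus sampling, per generation_config.json
    temperature=0.6,
    top_p=0.9,
    max_new_tokens=32,
)
print(tokenizer.decode(out[0], skip_special_tokens=True))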
logs.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:87f25b7868dfe693135671e51b49f6b08f6d35a431ee82dc2d0de65d6767da03
+size 2471645608
profiler_cache.csv ADDED
The diff for this file is too large to render. See raw diff
 
results.json ADDED
@@ -0,0 +1,19 @@
+{
+  "results": {
+    "commonsense_qa_commonsense_qa_acc_score": 0.5167895167895168,
+    "commonsense_qa_commonsense_qa_acc_sem": 0.014306885655251397,
+    "mathqa_mathqa_acc_score": 0.5031825795644891,
+    "mathqa_mathqa_acc_sem": 0.009148114833567245,
+    "mathqa_mathqa_acc_norm_score": 0.4834170854271357,
+    "logiqa_logiqa_acc_score": 0.28417818740399386,
+    "logiqa_logiqa_acc_sem": 0.01809529226082815,
+    "logiqa_logiqa_acc_norm_score": 0.30721966205837176
+  },
+  "energy": {
+    "total": 479802.75996,
+    "train": 352225.47456,
+    "eval": 127577.28540000001
+  },
+  "train_energy": 352225.47456,
+  "eval_energy": 127577.28540000001
+}
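
The energy block is self-consistent: 352,225.47 (train) + 127,577.29 (eval) = 479,802.76 (total). A small sketch of loading the file and verifying that:

```python
# A minimal sketch: checking that the energy split in results.json is
# internally consistent (train + eval == total, up to float error).
import json

with open("results.json") as f:
    results = json.load(f)

energy = results["energy"]
assert abs(energy["train"] + energy["eval"] - energy["total"]) < 1e-6
print(f"train share of energy: {energy['train'] / energy['total']:.1%}")  # ~73.4%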
summary.json ADDED
@@ -0,0 +1,23 @@
+{
+  "flops": {
+    "eval": 50323539177094400,
+    "train": 76563139214162640,
+    "total": 126886678391257040
+  },
+  "total": {
+    "total": 479802.75996,
+    "train": 352225.47456,
+    "eval": 127577.28540000001
+  },
+  "best_evals": {
+    "pplx": {
+      "score": 7091.001736438246,
+      "step": 932
+    },
+    "rougel": {
+      "precision": 0.32266294227188086,
+      "recall": 0.326975791433892,
+      "fmeasure": 0.32145517424847037
+    }
+  }
+}
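
The FLOPs block sums the same way (eval + train = total). A sketch of the check, together with a rough FLOPs-per-energy figure; the energy unit is never stated in this commit, so the joule interpretation below is only an assumption:

```python
# A minimal sketch: the analogous consistency check for summary.json, plus a
# rough efficiency figure. The energy unit is not stated anywhere in this
# commit; treating it as joules is an assumption made purely for illustration.
import json

with open("summary.json") as f:
    summary = json.load(f)

flops, energy = summary["flops"], summary["total"]
assert flops["eval"] + flops["train"] == flops["total"]  # exact integer match

# Hypothetical efficiency estimate, valid only if the energy values are joules:
print(f"{flops['total'] / energy['total']:.2e} FLOPs per unit of energy")  # ~2.64e11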