hevok committed on
Commit
078ead7
·
verified ·
1 Parent(s): c3d63b6

Upload 0_4_8_results_2025-03-16T22-12-31.013310_lambada_openai.json

Browse files
lm_eval/state-spaces__mamba2-370m/add_bos/0_4_8_results_2025-03-16T22-12-31.013310_lambada_openai.json ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "results": {
3
+ "lambada_openai": {
4
+ "alias": "lambada_openai",
5
+ "perplexity,none": 7.994857320703547,
6
+ "perplexity_stderr,none": 0.217737612169515,
7
+ "acc,none": 0.558897729477974,
8
+ "acc_stderr,none": 0.006917479680386483
9
+ }
10
+ },
11
+ "group_subtasks": {
12
+ "lambada_openai": []
13
+ },
14
+ "configs": {
15
+ "lambada_openai": {
16
+ "task": "lambada_openai",
17
+ "tag": [
18
+ "lambada"
19
+ ],
20
+ "dataset_path": "EleutherAI/lambada_openai",
21
+ "dataset_name": "default",
22
+ "dataset_kwargs": {
23
+ "trust_remote_code": true
24
+ },
25
+ "test_split": "test",
26
+ "doc_to_text": "{{text.split(' ')[:-1]|join(' ')}}",
27
+ "doc_to_target": "{{' '+text.split(' ')[-1]}}",
28
+ "unsafe_code": false,
29
+ "description": "",
30
+ "target_delimiter": " ",
31
+ "fewshot_delimiter": "\n\n",
32
+ "num_fewshot": 0,
33
+ "metric_list": [
34
+ {
35
+ "metric": "perplexity",
36
+ "aggregation": "perplexity",
37
+ "higher_is_better": false
38
+ },
39
+ {
40
+ "metric": "acc",
41
+ "aggregation": "mean",
42
+ "higher_is_better": true
43
+ }
44
+ ],
45
+ "output_type": "loglikelihood",
46
+ "repeats": 1,
47
+ "should_decontaminate": true,
48
+ "doc_to_decontamination_query": "{{text}}",
49
+ "metadata": {
50
+ "version": 1.0
51
+ }
52
+ }
53
+ },
54
+ "versions": {
55
+ "lambada_openai": 1.0
56
+ },
57
+ "n-shot": {
58
+ "lambada_openai": 0
59
+ },
60
+ "higher_is_better": {
61
+ "lambada_openai": {
62
+ "perplexity": false,
63
+ "acc": true
64
+ }
65
+ },
66
+ "n-samples": {
67
+ "lambada_openai": {
68
+ "original": 5153,
69
+ "effective": 5153
70
+ }
71
+ },
72
+ "config": {
73
+ "model": "mamba_ssm",
74
+ "model_args": "pretrained=state-spaces/mamba2-370m,dtype=float32,add_bos_token=True,trust_remote_code=True",
75
+ "model_num_parameters": 368346624,
76
+ "model_dtype": "",
77
+ "model_revision": "main",
78
+ "model_sha": "0eb4c69a118dc9b7568ce0b52990dc8f99967983",
79
+ "batch_size": "auto",
80
+ "batch_sizes": [
81
+ 64
82
+ ],
83
+ "device": "cuda",
84
+ "use_cache": null,
85
+ "limit": null,
86
+ "bootstrap_iters": 100000,
87
+ "gen_kwargs": null,
88
+ "random_seed": 0,
89
+ "numpy_seed": 1234,
90
+ "torch_seed": 1234,
91
+ "fewshot_seed": 1234
92
+ },
93
+ "git_hash": null,
94
+ "date": 1742163068.4889696,
95
+ "pretty_env_info": "PyTorch version: 2.5.1+cu121\nIs debug build: False\nCUDA used to build PyTorch: 12.1\nROCM used to build PyTorch: N/A\n\nOS: Ubuntu 20.04.6 LTS (x86_64)\nGCC version: (Ubuntu 9.4.0-1ubuntu1~20.04.2) 9.4.0\nClang version: Could not collect\nCMake version: version 3.16.3\nLibc version: glibc-2.31\n\nPython version: 3.10.10 (main, Mar 21 2023, 18:45:11) [GCC 11.2.0] (64-bit runtime)\nPython platform: Linux-5.15.0-1077-aws-x86_64-with-glibc2.31\nIs CUDA available: True\nCUDA runtime version: 12.1.105\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: GPU 0: NVIDIA L4\nNvidia driver version: 535.230.02\ncuDNN version: Probably one of the following:\n/usr/lib/x86_64-linux-gnu/libcudnn.so.8.9.0\n/usr/lib/x86_64-linux-gnu/libcudnn_adv_infer.so.8.9.0\n/usr/lib/x86_64-linux-gnu/libcudnn_adv_train.so.8.9.0\n/usr/lib/x86_64-linux-gnu/libcudnn_cnn_infer.so.8.9.0\n/usr/lib/x86_64-linux-gnu/libcudnn_cnn_train.so.8.9.0\n/usr/lib/x86_64-linux-gnu/libcudnn_ops_infer.so.8.9.0\n/usr/lib/x86_64-linux-gnu/libcudnn_ops_train.so.8.9.0\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nByte Order: Little Endian\nAddress sizes: 48 bits physical, 48 bits virtual\nCPU(s): 16\nOn-line CPU(s) list: 0-15\nThread(s) per core: 2\nCore(s) per socket: 8\nSocket(s): 1\nNUMA node(s): 1\nVendor ID: AuthenticAMD\nCPU family: 25\nModel: 1\nModel name: AMD EPYC 7R13 Processor\nStepping: 1\nCPU MHz: 2650.000\nBogoMIPS: 5300.00\nHypervisor vendor: KVM\nVirtualization type: full\nL1d cache: 256 KiB\nL1i cache: 256 KiB\nL2 cache: 4 MiB\nL3 cache: 32 MiB\nNUMA node0 CPU(s): 0-15\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Not affected\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Not affected\nVulnerability Reg file data sampling: Not 
affected\nVulnerability Retbleed: Not affected\nVulnerability Spec rstack overflow: Mitigation; safe RET, no microcode\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl and seccomp\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Retpolines; IBPB conditional; IBRS_FW; STIBP always-on; RSB filling; PBRSB-eIBRS Not affected; BHI Not affected\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Not affected\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc rep_good nopl nonstop_tsc cpuid extd_apicid aperfmperf tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch topoext invpcid_single ssbd ibrs ibpb stibp vmmcall fsgsbase bmi1 avx2 smep bmi2 invpcid rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr rdpru wbnoinvd arat npt nrip_save vaes vpclmulqdq rdpid\n\nVersions of relevant libraries:\n[pip3] numpy==1.26.4\n[pip3] pytorch-lightning==2.5.0.post0\n[pip3] torch==2.5.1+cu121\n[pip3] torchaudio==2.5.1+cu121\n[pip3] torchmetrics==1.3.1\n[pip3] torchvision==0.20.1+cu121\n[pip3] triton==3.1.0\n[conda] numpy 1.26.4 pypi_0 pypi\n[conda] pytorch-lightning 2.5.0.post0 pypi_0 pypi\n[conda] torch 2.5.1+cu121 pypi_0 pypi\n[conda] torchaudio 2.5.1+cu121 pypi_0 pypi\n[conda] torchmetrics 1.3.1 pypi_0 pypi\n[conda] torchvision 0.20.1+cu121 pypi_0 pypi\n[conda] triton 3.1.0 pypi_0 pypi",
96
+ "transformers_version": "4.49.0",
97
+ "upper_git_hash": null,
98
+ "tokenizer_pad_token": [
99
+ "<|endoftext|>",
100
+ "0"
101
+ ],
102
+ "tokenizer_eos_token": [
103
+ "<|endoftext|>",
104
+ "0"
105
+ ],
106
+ "tokenizer_bos_token": [
107
+ "<|endoftext|>",
108
+ "0"
109
+ ],
110
+ "eot_token_id": 0,
111
+ "max_length": 2048,
112
+ "task_hashes": {},
113
+ "model_source": "mamba_ssm",
114
+ "model_name": "state-spaces/mamba2-370m",
115
+ "model_name_sanitized": "state-spaces__mamba2-370m",
116
+ "system_instruction": null,
117
+ "system_instruction_sha": null,
118
+ "fewshot_as_multiturn": false,
119
+ "chat_template": null,
120
+ "chat_template_sha": null,
121
+ "start_time": 495.958865336,
122
+ "end_time": 590.215747822,
123
+ "total_evaluation_time_seconds": "94.25688248600005"
124
+ }