```bash
#!/usr/bin/env bash

# Pin the run to a single socket; the log below was produced with SOCKET=1.
SOCKET=${SOCKET:-1}
echo "SOCKET is set to: ${SOCKET}"

# Q8_0 is the quant actually measured in the log below; the other GGUFs from
# the same repo are listed as alternatives.
model=/mnt/data/models/ubergarm/Qwen3-Coder-Next-GGUF/Qwen3-Coder-Next-Q8_0.gguf
#model=/mnt/data/models/ubergarm/Qwen3-Coder-Next-GGUF/Qwen3-Coder-Next-512x2.5B-BF16-00001-of-00004.gguf
#model=/mnt/data/models/ubergarm/Qwen3-Coder-Next-GGUF/Qwen3-Coder-Next-IQ4_KSS.gguf
#model=/mnt/data/models/ubergarm/Qwen3-Coder-Next-GGUF/Qwen3-Coder-Next-smol-IQ2_KS.gguf

numactl -N "$SOCKET" -m "$SOCKET" \
    ./build/bin/llama-perplexity \
        -m "$model" \
        -f wiki.test.raw \
        --seed 1337 \
        --ctx-size 512 \
        -ub 512 -b 2048 \
        --validate-quants \
        --no-mmap \
        --numa numactl \
        --threads 96 \
        --threads-batch 128
```
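Before pinning with `numactl -N/-m`, it is worth confirming that the chosen node exists and has enough free memory for the ~79 GiB of Q8_0 weights, since `--no-mmap` allocates them up front. A quick sanity check using standard tools (not part of the script above):

```bash
# Show NUMA nodes, their CPU lists, and per-node free memory.
numactl --hardware
# Summarize the NUMA layout as seen by the kernel.
lscpu | grep -i numa
```

The log from the Q8_0 run follows.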
```
SOCKET is set to: 1
main: build = 4211 (b2cb4512)
main: built with cc (Ubuntu 13.3.0-6ubuntu2~24.04) 13.3.0 for x86_64-linux-gnu
main: seed = 1337
CPU: using device CPU - 0 MiB free
llama_model_loader: loaded meta data with 43 key-value pairs and 843 tensors from /mnt/data/models/ubergarm/Qwen3-Coder-Next-GGUF/Qwen3-Coder-Next-Q8_0.gguf (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv 0: general.architecture str = qwen3next
llama_model_loader: - kv 1: general.type str = model
llama_model_loader: - kv 2: general.sampling.top_k i32 = 40
llama_model_loader: - kv 3: general.sampling.top_p f32 = 0.950000
llama_model_loader: - kv 4: general.sampling.temp f32 = 1.000000
llama_model_loader: - kv 5: general.name str = Qwen3 Coder Next
llama_model_loader: - kv 6: general.size_label str = 512x2.5B
llama_model_loader: - kv 7: general.license str = apache-2.0
llama_model_loader: - kv 8: general.license.link str = https://huggingface.co/Qwen/Qwen3-Cod...
llama_model_loader: - kv 9: general.tags arr[str,1] = ["text-generation"]
llama_model_loader: - kv 10: qwen3next.block_count u32 = 48
llama_model_loader: - kv 11: qwen3next.context_length u32 = 262144
llama_model_loader: - kv 12: qwen3next.embedding_length u32 = 2048
llama_model_loader: - kv 13: qwen3next.feed_forward_length u32 = 5120
llama_model_loader: - kv 14: qwen3next.attention.head_count u32 = 16
llama_model_loader: - kv 15: qwen3next.attention.head_count_kv u32 = 2
llama_model_loader: - kv 16: qwen3next.rope.freq_base f32 = 5000000.000000
llama_model_loader: - kv 17: qwen3next.attention.layer_norm_rms_epsilon f32 = 0.000001
llama_model_loader: - kv 18: qwen3next.expert_count u32 = 512
llama_model_loader: - kv 19: qwen3next.expert_used_count u32 = 10
llama_model_loader: - kv 20: qwen3next.attention.key_length u32 = 256
llama_model_loader: - kv 21: qwen3next.attention.value_length u32 = 256
llama_model_loader: - kv 22: general.file_type u32 = 7
llama_model_loader: - kv 23: qwen3next.expert_feed_forward_length u32 = 512
llama_model_loader: - kv 24: qwen3next.expert_shared_feed_forward_length u32 = 512
llama_model_loader: - kv 25: qwen3next.ssm.conv_kernel u32 = 4
llama_model_loader: - kv 26: qwen3next.ssm.state_size u32 = 128
llama_model_loader: - kv 27: qwen3next.ssm.group_count u32 = 16
llama_model_loader: - kv 28: qwen3next.ssm.time_step_rank u32 = 32
llama_model_loader: - kv 29: qwen3next.ssm.inner_size u32 = 4096
llama_model_loader: - kv 30: qwen3next.full_attention_interval u32 = 4
llama_model_loader: - kv 31: qwen3next.rope.dimension_count u32 = 64
llama_model_loader: - kv 32: general.quantization_version u32 = 2
llama_model_loader: - kv 33: tokenizer.ggml.model str = gpt2
llama_model_loader: - kv 34: tokenizer.ggml.pre str = qwen2
llama_model_loader: - kv 35: tokenizer.ggml.tokens arr[str,151936] = ["!", "\"", "#", "$", "%", "&", "'", ...
llama_model_loader: - kv 36: tokenizer.ggml.token_type arr[i32,151936] = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...
llama_model_loader: - kv 37: tokenizer.ggml.merges arr[str,151387] = ["Ġ Ġ", "ĠĠ ĠĠ", "i n", "Ġ t",...
llama_model_loader: - kv 38: tokenizer.ggml.eos_token_id u32 = 151645
llama_model_loader: - kv 39: tokenizer.ggml.padding_token_id u32 = 151643
llama_model_loader: - kv 40: tokenizer.ggml.bos_token_id u32 = 151643
llama_model_loader: - kv 41: tokenizer.ggml.add_bos_token bool = false
llama_model_loader: - kv 42: tokenizer.chat_template str = {% macro render_extra_keys(json_dict,...
llama_model_loader: - type f32: 361 tensors
llama_model_loader: - type q8_0: 482 tensors
load: printing all EOG tokens:
load: - 151643 ('<|endoftext|>')
load: - 151645 ('<|im_end|>')
load: - 151662 ('<|fim_pad|>')
load: - 151663 ('<|repo_name|>')
load: - 151664 ('<|file_sep|>')
load: special tokens cache size = 26
load: token to piece cache size = 0.9311 MB
llm_load_print_meta: format = GGUF V3 (latest)
llm_load_print_meta: arch = qwen3next
llm_load_print_meta: n_ctx_train = 262144
llm_load_print_meta: n_embd = 2048
llm_load_print_meta: n_layer = 48
llm_load_print_meta: n_head = 16
llm_load_print_meta: n_head_kv = 2
llm_load_print_meta: n_rot = 64
llm_load_print_meta: n_swa = 0
llm_load_print_meta: n_swa_pattern = 1
llm_load_print_meta: n_embd_head_k = 256
llm_load_print_meta: n_embd_head_v = 256
llm_load_print_meta: n_gqa = 8
llm_load_print_meta: n_embd_k_gqa = 512
llm_load_print_meta: n_embd_v_gqa = 512
llm_load_print_meta: f_norm_eps = 0.0e+00
llm_load_print_meta: f_norm_rms_eps = 1.0e-06
llm_load_print_meta: f_clamp_kqv = 0.0e+00
llm_load_print_meta: f_max_alibi_bias = 0.0e+00
llm_load_print_meta: f_logit_scale = 0.0e+00
llm_load_print_meta: n_ff = 5120
llm_load_print_meta: n_expert = 512
llm_load_print_meta: n_expert_used = 10
llm_load_print_meta: causal attn = 1
llm_load_print_meta: pooling type = 0
llm_load_print_meta: rope type = 2
llm_load_print_meta: rope scaling = linear
llm_load_print_meta: freq_base_train = 5000000.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: n_ctx_orig_yarn = 262144
llm_load_print_meta: rope_finetuned = unknown
llm_load_print_meta: ssm_d_conv = 4
llm_load_print_meta: ssm_d_inner = 4096
llm_load_print_meta: ssm_d_state = 128
llm_load_print_meta: ssm_dt_rank = 32
llm_load_print_meta: model type = 80B.A3B
llm_load_print_meta: model ftype = Q8_0
llm_load_print_meta: model params = 79.674 B
llm_load_print_meta: model size = 78.982 GiB (8.515 BPW)
llm_load_print_meta: repeating layers = 78.366 GiB (8.515 BPW, 79.052 B parameters)
llm_load_print_meta: general.name = Qwen3 Coder Next
print_info: vocab type = BPE
print_info: n_vocab = 151936
print_info: n_merges = 151387
print_info: BOS token = 151643 '<|endoftext|>'
print_info: EOS token = 151645 '<|im_end|>'
print_info: EOT token = 151645 '<|im_end|>'
print_info: PAD token = 151643 '<|endoftext|>'
print_info: LF token = 198 'Ċ'
print_info: FIM PRE token = 151659 '<|fim_prefix|>'
print_info: FIM SUF token = 151661 '<|fim_suffix|>'
print_info: FIM MID token = 151660 '<|fim_middle|>'
print_info: FIM PAD token = 151662 '<|fim_pad|>'
print_info: FIM REP token = 151663 '<|repo_name|>'
print_info: FIM SEP token = 151664 '<|file_sep|>'
print_info: EOG token = 151643 '<|endoftext|>'
print_info: EOG token = 151645 '<|im_end|>'
print_info: EOG token = 151662 '<|fim_pad|>'
print_info: EOG token = 151663 '<|repo_name|>'
print_info: EOG token = 151664 '<|file_sep|>'
print_info: max token length = 256
llm_load_tensors: ggml ctx size = 0.35 MiB
llm_load_tensors: offloading 0 repeating layers to GPU
llm_load_tensors: offloaded 0/49 layers to GPU
llm_load_tensors: CPU buffer size = 80877.56 MiB
....................................................................................................
llama_init_from_model: n_ctx = 2048
llama_init_from_model: n_batch = 2048
llama_init_from_model: n_ubatch = 512
llama_init_from_model: flash_attn = 1
llama_init_from_model: attn_max_b = 0
llama_init_from_model: fused_moe = 1
llama_init_from_model: grouped er = 0
llama_init_from_model: fused_up_gate = 1
llama_init_from_model: fused_mmad = 1
llama_init_from_model: rope_cache = 0
llama_init_from_model: graph_reuse = 1
llama_init_from_model: k_cache_hadam = 0
llama_init_from_model: split_mode_graph_scheduling = 0
llama_init_from_model: reduce_type = f16
llama_init_from_model: sched_async = 0
llama_init_from_model: ser = -1, 0
llama_init_from_model: freq_base = 5000000.0
llama_init_from_model: freq_scale = 1
llama_kv_cache_init: CPU KV buffer size = 349.50 MiB
llama_init_from_model: KV self size = 48.00 MiB, K (f16): 24.00 MiB, V (f16): 24.00 MiB
llama_init_from_model: CPU output buffer size = 2.32 MiB
llama_init_from_model: CPU compute buffer size = 300.75 MiB
llama_init_from_model: graph nodes = 12382
llama_init_from_model: graph splits = 1
llama_init_from_model: enabling only_active_experts scheduling
system_info: n_threads = 96 (n_threads_batch = 128) / 512 | AVX = 1 | AVX_VNNI = 1 | AVX2 = 1 | AVX512 = 1 | AVX512_VBMI = 1 | AVX512_VNNI = 1 | AVX512_BF16 = 1 | FMA = 1 | NEON = 0 | SVE = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 0 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | MATMUL_INT8 = 0 |
perplexity: tokenizing the input ..
perplexity: tokenization took 393.016 ms
perplexity: calculating perplexity over 584 chunks, n_ctx=512, batch_size=2048, n_seq=4
perplexity: 3.89 seconds per pass - ETA 9.45 minutes
===================================== llama_init_from_model: f16
======================================= HAVE_FANCY_SIMD is defined
[1]4.5875,[2]6.5391,[3]5.5418,[4]4.7813,[5]4.7239,[6]4.8641,[7]4.9464,[8]4.9799,[9]4.8794,[10]4.9186,[11]4.8311,[12]5.0638,[13]5.4758,[14]5.4421,[15]5.5581,[16]5.9216,[17]5.7716,[18]5.9584,[19]6.0471,[20]6.0826,[21]6.0039,[22]6.1138,[23]5.9257,[24]5.6949,[25]5.6193,[26]5.5165,[27]5.4492,[28]5.4022,[29]5.4725,[30]5.4682,[31]5.4576,[32]5.5297,[33]5.4780,[34]5.5340,[35]5.6289,[36]5.7241,[37]5.8537,[38]5.9517,[39]5.9865,[40]6.0831,[41]6.1320,[42]6.1626,[43]6.2308,[44]6.2354,[45]6.2713,[46]6.3169,[47]6.4885,[48]6.6022,[49]6.6025,[50]6.6506,[51]6.6955,[52]6.7621,[53]6.8298,[54]6.8792,[55]6.8985,[56]6.9732,[57]6.9892,[58]7.0319,[59]7.0810,[60]7.1243,[61]7.1677,[62]7.2040,[63]7.2667,[64]7.3282,[65]7.3910,[66]7.4599,[67]7.5227,[68]7.5168,[69]7.5369,[70]7.5450,[71]7.5845,[72]7.6558,[73]7.7078,[74]7.7400,[75]7.7186,[76]7.7322,[77]7.7932,[78]7.8266,[79]7.7395,[80]7.7131,[81]7.7061,[82]7.7441,[83]7.7189,[84]7.7117,[85]7.7374,[86]7.8199,[87]7.8592,[88]7.8776,[89]7.8941,[90]7.8811,[91]7.9399,[92]7.9209,[93]7.9644,[94]7.9827,[95]7.9716,[96]7.9631,[97]7.9575,[98]7.9951,[99]7.9748,[100]8.0542,[101]8.1007,[102]8.0984,[103]8.1046,[104]8.0933,[105]8.0955,[106]8.0986,[107]8.1340,[108]8.1685,[109]8.2070,[110]8.2639,[111]8.3726,[112]8.3847,[113]8.3486,[114]8.4009,[115]8.4281,[116]8.3756,[117]8.3775,[118]8.3687,[119]8.3335,[120]8.3531,[121]8.3422,[122]8.3302,[123]8.2914,[124]8.2508,[125]8.2295,[126]8.2170,[127]8.1696,[128]8.1543,[129]8.1240,[130]8.0819,[131]8.0492,[132]8.0243,[133]8.0171,[134]8.0301,[135]8.0236,[136]8.0272,[137]7.9992,[138]7.9701,[139]7.9854,[140]7.9697,[141]7.9669,[142]7.9874,[143]7.9921,[144]8.0284,[145]8.0062,[146]7.9704,[147]7.9344,[148]7.8958,[149]7.8752,[150]7.8334,[151]7.8219,[152]7.8126,[153]7.8092,[154]7.7721,[155]7.7746,[156]7.7377,[157]7.7190,[158]7.6944,[159]7.6786,[160]7.6406,[161]7.6243,[162]7.6177,[163]7.5988,[164]7.6113,[165]7.6010,[166]7.5931,[167]7.5936,[168]7.6145,[169]7.6185,[170]7.6464,[171]7.6500,[172]7.6718,[173]7.7234,[174]7.7359,[175]7.7903,[176]7.8151,[177]7.8677,[178]7.9072,[179]7.9127,[180]7.8864,[181]7.8569,[182]7.8669,[183]7.8351,[184]7.8213,[185]7.7934,[186]7.7633,[187]7.7423,[188]7.7376,[189]7.7526,[190]7.7795,[191]7.7907,[192]7.8004,[193]7.8020,[194]7.8192,[195]7.8349,[196]7.8423,[197]7.8494,[198]7.8328,[199]7.8242,[200]7.8113,[201]7.8119,[202]7.8288,[203]7.8531,[204]7.8743,[205]7.8922,[206]7.8944,[207]7.9204,[208]7.9081,[209]7.9079,[210]7.9063,[211]7.9100,[212]7.9139,[213]7.9127,[214]7.9003,[215]7.8869,[216]7.8808,[217]7.8881,[218]7.8830,[219]7.8656,[220]7.8363,[221]7.8221,[222]7.8089,[223]7.8070,[224]7.8140,[225]7.7961,[226]7.7891,[227]7.7782,[228]7.7538,[229]7.7259,[230]7.7102,[231]7.6916,[232]7.6795,[233]7.6742,[234]7.6720,[235]7.6704,[236]7.6567,[237]7.6480,[238]7.6346,[239]7.6289,[240]7.6378,[241]7.6485,[242]7.6605,[243]7.6586,[244]7.6725,[245]7.6758,[246]7.6980,[247]7.7085,[248]7.7137,[249]7.7237,[250]7.7270,[251]7.7456,[252]7.7635,[253]7.7988,[254]7.8217,[255]7.8256,[256]7.8426,[257]7.8581,[258]7.8453,[259]7.8316,[260]7.8173,[261]7.7956,[262]7.7840,[263]7.7788,[264]7.7764,[265]7.7850,[266]7.7912,[267]7.7904,[268]7.7816,[269]7.7872,[270]7.7839,[271]7.7786,[272]7.7765,[273]7.7742,[274]7.7705,[275]7.7655,[276]7.7512,[277]7.7516,[278]7.7500,[279]7.7421,[280]7.7375,[281]7.7329,[282]7.7307,[283]7.7072,[284]7.6786,[285]7.6887,[286]7.6725,[287]7.6578,[288]7.6568,[289]7.6535,[290]7.6756,[291]7.6809,[292]7.6808,[293]7.6829,[294]7.6995,[295]7.7105,[296]7.7202,[297]7.7424,[298]7.7402,[299]7.7307,[300]7.7315,[301]7.7251,[302]7.7273,[303]7.7223,[304]7.7467,[305]7.7518,[306]7.7497,[307]7.7536,[308]7.7547,[309]7.7544,[310]7.7602,[311]7.7630,[312]7.7532,[313]7.7490,[314]7.7557,[315]7.7434,[316]7.7452,[317]7.7606,[318]7.7680,[319]7.7611,[320]7.7641,[321]7.7534,[322]7.7638,[323]7.7729,[324]7.7806,[325]7.8009,[326]7.7988,[327]7.7870,[328]7.7917,[329]7.7791,[330]7.7707,[331]7.7643,[332]7.7649,[333]7.7685,[334]7.7650,[335]7.7566,[336]7.7590,[337]7.7650,[338]7.7781,[339]7.7748,[340]7.7709,[341]7.7633,[342]7.7633,[343]7.7624,[344]7.7673,[345]7.7760,[346]7.7717,[347]7.7597,[348]7.7629,[349]7.7586,[350]7.7497,[351]7.7482,[352]7.7532,[353]7.7523,[354]7.7426,[355]7.7545,[356]7.7631,[357]7.7686,[358]7.7614,[359]7.7656,[360]7.7660,[361]7.7759,[362]7.7672,[363]7.7609,[364]7.7680,[365]7.7866,[366]7.8134,[367]7.8287,[368]7.8593,[369]7.8742,[370]7.8896,[371]7.9130,[372]7.9338,[373]7.9449,[374]7.9543,[375]7.9728,[376]7.9863,[377]7.9976,[378]8.0101,[379]8.0213,[380]8.0378,[381]8.0547,[382]8.0647,[383]8.0727,[384]8.0838,[385]8.1097,[386]8.1306,[387]8.1295,[388]8.1297,[389]8.1393,[390]8.1631,[391]8.1808,[392]8.1749,[393]8.1734,[394]8.1663,[395]8.1673,[396]8.1756,[397]8.1846,[398]8.1902,[399]8.1980,[400]8.2104,[401]8.2112,[402]8.2106,[403]8.2021,[404]8.1795,[405]8.1670,[406]8.1673,[407]8.1752,[408]8.1851,[409]8.1873,[410]8.1961,[411]8.2133,[412]8.2197,[413]8.2181,[414]8.2159,[415]8.2107,[416]8.2037,[417]8.2099,[418]8.2194,[419]8.2237,[420]8.2255,[421]8.2326,[422]8.2217,[423]8.2213,[424]8.2240,[425]8.2268,[426]8.2283,[427]8.2356,[428]8.2494,[429]8.2573,[430]8.2530,[431]8.2494,[432]8.2536,[433]8.2568,[434]8.2587,[435]8.2682,[436]8.2620,[437]8.2673,[438]8.2690,[439]8.2641,[440]8.2684,[441]8.2672,[442]8.2645,[443]8.2568,[444]8.2591,[445]8.2505,[446]8.2530,[447]8.2469,[448]8.2416,[449]8.2355,[450]8.2415,[451]8.2405,[452]8.2285,[453]8.2199,[454]8.2177,[455]8.2240,[456]8.2229,[457]8.2282,[458]8.2418,[459]8.2390,[460]8.2393,[461]8.2374,[462]8.2352,[463]8.2458,[464]8.2449,[465]8.2458,[466]8.2483,[467]8.2540,[468]8.2595,[469]8.2643,[470]8.2695,[471]8.2593,[472]8.2674,[473]8.2575,[474]8.2567,[475]8.2620,[476]8.2605,[477]8.2506,[478]8.2361,[479]8.2396,[480]8.2475,[481]8.2514,[482]8.2405,[483]8.2480,[484]8.2555,[485]8.2596,[486]8.2591,[487]8.2645,[488]8.2599,[489]8.2489,[490]8.2482,[491]8.2419,[492]8.2427,[493]8.2344,[494]8.2327,[495]8.2281,[496]8.2245,[497]8.2362,[498]8.2428,[499]8.2357,[500]8.2358,[501]8.2366,[502]8.2351,[503]8.2484,[504]8.2517,[505]8.2556,[506]8.2535,[507]8.2504,[508]8.2543,[509]8.2519,[510]8.2510,[511]8.2543,[512]8.2496,[513]8.2516,[514]8.2549,[515]8.2549,[516]8.2573,[517]8.2604,[518]8.2537,[519]8.2540,[520]8.2569,[521]8.2595,[522]8.2499,[523]8.2494,[524]8.2466,[525]8.2499,[526]8.2552,[527]8.2570,[528]8.2560,[529]8.2508,[530]8.2471,[531]8.2510,[532]8.2483,[533]8.2472,[534]8.2475,[535]8.2486,[536]8.2413,[537]8.2474,[538]8.2560,[539]8.2523,[540]8.2648,[541]8.2669,[542]8.2615,[543]8.2644,[544]8.2708,[545]8.2668,[546]8.2583,[547]8.2508,[548]8.2356,[549]8.2363,[550]8.2200,[551]8.2092,[552]8.1997,[553]8.1730,[554]8.1722,[555]8.1753,[556]8.1762,[557]8.1790,[558]8.1785,[559]8.1850,[560]8.1915,[561]8.2008,[562]8.2139,[563]8.2215,[564]8.2194,[565]8.2281,[566]8.2281,[567]8.2141,[568]8.2063,[569]8.2034,[570]8.2029,[571]8.2027,[572]8.2051,[573]8.2058,[574]8.2074,[575]8.2069,[576]8.2131,[577]8.2080,[578]8.2128,[579]8.2179,[580]8.2319,[581]8.2334,[582]8.2452,[583]8.2296,[584]8.2239,
llama_print_timings: load time = 17688.23 ms
llama_print_timings: sample time = 0.00 ms / 1 runs ( 0.00 ms per token, inf tokens per second)
llama_print_timings: prompt eval time = 476918.01 ms / 299008 tokens ( 1.60 ms per token, 626.96 tokens per second)
llama_print_timings: eval time = 0.00 ms / 1 runs ( 0.00 ms per token, inf tokens per second)
llama_print_timings: total time = 488325.61 ms / 299009 tokens

Final estimate: PPL over 584 chunks for n_ctx=512 = 8.2239 +/- 0.06389
```
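`llama-perplexity` reports the exponential of the mean negative log-likelihood over the 584 chunks of 512 tokens, so the 8.2239 figure is directly comparable between quants of this model as long as the test file, `n_ctx`, and chunk count match. For side-by-side comparisons it is handy to scrape the final line of each run; a minimal sketch using GNU grep's `-P`, assuming each run's output was saved to a `ppl-*.log` file (the names here are placeholders, not files produced by the script above):

```bash
# Pull the "Final estimate" PPL out of several captured runs.
# The ppl-*.log names are hypothetical; point them at your own logs.
for log in ppl-BF16.log ppl-Q8_0.log ppl-IQ4_KSS.log ppl-smol-IQ2_KS.log; do
    [ -f "$log" ] || continue
    printf '%-22s %s\n' "$log" "$(grep -oP 'Final estimate: PPL.* = \K.*' "$log")"
done
```

Lower is better; the `+/-` value is the standard error of the estimate, so quants whose intervals overlap heavily are effectively indistinguishable on this test.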