jdp8 commited on
Commit
c3400f2
·
verified ·
1 Parent(s): 828bcb7

Upload new WASM files

Browse files
This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50) hide show
  1. web-llm-models/v0_2_80/Llama-2-13b-chat-hf-q4f16_1-ctx4k_cs1k-webgpu.wasm +3 -0
  2. web-llm-models/v0_2_80/Llama-2-7b-chat-hf-q4f16_1-ctx4k_cs1k-webgpu.wasm +3 -0
  3. web-llm-models/v0_2_80/Llama-2-7b-chat-hf-q4f32_1-ctx4k_cs1k-webgpu.wasm +3 -0
  4. web-llm-models/v0_2_80/Llama-3-70B-Instruct-q3f16_1-ctx4k_cs1k-webgpu.wasm +3 -0
  5. web-llm-models/v0_2_80/Llama-3-8B-Instruct-q4f16_1-ctx4k_cs1k-webgpu.wasm +3 -0
  6. web-llm-models/v0_2_80/Llama-3-8B-Instruct-q4f32_1-ctx4k_cs1k-webgpu.wasm +3 -0
  7. web-llm-models/v0_2_80/Llama-3.2-1B-Instruct-q0f16-ctx4k_cs1k-webgpu.wasm +3 -0
  8. web-llm-models/v0_2_80/Llama-3.2-1B-Instruct-q0f32-ctx4k_cs1k-webgpu.wasm +3 -0
  9. web-llm-models/v0_2_80/Llama-3.2-1B-Instruct-q4f16_1-ctx4k_cs1k-webgpu.wasm +3 -0
  10. web-llm-models/v0_2_80/Llama-3.2-1B-Instruct-q4f32_1-ctx4k_cs1k-webgpu.wasm +3 -0
  11. web-llm-models/v0_2_80/Llama-3.2-3B-Instruct-q4f16_1-ctx4k_cs1k-webgpu.wasm +3 -0
  12. web-llm-models/v0_2_80/Llama-3.2-3B-Instruct-q4f32_1-ctx4k_cs1k-webgpu.wasm +3 -0
  13. web-llm-models/v0_2_80/Llama-3_1-70B-Instruct-q3f16_1-ctx4k_cs1k-webgpu.wasm +3 -0
  14. web-llm-models/v0_2_80/Llama-3_1-8B-Instruct-q4f16_1-ctx4k_cs1k-webgpu.wasm +3 -0
  15. web-llm-models/v0_2_80/Llama-3_1-8B-Instruct-q4f32_1-ctx4k_cs1k-webgpu.wasm +3 -0
  16. web-llm-models/v0_2_80/Ministral-3-3B-Base-2512-q4f16_1-ctx4k_cs1k-webgpu.wasm +3 -0
  17. web-llm-models/v0_2_80/Ministral-3-3B-Instruct-2512-BF16-q4f16_1-ctx4k_cs1k-webgpu.wasm +3 -0
  18. web-llm-models/v0_2_80/Ministral-3-3B-Reasoning-2512-q4f16_1-ctx4k_cs1k-webgpu.wasm +3 -0
  19. web-llm-models/v0_2_80/Mistral-7B-Instruct-v0.3-q4f16_1-ctx4k_cs1k-webgpu.wasm +3 -0
  20. web-llm-models/v0_2_80/Mistral-7B-Instruct-v0.3-q4f32_1-ctx4k_cs1k-webgpu.wasm +3 -0
  21. web-llm-models/v0_2_80/Phi-3-mini-4k-instruct-old-q4f16_1-ctx4k_cs1k-webgpu.wasm +3 -0
  22. web-llm-models/v0_2_80/Phi-3-mini-4k-instruct-q4f16_1-ctx4k_cs1k-webgpu.wasm +3 -0
  23. web-llm-models/v0_2_80/Phi-3-mini-4k-instruct-q4f32_1-ctx4k_cs1k-webgpu.wasm +3 -0
  24. web-llm-models/v0_2_80/Phi-3.5-mini-instruct-q4f16_1-ctx4k_cs1k-webgpu.wasm +3 -0
  25. web-llm-models/v0_2_80/Phi-3.5-mini-instruct-q4f32_1-ctx4k_cs1k-webgpu.wasm +3 -0
  26. web-llm-models/v0_2_80/Phi-3.5-vision-instruct-q4f16_1-ctx4k_cs2k-webgpu.wasm +3 -0
  27. web-llm-models/v0_2_80/Phi-3.5-vision-instruct-q4f32_1-ctx4k_cs2k-webgpu.wasm +3 -0
  28. web-llm-models/v0_2_80/Qwen2-0.5B-Instruct-q0f16-ctx4k_cs1k-webgpu.wasm +3 -0
  29. web-llm-models/v0_2_80/Qwen2-0.5B-Instruct-q0f32-ctx4k_cs1k-webgpu.wasm +3 -0
  30. web-llm-models/v0_2_80/Qwen2-0.5B-Instruct-q4f16_1-ctx4k_cs1k-webgpu.wasm +3 -0
  31. web-llm-models/v0_2_80/Qwen2-0.5B-Instruct-q4f32_1-ctx4k_cs1k-webgpu.wasm +3 -0
  32. web-llm-models/v0_2_80/Qwen2-1.5B-Instruct-q4f16_1-ctx4k_cs1k-webgpu.wasm +3 -0
  33. web-llm-models/v0_2_80/Qwen2-1.5B-Instruct-q4f32_1-ctx4k_cs1k-webgpu.wasm +3 -0
  34. web-llm-models/v0_2_80/Qwen2-7B-Instruct-q4f16_1-ctx4k_cs1k-webgpu.wasm +3 -0
  35. web-llm-models/v0_2_80/Qwen2-7B-Instruct-q4f32_1-ctx4k_cs1k-webgpu.wasm +3 -0
  36. web-llm-models/v0_2_80/Qwen2.5-3B-Instruct-q4f16_1-ctx4k_cs1k-webgpu.wasm +3 -0
  37. web-llm-models/v0_2_80/Qwen2.5-3B-Instruct-q4f32_1-ctx4k_cs1k-webgpu.wasm +3 -0
  38. web-llm-models/v0_2_80/Qwen3-0.6B-q0f16-ctx4k_cs1k-webgpu.wasm +3 -0
  39. web-llm-models/v0_2_80/Qwen3-0.6B-q0f32-ctx4k_cs1k-webgpu.wasm +3 -0
  40. web-llm-models/v0_2_80/Qwen3-0.6B-q4f16_1-ctx4k_cs1k-webgpu.wasm +3 -0
  41. web-llm-models/v0_2_80/Qwen3-0.6B-q4f32_1-ctx4k_cs1k-webgpu.wasm +3 -0
  42. web-llm-models/v0_2_80/Qwen3-1.7B-q4f16_1-ctx4k_cs1k-webgpu.wasm +3 -0
  43. web-llm-models/v0_2_80/Qwen3-1.7B-q4f32_1-ctx4k_cs1k-webgpu.wasm +3 -0
  44. web-llm-models/v0_2_80/Qwen3-4B-Instruct-2507-q4f16_1-ctx4k_cs1k-webgpu.wasm +3 -0
  45. web-llm-models/v0_2_80/Qwen3-4B-Instruct-2507-q4f32_1-ctx4k_cs1k-webgpu.wasm +3 -0
  46. web-llm-models/v0_2_80/Qwen3-4B-Thinking-2507-q4f16_1-ctx4k_cs1k-webgpu.wasm +3 -0
  47. web-llm-models/v0_2_80/Qwen3-4B-Thinking-2507-q4f32_1-ctx4k_cs1k-webgpu.wasm +3 -0
  48. web-llm-models/v0_2_80/Qwen3-4B-q4f16_1-ctx4k_cs1k-webgpu.wasm +3 -0
  49. web-llm-models/v0_2_80/Qwen3-4B-q4f32_1-ctx4k_cs1k-webgpu.wasm +3 -0
  50. web-llm-models/v0_2_80/Qwen3-8B-q4f16_1-ctx4k_cs1k-webgpu.wasm +3 -0
web-llm-models/v0_2_80/Llama-2-13b-chat-hf-q4f16_1-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:52a52a6a8d8904cb56cacdbdc0ff78a90cd195fda92c66a2640ef1f2742451e6
3
+ size 6685344
web-llm-models/v0_2_80/Llama-2-7b-chat-hf-q4f16_1-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5988d0a28cbf992cf462a7ef0d1ce5dc7586f086418f3cd350f98ab88c4b880f
3
+ size 6257536
web-llm-models/v0_2_80/Llama-2-7b-chat-hf-q4f32_1-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:993a3996fff1139069e8265bc784d7f2768d3d562a8fe871b437b0633a47614c
3
+ size 6041086
web-llm-models/v0_2_80/Llama-3-70B-Instruct-q3f16_1-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ace8e299187b5f33a9b70d21f5d7fa5a3e57f2385297588d61e729cd52c1954a
3
+ size 8675576
web-llm-models/v0_2_80/Llama-3-8B-Instruct-q4f16_1-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:addfad8cc99535a23184b965427baf58ac0125f1aa572d8a080a8bce9f7bd594
3
+ size 6287983
web-llm-models/v0_2_80/Llama-3-8B-Instruct-q4f32_1-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:356464745dff82129d4c1e2ed46d6601d533f583ab2c7ce5ebea41d83e2c206e
3
+ size 6071871
web-llm-models/v0_2_80/Llama-3.2-1B-Instruct-q0f16-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c1b5c56181fcbbea0bb6059fcccb5df29d1ec7c24dc2e8eb7a4a2f3da2afc652
3
+ size 5259462
web-llm-models/v0_2_80/Llama-3.2-1B-Instruct-q0f32-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f21496566a0dc6c76cb7d7442a325de8d6ecced5b3b75e5b33279a7031a5672b
3
+ size 5168895
web-llm-models/v0_2_80/Llama-3.2-1B-Instruct-q4f16_1-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c7b8dad5e6e136819f726235ddd8f99da6086f1f4d4b9b2b035249d764a31cc1
3
+ size 5498539
web-llm-models/v0_2_80/Llama-3.2-1B-Instruct-q4f32_1-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fd4c5654098a833b6367a97b977f355de0a8c8b4cbec3322d2d91f7ef838c5a9
3
+ size 5383379
web-llm-models/v0_2_80/Llama-3.2-3B-Instruct-q4f16_1-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:34de0d60ab598c6a85ae882b48474f250193076f902057a21070bb2daae96d5b
3
+ size 6131270
web-llm-models/v0_2_80/Llama-3.2-3B-Instruct-q4f32_1-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:224e4cd5c235621684257aad3ebfdaf6e337d50f783614d1e7ed63e78d693ad6
3
+ size 5935137
web-llm-models/v0_2_80/Llama-3_1-70B-Instruct-q3f16_1-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0479882a8d99f1a24cb4638169b502de6c004fc9ad2b758f2a47aaa7ac241208
3
+ size 8684357
web-llm-models/v0_2_80/Llama-3_1-8B-Instruct-q4f16_1-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ebfaaeb06478b598990176e57564ef7e36f7a47e8cb1b8bbf888e27305489679
3
+ size 6296764
web-llm-models/v0_2_80/Llama-3_1-8B-Instruct-q4f32_1-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:40c9e1d68be51144d0f21b42d9e863d6662b59e339af3d46a73da368b814e992
3
+ size 6078595
web-llm-models/v0_2_80/Ministral-3-3B-Base-2512-q4f16_1-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2e9cfbdc5df6d8c4f194201896e1d4514056bbc045c07f073efa9c60d905a03f
3
+ size 5427883
web-llm-models/v0_2_80/Ministral-3-3B-Instruct-2512-BF16-q4f16_1-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2e9cfbdc5df6d8c4f194201896e1d4514056bbc045c07f073efa9c60d905a03f
3
+ size 5427883
web-llm-models/v0_2_80/Ministral-3-3B-Reasoning-2512-q4f16_1-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2e9cfbdc5df6d8c4f194201896e1d4514056bbc045c07f073efa9c60d905a03f
3
+ size 5427883
web-llm-models/v0_2_80/Mistral-7B-Instruct-v0.3-q4f16_1-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5bbdc492f08c21911868c55f23fc4a6b8932adf2179bcf26ccf2ad7fbd0561f7
3
+ size 5456419
web-llm-models/v0_2_80/Mistral-7B-Instruct-v0.3-q4f32_1-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:900df8f5036b014cfd15639ebde77c3ef998bdbab412e98f4f616b64462ddeda
3
+ size 5320391
web-llm-models/v0_2_80/Phi-3-mini-4k-instruct-old-q4f16_1-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:db9c6c6043d7049da13c9cffce0d59aae91e1824729d84ad41920e98e627164d
3
+ size 5461675
web-llm-models/v0_2_80/Phi-3-mini-4k-instruct-q4f16_1-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:db9c6c6043d7049da13c9cffce0d59aae91e1824729d84ad41920e98e627164d
3
+ size 5461675
web-llm-models/v0_2_80/Phi-3-mini-4k-instruct-q4f32_1-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0ad8cf53263e1db187baa9835e0bfe6ac1c1adfc5c79ace44378cb579bc0c648
3
+ size 5340809
web-llm-models/v0_2_80/Phi-3.5-mini-instruct-q4f16_1-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2f58ffc1ed1dc812e436e73f3507a9ffdf9c564dd3c55e0ae3be789d4a2baa6f
3
+ size 5471501
web-llm-models/v0_2_80/Phi-3.5-mini-instruct-q4f32_1-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3ca419625e62d6b2a5980725523b077465f810fcf03080adf03366f6b28ddb1f
3
+ size 5350232
web-llm-models/v0_2_80/Phi-3.5-vision-instruct-q4f16_1-ctx4k_cs2k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:279661c096bcabdcaaa4652b6586ab1aa9b31acf546364af7bfb087b0862b205
3
+ size 7017449
web-llm-models/v0_2_80/Phi-3.5-vision-instruct-q4f32_1-ctx4k_cs2k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7aa07be8fb7d24b4f730131a250539f347d694ed42e6bd8d141ce71849745190
3
+ size 6882723
web-llm-models/v0_2_80/Qwen2-0.5B-Instruct-q0f16-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ab3b3d81d8c6a15c1bd166a93a1e726fdd898736b855c7aa71f8379df56bd102
3
+ size 4934604
web-llm-models/v0_2_80/Qwen2-0.5B-Instruct-q0f32-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:22ff63b9ac16e775e4ad3ecc3416931424b9b969281ed5963cb9006a35399e88
3
+ size 4905353
web-llm-models/v0_2_80/Qwen2-0.5B-Instruct-q4f16_1-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:25495a03bf7ac80a1925bc91e236c74c3ad52be89a9ed0d0eeb18e4a9d135234
3
+ size 5008576
web-llm-models/v0_2_80/Qwen2-0.5B-Instruct-q4f32_1-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e9e8485cdb34adba3afa3b01ea31d323a96fc95e2cf04535f5f5841336efbe94
3
+ size 4980266
web-llm-models/v0_2_80/Qwen2-1.5B-Instruct-q4f16_1-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6cd9f132ad258d2017291bb62e955e337f8ae4af74650f4b1ea5a803a7cec538
3
+ size 5383844
web-llm-models/v0_2_80/Qwen2-1.5B-Instruct-q4f32_1-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e3a7c95f4dd67ae11c91e7ebe8e849e9701be34ecf9b83963bb45a6ba1aaa402
3
+ size 5265971
web-llm-models/v0_2_80/Qwen2-7B-Instruct-q4f16_1-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b1189537b68f7c3a9a1535cf9fb74d9b550b9b86fe7f61d895bec9645af31f2c
3
+ size 5498734
web-llm-models/v0_2_80/Qwen2-7B-Instruct-q4f32_1-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9f1bb3c1c6148a07fe0c3bc8903d432c9ef213c1e8455d318e5b7fa8f90bfc57
3
+ size 5372989
web-llm-models/v0_2_80/Qwen2.5-3B-Instruct-q4f16_1-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ba07594162783740a4426007351cbc0ee1c30d6d0da9617561b6a3a27fe3b63f
3
+ size 5599880
web-llm-models/v0_2_80/Qwen2.5-3B-Instruct-q4f32_1-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ec5719a9167313fb4fa3d379be8eed8b6920e47d1f5467609003b85197b1a74b
3
+ size 5461029
web-llm-models/v0_2_80/Qwen3-0.6B-q0f16-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f72edc58397cee4f593dba539c4b944ac25cf691eb9f614b8c8b3ae7765e94c5
3
+ size 5564970
web-llm-models/v0_2_80/Qwen3-0.6B-q0f32-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:78bacc57e62992b78c4270fb648ac5a48512224888a74ba259f68d22bfa74f86
3
+ size 5474695
web-llm-models/v0_2_80/Qwen3-0.6B-q4f16_1-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7a51cff80ad5fe1539e033a83b9a7d240957d37e3d8e620c6b43648aba90a0cc
3
+ size 5760161
web-llm-models/v0_2_80/Qwen3-0.6B-q4f32_1-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6608536eb7173c62652e2186f53af5b908fd66542c12eaa7860a9b7f4c7db298
3
+ size 5646002
web-llm-models/v0_2_80/Qwen3-1.7B-q4f16_1-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8b0acc85b34ee45019d8df373afdba0da889caa32970169d34710b4fe43d3b06
3
+ size 5790731
web-llm-models/v0_2_80/Qwen3-1.7B-q4f32_1-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:555e177216386434e8de9a7ca1f01706de41b33762e40b1f8b5d54a0732dbdf0
3
+ size 5671614
web-llm-models/v0_2_80/Qwen3-4B-Instruct-2507-q4f16_1-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1a739267ccbb44057a7886797ed0855a5cdba206ab25bcc73732ec03e1738d32
3
+ size 6070240
web-llm-models/v0_2_80/Qwen3-4B-Instruct-2507-q4f32_1-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b8ea680da66e6a4cd51ba067c5d3d9be8bf365c0477803bccec3f261f9b303a6
3
+ size 5929497
web-llm-models/v0_2_80/Qwen3-4B-Thinking-2507-q4f16_1-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1a739267ccbb44057a7886797ed0855a5cdba206ab25bcc73732ec03e1738d32
3
+ size 6070240
web-llm-models/v0_2_80/Qwen3-4B-Thinking-2507-q4f32_1-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b8ea680da66e6a4cd51ba067c5d3d9be8bf365c0477803bccec3f261f9b303a6
3
+ size 5929497
web-llm-models/v0_2_80/Qwen3-4B-q4f16_1-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5ddf44e49b03e53e24fd29a45591850924346140452f60c29280190388571340
3
+ size 6070240
web-llm-models/v0_2_80/Qwen3-4B-q4f32_1-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6484148ff499c15433ab85e4d2c376aef453a6d797f48047e6bd6fd495a59e39
3
+ size 5929497
web-llm-models/v0_2_80/Qwen3-8B-q4f16_1-ctx4k_cs1k-webgpu.wasm ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fd0b332c81212e484ba56c15aac5af2548f14f849d3352039ad68d3c4663b6dd
3
+ size 6079574