Commit 0ed2b5e (verified) · 0 parent(s)

Super-squash history to reclaim storage
Files changed:

- .gitattributes +63 -0
- README.md +127 -0
- instinct-bf16.gguf +3 -0
- instinct-bf16_q8_0.gguf +3 -0
- instinct-f16_q8_0.gguf +3 -0
- instinct-imatrix.gguf +3 -0
- instinct-iq1_m.gguf +3 -0
- instinct-iq1_s.gguf +3 -0
- instinct-iq2_m.gguf +3 -0
- instinct-iq2_s.gguf +3 -0
- instinct-iq2_xs.gguf +3 -0
- instinct-iq2_xxs.gguf +3 -0
- instinct-iq3_m.gguf +3 -0
- instinct-iq3_xs.gguf +3 -0
- instinct-iq3_xxs.gguf +3 -0
- instinct-iq4_nl.gguf +3 -0
- instinct-iq4_xs.gguf +3 -0
- instinct-q2_k_m.gguf +3 -0
- instinct-q2_k_s.gguf +3 -0
- instinct-q3_k_m.gguf +3 -0
- instinct-q3_k_s.gguf +3 -0
- instinct-q4_0.gguf +3 -0
- instinct-q4_1.gguf +3 -0
- instinct-q4_k_m.gguf +3 -0
- instinct-q4_k_s.gguf +3 -0
- instinct-q5_0.gguf +3 -0
- instinct-q5_1.gguf +3 -0
- instinct-q5_k_m.gguf +3 -0
- instinct-q6_k_m.gguf +3 -0
- instinct-q8_0.gguf +3 -0
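
All of the .gguf files above are added as Git LFS pointer files (see the .gitattributes diff below), so the repository history stays small while the multi-gigabyte weights live in LFS storage. As a rough sketch, a single quantization can be fetched without cloning the whole repo using `huggingface-cli`; the repo id below is a placeholder, so substitute this repository's actual id:

```bash
# Sketch: download one quantization directly (the repo id is a placeholder, not confirmed by this page).
pip install -U "huggingface_hub[cli]"
huggingface-cli download <namespace>/instinct-GGUF instinct-q4_k_m.gguf --local-dir ./models
```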
.gitattributes ADDED
@@ -0,0 +1,63 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
instinct-f16_q8_0.gguf filter=lfs diff=lfs merge=lfs -text
instinct-bf16_q8_0.gguf filter=lfs diff=lfs merge=lfs -text
instinct-q2_k_m.gguf filter=lfs diff=lfs merge=lfs -text
instinct-q2_k_s.gguf filter=lfs diff=lfs merge=lfs -text
instinct-q3_k_m.gguf filter=lfs diff=lfs merge=lfs -text
instinct-q3_k_s.gguf filter=lfs diff=lfs merge=lfs -text
instinct-q4_k_m.gguf filter=lfs diff=lfs merge=lfs -text
instinct-q5_k_m.gguf filter=lfs diff=lfs merge=lfs -text
instinct-q6_k_m.gguf filter=lfs diff=lfs merge=lfs -text
instinct-q4_k_s.gguf filter=lfs diff=lfs merge=lfs -text
instinct-q8_0.gguf filter=lfs diff=lfs merge=lfs -text
instinct-q4_0.gguf filter=lfs diff=lfs merge=lfs -text
instinct-q4_1.gguf filter=lfs diff=lfs merge=lfs -text
instinct-q5_0.gguf filter=lfs diff=lfs merge=lfs -text
instinct-q5_1.gguf filter=lfs diff=lfs merge=lfs -text
instinct-iq1_s.gguf filter=lfs diff=lfs merge=lfs -text
instinct-iq1_m.gguf filter=lfs diff=lfs merge=lfs -text
instinct-iq2_xs.gguf filter=lfs diff=lfs merge=lfs -text
instinct-iq2_xxs.gguf filter=lfs diff=lfs merge=lfs -text
instinct-iq2_s.gguf filter=lfs diff=lfs merge=lfs -text
instinct-iq2_m.gguf filter=lfs diff=lfs merge=lfs -text
instinct-iq3_xs.gguf filter=lfs diff=lfs merge=lfs -text
instinct-iq3_xxs.gguf filter=lfs diff=lfs merge=lfs -text
instinct-iq3_m.gguf filter=lfs diff=lfs merge=lfs -text
instinct-iq4_xs.gguf filter=lfs diff=lfs merge=lfs -text
instinct-iq4_nl.gguf filter=lfs diff=lfs merge=lfs -text
instinct-imatrix.gguf filter=lfs diff=lfs merge=lfs -text
instinct-bf16.gguf filter=lfs diff=lfs merge=lfs -text
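
Rules like the ones above are usually generated with `git lfs track` rather than written by hand; a minimal sketch, assuming Git LFS is installed locally:

```bash
# Sketch: register GGUF files with Git LFS so commits store pointer files instead of full blobs.
git lfs install            # one-time setup per machine
git lfs track "*.gguf"     # appends a "filter=lfs diff=lfs merge=lfs -text" rule to .gitattributes
git add .gitattributes
```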
README.md ADDED
@@ -0,0 +1,127 @@
---
license: apache-2.0
datasets:
- continuedev/instinct-data
base_model:
- Qwen/Qwen2.5-Coder-7B
pipeline_tag: text-generation
library_name: transformers
---

# <span style="color: #7FFF7F;">instinct GGUF Models</span>

## <span style="color: #7F7FFF;">Model Generation Details</span>

This model was generated using [llama.cpp](https://github.com/ggerganov/llama.cpp) at commit [`408ff524`](https://github.com/ggerganov/llama.cpp/commit/408ff524b40baf4f51a81d42a9828200dd4fcb6b).
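
The exact conversion settings are not recorded in this card, but a typical llama.cpp GGUF pipeline looks roughly like the sketch below; the source path, calibration file, and chosen quant type are illustrative placeholders, not the precise commands used for these files:

```bash
# Rough sketch of a typical llama.cpp GGUF pipeline (illustrative, not the exact commands used here).
git clone https://github.com/ggerganov/llama.cpp && cd llama.cpp
cmake -B build && cmake --build build --config Release

# 1. Convert the original Hugging Face checkpoint to a high-precision GGUF.
python convert_hf_to_gguf.py /path/to/instinct --outtype bf16 --outfile instinct-bf16.gguf

# 2. Build an importance matrix from a calibration text file (calibration.txt is a placeholder).
./build/bin/llama-imatrix -m instinct-bf16.gguf -f calibration.txt -o instinct-imatrix.gguf

# 3. Produce a quantized variant guided by the importance matrix.
./build/bin/llama-quantize --imatrix instinct-imatrix.gguf instinct-bf16.gguf instinct-q4_k_m.gguf q4_k_m
```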

---

## <span style="color: #7FFF7F;">Quantization Beyond the IMatrix</span>

I've been experimenting with a new quantization approach that selectively elevates the precision of key layers beyond what the default IMatrix configuration provides.

In my testing, standard IMatrix quantization underperforms at lower bit depths, especially with Mixture of Experts (MoE) models. To address this, I'm using the `--tensor-type` option in `llama.cpp` to manually "bump" important layers to higher precision. You can see the implementation here:
👉 [Layer bumping with llama.cpp](https://github.com/Mungert69/GGUFModelBuilder/blob/main/model-converter/tensor_list_builder.py)

While this does increase model file size, it significantly improves precision for a given quantization level.
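
As a rough illustration of what that bumping looks like on the command line (the tensor-name patterns and target types here are placeholders chosen for the example; the linked `tensor_list_builder.py` selects them per model, and the exact flag syntax can differ between llama.cpp versions):

```bash
# Sketch: force selected tensor groups to higher precision while quantizing the rest to Q4_K_M.
# The patterns (attn_v, ffn_down) and types (q6_k, q5_k) are illustrative only.
./build/bin/llama-quantize \
  --imatrix instinct-imatrix.gguf \
  --tensor-type attn_v=q6_k \
  --tensor-type ffn_down=q5_k \
  instinct-bf16.gguf instinct-q4_k_m.gguf q4_k_m
```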

### **I'd love your feedback—have you tried this? How does it perform for you?**

---

<a href="https://readyforquantum.com/huggingface_gguf_selection_guide.html" style="color: #7FFF7F;">
Click here to get info on choosing the right GGUF model format
</a>

---

<!--Begin Original Model Card-->

<img src="https://cdn-uploads.huggingface.co/production/uploads/686c5c546abedce0f7ac048a/B7PeaDQCDnlgT3Tmf7fsb.png" width=250>

# Instinct, the State-of-the-Art Open Next Edit Model

This repo contains the model weights for [Continue](https://continue.dev)'s state-of-the-art open Next Edit model, **Instinct**. Robustly fine-tuned from Qwen2.5-Coder-7B on our [dataset of real-world code edits](https://huggingface.co/datasets/continuedev/instinct-data), Instinct intelligently predicts your next move to keep you in flow.

## Serving the model

**Ollama**: We've released a [Q4_K_M GGUF quantization of Instinct](https://huggingface.co/continuedev/instinct-GGUF) for efficient local inference. Try it with [Continue's Ollama integration](https://docs.continue.dev/guides/ollama-guide), or just run `ollama run nate/instinct`.

You can also serve the model using either of the options below, then [connect it with Continue](https://docs.continue.dev/guides/how-to-self-host-a-model).

**SGLang**: `python3 -m sglang.launch_server --model-path continuedev/instinct --load-format safetensors`
<br>**vLLM**: `vllm serve continuedev/instinct --served-model-name instinct --load-format safetensors`
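
Both SGLang and vLLM expose an OpenAI-compatible HTTP API, so a quick smoke test could look like the sketch below (port 8000 is vLLM's default; SGLang defaults to 30000 unless you pass `--port`, and the prompt is only an example):

```bash
# Sketch: query a locally served model through the OpenAI-compatible completions endpoint.
curl http://localhost:8000/v1/completions \
  -H "Content-Type: application/json" \
  -d '{
        "model": "instinct",
        "prompt": "def fibonacci(n):",
        "max_tokens": 64
      }'
```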

## Learn more

For more information on the work behind Instinct, please refer to our [blog](https://blog.continue.dev/instinct/).

<!--End Original Model Card-->

---

# <span id="testllm" style="color: #7F7FFF;">🚀 If you find these models useful</span>

Help me test my **AI-Powered Quantum Network Monitor Assistant** with **quantum-ready security checks**:

👉 [Quantum Network Monitor](https://readyforquantum.com/?assistant=open&utm_source=huggingface&utm_medium=referral&utm_campaign=huggingface_repo_readme)

The full open-source code for the Quantum Network Monitor Service is available in my GitHub repos (the ones with NetworkMonitor in the name): [Source Code Quantum Network Monitor](https://github.com/Mungert69). You will also find the code I use to quantize the models, in case you want to do it yourself: [GGUFModelBuilder](https://github.com/Mungert69/GGUFModelBuilder).

💬 **How to test**:
Choose an **AI assistant type**:
- `TurboLLM` (GPT-4.1-mini)
- `HugLLM` (Hugging Face open-source models)
- `TestLLM` (experimental, CPU-only)

### **What I'm Testing**
I'm pushing the limits of **small open-source models for AI network monitoring**, specifically:
- **Function calling** against live network services
- **How small can a model go** while still handling:
  - Automated **Nmap security scans**
  - **Quantum-readiness checks**
  - **Network monitoring tasks**

🟡 **TestLLM** – Current experimental model (llama.cpp on 2 CPU threads in a Hugging Face Docker space):
- ✅ **Zero-configuration setup**
- ⏳ ~30s load time (slow inference but **no API costs**). No token limit, since the cost is low.
- 🔧 **Help wanted!** If you're into **edge-device AI**, let's collaborate!

### **Other Assistants**
🟢 **TurboLLM** – Uses **gpt-4.1-mini**:
- It performs very well, but unfortunately OpenAI charges per token, so token usage is limited
- **Create custom cmd processors to run .NET code on Quantum Network Monitor Agents**
- **Real-time network diagnostics and monitoring**
- **Security audits**
- **Penetration testing** (Nmap/Metasploit)

🔵 **HugLLM** – Latest open-source models:
- 🌐 Runs on the Hugging Face Inference API. Performs pretty well using the latest models hosted on Novita.

### 💡 **Example commands you could test**:
1. `"Give me info on my website's SSL certificate"`
2. `"Check if my server is using quantum-safe encryption for communication"`
3. `"Run a comprehensive security audit on my server"`
4. `"Create a cmd processor to .. (whatever you want)"` Note: you need to install a [Quantum Network Monitor Agent](https://readyforquantum.com/Download/?utm_source=huggingface&utm_medium=referral&utm_campaign=huggingface_repo_readme) to run the .NET code on. This is a very flexible and powerful feature. Use with caution!

### Final Word

I fund the servers used to create these model files, run the Quantum Network Monitor service, and pay for inference from Novita and OpenAI—all out of my own pocket. All the code behind the model creation and the Quantum Network Monitor project is [open source](https://github.com/Mungert69). Feel free to use whatever you find helpful.

If you appreciate the work, please consider [buying me a coffee](https://www.buymeacoffee.com/mahadeva) ☕. Your support helps cover service costs and allows me to raise token limits for everyone.

I'm also open to job opportunities or sponsorship.

Thank you! 😊
instinct-bf16.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4339802dbfa36e050293c38fe5af73a93911061a7e8ac053dd3ac4e6edf1ef35
size 15237853856

instinct-bf16_q8_0.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f10be75e15d5bb0656516ac9fbb2f264cca82861511c4a82fb2f69e5e99d02fc
size 11287999136

instinct-f16_q8_0.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:75ebc3d66a57e8bf72a402d4336dcc5f7f8ba24c6efc9fea6e77c04a1004e761
size 11287999136

instinct-imatrix.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:54f5785c4f25b003fd8bd2d99ace21c2abc4466d311fedbe0587504e5a460ee6
size 4560416

instinct-iq1_m.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cb7e59bdad9c11281e0fc49239be445006706168e9a04b5ba74089547ceb9cd6
size 2571998144

instinct-iq1_s.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8cca9e998936dd446e556637830a44a33ebab37cbc0edf3bcb71f01c4457e341
size 2278224832

instinct-iq2_m.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2cd7291a2b6e2a2038990ce4ebbf915bcb5714473126c5fc040bc28383fe1930
size 3041731520

instinct-iq2_s.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ad413541af0df43413e39b927f94832d5f4fccb2f67fabe43c69ce506d99b2ca
size 2915574720

instinct-iq2_xs.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:82f9c4ad630147101ba99b9e207d8e97517e53eb6bd4e519d81140b9d9f03445
size 2839335872

instinct-iq2_xxs.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4ca4698cb6488846852a045ff648d0b87789ed30808d1153cf72e5008f6f98eb
size 2652107712

instinct-iq3_m.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:96fa032440da46e8e3de8bcc23de921abbca43f3c89e8772d309a09b43c25785
size 3924456384

instinct-iq3_xs.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:283f80760790c747702feb8f311e3231a5153f70006f6b9f9eeffe92d6e3444a
size 3594814400

instinct-iq3_xxs.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:904ac9105b582313ce9bac4b0a1d5ac541f1bea96addda7030d8f35266e4378c
size 3524568000

instinct-iq4_nl.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:705d0d13c6f981568e08f8ceb93d20f1dddb4b64d3f317cd4e5cd65044252984
size 4297307072

instinct-iq4_xs.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:519ce9758797ea7727909b2aeb8cf97fc9e59be9f6383e11e7d92207341cfeec
size 4376011712

instinct-q2_k_m.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:84ce59d616c22a419099425cd776ae905bf0027bccbe67a21f38c378cb2938aa
size 3119346624

instinct-q2_k_s.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:254f74770f52c943cf451407d99bfae3c20a9bb96c1de947ac89d1dc1f4b4c3a
size 2983097280

instinct-q3_k_m.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:19b3d1fcb6f0077090675c016181e9f763264bc9dcf76a708e617352ddf0bd1e
size 4003505088

instinct-q3_k_s.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b1d5cd3557d690eca48eed5e76f20eba363357fbc3a92bfec34cbd4c6d787b3e
size 3858740160

instinct-q4_0.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:69cfaa19b381fb8979689069584dcdc9aff7b1990592529f1121ba9a0a613d09
size 4835881920

instinct-q4_1.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d1b13f2c7d4c7832f9163e7a93935fd67b15d9df097a9aef4eccc68361fcb4ba
size 4834964416

instinct-q4_k_m.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9bd9df32d4702d7398e06be5dd41920e7de9b87bcafa5bdea31abcf1a15755aa
size 5041632192

instinct-q4_k_s.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5c7bc7d3988052b0ee5f19af8debc2ed8edd6b28df27f62871c0529af544a5d0
size 4634059712

instinct-q5_0.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7dc4bfe99417e54d291ddeddd73d494b84d0ebaf0b41db9940a475b726e66cb1
size 5651542976

instinct-q5_1.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:80a3d183064f71377cccccec4e288fe19d4be140f57a72d2732e96f190d12843
size 6059373504

instinct-q5_k_m.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f6c205146c714dee1fa691fcefea8fe6293c468938ddf315bd44cf4862ab45ab
size 5791433664

instinct-q6_k_m.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a82d6c2a60b138ab9fc29e50e52095f46ce0a63de8ce25d169b842fe5b6512fa
size 6518182848

instinct-q8_0.gguf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7c9db5f24aae24edce57b77414f8b00dc0efca440076faa7e944e28363f5ccf1
size 8098525856