Update README.md
Browse files
README.md
CHANGED
|
@@ -80,6 +80,42 @@ inputs = tokenizer(prompt, return_tensors="pt", return_attention_mask=False).to(
|
|
| 80 |
generated_text = model.generate(**inputs, max_length=3084, top_p=0.95, do_sample=True, temperature=0.6, use_cache=True, streamer=streamer)
|
| 81 |
```
|
| 82 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 83 |
|
| 84 |
## Example responses
|
| 85 |
|
|
|
|
| 80 |
generated_text = model.generate(**inputs, max_length=3084, top_p=0.95, do_sample=True, temperature=0.6, use_cache=True, streamer=streamer)
|
| 81 |
```
|
| 82 |
|
| 83 |
*Directly using this model from GGUF*

```python
%pip install -U 'webscout[local]'

from webscout.Local.utils import download_model
from webscout.Local.model import Model
from webscout.Local.thread import Thread
from webscout.Local import formats
from webscout.Local.samplers import SamplerSettings

# 1. Download the model
repo_id = "OEvortex/HelpingAI-3B-hindi"
filename = "HelpingAI-3B-hindi.Q4_K_M.gguf"
model_path = download_model(repo_id, filename, token='') # Replace with your Hugging Face token if needed

# 2. Load the model
model = Model(model_path, n_gpu_layers=20)

# 3. Define the system prompt
system_prompt = "You are HelpingAI-3B, an emotionally intelligent AI designed to provide empathetic and supportive responses in HelpingAI style."

# 4. Create a custom chatml format with your system prompt
custom_chatml = formats.chatml.copy()
custom_chatml['system_content'] = system_prompt

# 5. Define your sampler settings (optional)
sampler = SamplerSettings(temp=0.7, top_p=0.9) # Adjust as needed

# 6. Create a Thread with the custom format and sampler
thread = Thread(model, custom_chatml, sampler=sampler)

# 7. Start interacting with the model
thread.interact(header="🌟 HelpingAI-3B: Emotionally Intelligent Conversational AI for All Devices 🌟", color=True)
```
|
| 119 |
|
| 120 |
## Example responses
|
| 121 |
|