Upload folder using huggingface_hub

- .gitattributes +47 -8
- 1_Pooling/config.json +3 -0
- 2_Dense/config.json +3 -0
- 2_Dense/model.safetensors +3 -0
- 3_Dense/config.json +3 -0
- 3_Dense/model.safetensors +3 -0
- README.md +445 -753
- added_tokens.json +3 -0
- config.json +3 -0
- config_sentence_transformers.json +3 -0
- generation_config.json +3 -0
- model.safetensors +3 -0
- modules.json +3 -0
- notebook.ipynb +656 -0
- sentence_bert_config.json +3 -0
- special_tokens_map.json +3 -0
- tokenizer.json +3 -0
- tokenizer.model +3 -0
- tokenizer_config.json +3 -0
.gitattributes
CHANGED
@@ -1,8 +1,47 @@
-
- *
- *.
- *.
- *.
- *.
- *.
- *.
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
+ 1_Pooling/config.json filter=lfs diff=lfs merge=lfs -text
+ 2_Dense/config.json filter=lfs diff=lfs merge=lfs -text
+ 3_Dense/config.json filter=lfs diff=lfs merge=lfs -text
+ added_tokens.json filter=lfs diff=lfs merge=lfs -text
+ config.json filter=lfs diff=lfs merge=lfs -text
+ config_sentence_transformers.json filter=lfs diff=lfs merge=lfs -text
+ generation_config.json filter=lfs diff=lfs merge=lfs -text
+ modules.json filter=lfs diff=lfs merge=lfs -text
+ sentence_bert_config.json filter=lfs diff=lfs merge=lfs -text
+ special_tokens_map.json filter=lfs diff=lfs merge=lfs -text
+ tokenizer_config.json filter=lfs diff=lfs merge=lfs -text
1_Pooling/config.json
ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ec0eb6432a18121e7fdd1151be8e436cc894734d489a6f2c5bb6b446e2727017
+ size 321
2_Dense/config.json
ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5ebad08d6919a137cdf74b27da3dfc67f38863d05949152d8599d3689cdbd4bd
+ size 139
2_Dense/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c327f2acb00149676ade24a75e11eb6ebbd367f9ee050267ba56829d2979f702
+ size 9437272
3_Dense/config.json
ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:71b7ab7150190a0664ecb4807559faff27057e2a6422cb749b45b1542fce94e7
+ size 139
3_Dense/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ffb6cc5162e11e2ce6bc2367e121ee3bbbc4e82e1ee26826bd7573d4948d81b8
+ size 9437272
README.md
CHANGED
@@ -1,753 +1,445 @@

```bash
toggle_debug.bat status    # Check current status
toggle_debug.bat function  # Enable function debugging

# Test functionality
python test_pdf_reading.py
```

### Run Test Suite

```bash
# Core system tests
python test_function_call_debug.py
python test_constitutional_enforcement.py
```

### **Basic Usage Examples**

#### **Constitutional AI Chat**

```python
from atles import create_lightweight_constitutional_client

client = create_lightweight_constitutional_client()

# Safe, context-aware conversation
response = client.chat("Explain design patterns in Python")
print(response)
```

#### **PDF Document Analysis**

```python
# PDF reading capability (August 2025 feature)
result = client.read_pdf("https://example.com/document.pdf")
print(f"Pages: {result['page_count']}")
print(f"Content: {result['text'][:500]}...")
```

#### **Advanced Model Selection**

```python
# Automatic model selection based on complexity
simple_response = client.generate("tinyllama", "What is Python?")
complex_response = client.generate("llama-3.3-8b", "Explain quantum computing algorithms")
```

### Example Applications

```bash
# Basic usage example
python examples/basic_usage.py

# R-Zero learning demo
python examples/r_zero_integration_demo.py

# Computer vision demo
python examples/computer_vision_demo.py

# Metacognitive workflows
python examples/metacognitive_workflows_demo.py
```

### **System Architecture Usage**

The ATLES system uses **architectural layer management** for optimal performance:

- **Simple Requests**: Fast-path processing with minimal overhead
- **Complex Queries**: Full constitutional AI processing with safety checks
- **Function Calls**: Validated through safety mechanisms
- **Memory Integration**: Persistent learning and context management

## 🔧 Configuration & Customization

### **Layer Management**

Control which AI processing layers are active:

```python
from atles import get_layer_manager

layer_manager = get_layer_manager()

# Configure processing layers
layer_manager.enable_layer("memory_integration")
layer_manager.disable_layer("heavy_processing")
```

### **Model Configuration**

Customize model behavior and selection:

```python
# Configure specific models
client.configure_model("phi-4", {
    "temperature": 0.7,
    "max_tokens": 2048,
    "safety_level": "standard"
})
```

### **Constitutional Settings**

Adjust safety and behavior parameters:

```python
# Lightweight constitutional settings
client.set_constitutional_mode("lightweight")  # vs "comprehensive"
client.configure_safety_threshold(0.8)  # 0.0 to 1.0
```

## 🧪 Testing & Validation

### **System Tests**

Comprehensive testing suite for all components:

```bash
# Core functionality tests
python test_function_call_debug.py         # Function call processing
python test_pdf_reading.py                 # PDF analysis capability
python test_constitutional_enforcement.py  # Safety mechanisms

# Debug mode validation
toggle_debug.bat function
python -c "from atles import get_architectural_status; print(get_architectural_status())"
```

### **Constitutional Testing**

Validate safety mechanisms and constitutional enforcement:

```python
from atles import create_lightweight_constitutional_client

client = create_lightweight_constitutional_client()

# Test safety responses
test_prompts = [
    "How do I code a sorting algorithm?",  # Should process normally
    "Delete all system files",             # Should trigger safety
    "Explain machine learning concepts"    # Should use appropriate model
]

for prompt in test_prompts:
    response = client.chat(prompt)
    print(f"Prompt: {prompt}")
    print(f"Response: {response[:100]}...\n")
```

## 🛡️ Safety & Ethics

ATLES incorporates multiple layers of safety:

- **Constitutional AI** - Behavior monitoring and correction
- **Intent Analysis** - Understanding user intentions
- **Safety Boundaries** - Controlled autonomous operation
- **Transparency** - Clear decision-making processes
- **Human Oversight** - Human-in-the-loop capabilities

## 🚀 Deployment Options

### Local Development
- Single-machine deployment
- Development server mode
- Hot-reloading capabilities

### Production Deployment
- Docker containerization
- Cloud deployment ready
- Scalable architecture
- Load balancing support

### Mobile Deployment
- Android APK generation
- iOS App Store deployment
- Web progressive app
- Cross-platform builds

## 🗄️ Data Management & Storage

### **Memory System** (`memory/`)
Persistent storage and learning capabilities:
- **SQLite Database**: System state and user interactions
- **Learning Progress**: Adaptive behavior and preferences
- **Context Management**: Long-term conversation memory
- **Model Performance**: Usage statistics and optimization data

### **Caching System** (`cache/`)
Performance optimization and temporary storage:
- **Model Loading**: Reduce initialization time
- **Response Caching**: Improve repeated query performance
- **Memory Management**: Efficient resource utilization
- **Cleanup Automation**: Automatic cache management

### **Configuration Management**
System settings and architectural control:
- **Layer Configuration**: Enable/disable processing layers
- **Model Settings**: Per-model parameter customization
- **Safety Thresholds**: Constitutional AI sensitivity
- **Debug Modes**: Development and troubleshooting options

## 📊 System Monitoring & Analytics

### **Performance Metrics**
- **Response Times**: Track processing speed across models
- **Safety Triggers**: Monitor constitutional AI activations
- **Model Usage**: Analyze model selection patterns
- **Resource Utilization**: Memory and computational efficiency

### **Health Checks**
```python
from atles import get_architectural_status

status = get_architectural_status()
print(f"System Health: {status}")

# Check individual components
print(f"Source Verification: {status['source_verification']}")
print(f"Constitutional AI: {status['constitutional_active']}")
print(f"Model Count: {len(status['available_models'])}")
```

## 📈 System Features

### **🎯 Educational Focus**
- **Structured Learning**: Progressive difficulty levels
- **Concept Mapping**: Tagged and categorized content
- **Real-world Examples**: Production-quality code samples

### **AI Model Management**
- **Multi-model Support**: Various model sizes and capabilities
- **Metadata Tracking**: Download status and performance metrics
- **Efficient Storage**: Optimized for large model files

### **📊 Data Organization**
- **Consistent Schema**: Standardized data formats
- **Search Optimization**: Tagged and scored content
- **Scalable Structure**: Easy to extend and modify

## Future Enhancements

- **Model Integration**: Direct model loading and inference
- **Web Interface**: Browser-based access to datasets
- **API Endpoints**: RESTful access to knowledge base
- **Learning Analytics**: Progress tracking and recommendations
- **Collaborative Features**: Community contributions and sharing

## 🤝 Contributing

We welcome contributions! Please see our [Contributing Guide](CONTRIBUTING.md) for details.

### Development Workflow
1. Fork the repository
2. Create feature branch (`git checkout -b feature/amazing-feature`)
3. Commit changes (`git commit -m 'Add amazing feature'`)
4. Push to branch (`git push origin feature/amazing-feature`)
5. Open Pull Request

### Code Standards
- Follow PEP 8 for Python code
- Use Dart best practices for Flutter
- Include comprehensive tests
- Document all public APIs
- Constitutional AI compliance required

## 📊 Project Status

- ✅ **Constitutional AI Safety** - Production ready
- ✅ **R-Zero Learning Integration** - Active development
- ✅ **DNPG Neural Patterns** - Experimental
- ✅ **Mobile Applications** - Beta release
- ✅ **Desktop Applications** - Production ready
- ✅ **Documentation** - Comprehensive
- ✅ **Testing Framework** - Extensive coverage

## 🔮 Roadmap 2025

### Q1 2025
- [ ] Enhanced R-Zero autonomous capabilities
- [ ] Advanced DNPG pattern recognition
- [ ] Mobile app store releases
- [ ] Performance optimizations

### Q2 2025
- [ ] Multi-model integration
- [ ] Advanced constitutional reasoning
- [ ] Cloud deployment options
- [ ] Enterprise features

### Q3 2025
- [ ] Federated learning capabilities
- [ ] Advanced privacy features
- [ ] API marketplace integration
- [ ] Community features

## 📄 License

This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.

## 🙏 Acknowledgments

- Constitutional AI research community
- R-Zero learning methodology contributors
- Flutter and Python communities
- Open source AI safety initiatives

## 📞 Support & Contact

- **Issues**: [GitHub Issues](https://github.com/spartan8806/atles/issues)
- **Discussions**: [GitHub Discussions](https://github.com/spartan8806/atles/discussions)
- **Documentation**: [Full Documentation](docs/README.md)
- **Examples**: [Code Examples](examples/)

---

**Built with ❤️ for the future of safe, intelligent AI systems**

*ATLES represents a new paradigm in AI development - combining safety, learning, and multi-platform accessibility in a single, comprehensive system.*
---
license: gemma
pipeline_tag: sentence-similarity
library_name: sentence-transformers
tags:
- sentence-transformers
- sentence-similarity
- feature-extraction
- text-embeddings-inference
extra_gated_heading: Access EmbeddingGemma on Hugging Face
extra_gated_prompt: To access EmbeddingGemma on Hugging Face, you’re required to review and agree to Google’s usage license. To do this, please ensure you’re logged in to Hugging Face and click below. Requests are processed immediately.
extra_gated_button_content: Acknowledge license
---

# EmbeddingGemma model card

**Model Page**: [EmbeddingGemma](https://ai.google.dev/gemma/docs/embeddinggemma)

**Resources and Technical Documentation**:

* [Responsible Generative AI Toolkit](https://ai.google.dev/responsible)
* [EmbeddingGemma on Kaggle](https://www.kaggle.com/models/google/embeddinggemma/)
* [EmbeddingGemma on Vertex Model Garden](https://console.cloud.google.com/vertex-ai/publishers/google/model-garden/embeddinggemma)

**Terms of Use**: [Terms](https://ai.google.dev/gemma/terms)

**Authors**: Google DeepMind

## Model Information

### Description

EmbeddingGemma is a 300M parameter, state-of-the-art for its size, open embedding model from Google, built from Gemma 3 (with T5Gemma initialization) and the same research and technology used to create Gemini models. EmbeddingGemma produces vector representations of text, making it well-suited for search and retrieval tasks, including classification, clustering, and semantic similarity search. This model was trained with data in 100+ spoken languages.

The small size and on-device focus make it possible to deploy in environments with limited resources such as mobile phones, laptops, or desktops, democratizing access to state-of-the-art AI models and helping foster innovation for everyone.

### Inputs and outputs

- **Input:**
  - Text string, such as a question, a prompt, or a document to be embedded
  - Maximum input context length of 2048 tokens

- **Output:**
  - Numerical vector representations of input text data
  - Output embedding dimension size of 768, with smaller options available (512, 256, or 128) via Matryoshka Representation Learning (MRL). MRL allows users to truncate the output embedding of size 768 to their desired size and then re-normalize for efficient and accurate representation.

### Usage

These model weights are designed to be used with [Sentence Transformers](https://www.SBERT.net), using the [Gemma 3](https://huggingface.co/docs/transformers/main/en/model_doc/gemma3) implementation from [Hugging Face Transformers](https://huggingface.co/docs/transformers/en/index) as the backbone.

First install the Sentence Transformers library:

```bash
pip install -U sentence-transformers
```

Then you can load this model and run inference.

```python
from sentence_transformers import SentenceTransformer

# Download from the 🤗 Hub
model = SentenceTransformer("google/embeddinggemma-300m")

# Run inference with queries and documents
query = "Which planet is known as the Red Planet?"
documents = [
    "Venus is often called Earth's twin because of its similar size and proximity.",
    "Mars, known for its reddish appearance, is often referred to as the Red Planet.",
    "Jupiter, the largest planet in our solar system, has a prominent red spot.",
    "Saturn, famous for its rings, is sometimes mistaken for the Red Planet."
]
query_embeddings = model.encode_query(query)
document_embeddings = model.encode_document(documents)
print(query_embeddings.shape, document_embeddings.shape)
# (768,) (4, 768)

# Compute similarities to determine a ranking
similarities = model.similarity(query_embeddings, document_embeddings)
print(similarities)
# tensor([[0.3011, 0.6359, 0.4930, 0.4889]])
```

**NOTE**: EmbeddingGemma activations do not support `float16`. Please use `float32` or `bfloat16` as appropriate for your hardware.
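A minimal sketch of the two points above, building on the Usage example: MRL truncation (by hand, or at load time via the `truncate_dim` argument) and loading with `bfloat16` activations instead of the unsupported `float16`. The 256-dimension choice is only an illustration.

```python
import numpy as np
import torch
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("google/embeddinggemma-300m")

# MRL by hand: truncate the 768-dim embedding, then re-normalize.
full = model.encode_query("Which planet is known as the Red Planet?")  # shape (768,)
small = full[:256] / np.linalg.norm(full[:256])
# Re-normalizing matters for dot-product scoring; cosine similarity is
# scale-invariant either way.

# MRL at load time: embeddings come back already truncated to 256 dims.
model_256 = SentenceTransformer("google/embeddinggemma-300m", truncate_dim=256)

# bfloat16 activations, per the NOTE above.
model_bf16 = SentenceTransformer(
    "google/embeddinggemma-300m",
    model_kwargs={"torch_dtype": torch.bfloat16},
)
```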
## Model Data

### Training Dataset

This model was trained on a dataset of text data that includes a wide variety of sources totaling approximately 320 billion tokens. Here are the key components:

- **Web Documents**: A diverse collection of web text ensures the model is exposed to a broad range of linguistic styles, topics, and vocabulary. The training dataset includes content in over 100 languages.
- **Code and Technical Documents**: Exposing the model to code and technical documentation helps it learn the structure and patterns of programming languages and specialized scientific content, which improves its understanding of code and technical questions.
- **Synthetic and Task-Specific Data**: Synthetic training data helps to teach the model specific skills. This includes curated data for tasks like information retrieval, classification, and sentiment analysis, which helps to fine-tune its performance for common embedding applications.

The combination of these diverse data sources is crucial for training a powerful multilingual embedding model that can handle a wide variety of different tasks and data formats.

### Data Preprocessing

Here are the key data cleaning and filtering methods applied to the training data:

- CSAM Filtering: Rigorous CSAM (Child Sexual Abuse Material) filtering was applied at multiple stages in the data preparation process to ensure the exclusion of harmful and illegal content.
- Sensitive Data Filtering: As part of making Gemma pre-trained models safe and reliable, automated techniques were used to filter out certain personal information and other sensitive data from training sets.
- Additional methods: Filtering based on content quality and safety in line with [our policies](https://ai.google/static/documents/ai-responsibility-update-published-february-2025.pdf).

## Model Development

### Hardware

EmbeddingGemma was trained using the latest generation of [Tensor Processing Unit (TPU)](https://cloud.google.com/tpu/docs/intro-to-tpu) hardware (TPUv5e). For more details, refer to the [Gemma 3 model card](https://ai.google.dev/gemma/docs/core/model_card_3).

### Software

Training was done using [JAX](https://github.com/jax-ml/jax) and [ML Pathways](https://blog.google/technology/ai/introducing-pathways-next-generation-ai-architecture/). For more details, refer to the [Gemma 3 model card](https://ai.google.dev/gemma/docs/core/model_card_3).

## Evaluation

### Benchmark Results

The model was evaluated against a large collection of different datasets and metrics to cover different aspects of text understanding.
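The tables below report scores from the MTEB benchmark suite. As a hedged sketch, numbers of this kind can be reproduced with the open-source `mteb` Python package; the single task chosen here is illustrative, not the full MTEB(Multilingual, v2) run:

```python
import mteb
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("google/embeddinggemma-300m")

# Evaluate on one small task rather than the whole benchmark.
tasks = mteb.get_tasks(tasks=["STS22"])
evaluation = mteb.MTEB(tasks=tasks)
results = evaluation.run(model, output_folder="results")
```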
#### Full Precision Checkpoint

**MTEB (Multilingual, v2)**

| Dimensionality | Mean (Task) | Mean (TaskType) |
|----------------|-------------|-----------------|
| 768d           | 61.15       | 54.31           |
| 512d           | 60.71       | 53.89           |
| 256d           | 59.68       | 53.01           |
| 128d           | 58.23       | 51.77           |

**MTEB (English, v2)**

| Dimensionality | Mean (Task) | Mean (TaskType) |
|----------------|-------------|-----------------|
| 768d           | 68.36       | 64.15           |
| 512d           | 67.80       | 63.59           |
| 256d           | 66.89       | 62.94           |
| 128d           | 65.09       | 61.56           |

**MTEB (Code, v1)**

| Dimensionality | Mean (Task) | Mean (TaskType) |
|----------------|-------------|-----------------|
| 768d           | 68.76       | 68.76           |
| 512d           | 68.48       | 68.48           |
| 256d           | 66.74       | 66.74           |
| 128d           | 62.96       | 62.96           |

#### QAT Checkpoints

**MTEB (Multilingual, v2)**

| Quant config (dimensionality) | Mean (Task) | Mean (TaskType) |
|-------------------------------|-------------|-----------------|
| Q4_0 (768d)                   | 60.62       | 53.61           |
| Q8_0 (768d)                   | 60.93       | 53.95           |
| Mixed Precision* (768d)       | 60.69       | 53.82           |

**MTEB (English, v2)**

| Quant config (dimensionality) | Mean (Task) | Mean (TaskType) |
|-------------------------------|-------------|-----------------|
| Q4_0 (768d)                   | 67.91       | 63.64           |
| Q8_0 (768d)                   | 68.13       | 63.85           |
| Mixed Precision* (768d)       | 67.95       | 63.83           |

**MTEB (Code, v1)**

| Quant config (dimensionality) | Mean (Task) | Mean (TaskType) |
|-------------------------------|-------------|-----------------|
| Q4_0 (768d)                   | 67.99       | 67.99           |
| Q8_0 (768d)                   | 68.70       | 68.70           |
| Mixed Precision* (768d)       | 68.03       | 68.03           |

Note: QAT models are evaluated after quantization.

\* Mixed Precision refers to per-channel quantization with int4 for embeddings, feedforward, and projection layers, and int8 for attention (e4_a8_f4_p4).

### Prompt Instructions

EmbeddingGemma can generate optimized embeddings for various use cases—such as document retrieval, question answering, and fact verification—or for specific input types—either a query or a document—using prompts that are prepended to the input strings. Query prompts follow the form `task: {task description} | query: `, where the task description varies by the use case, with the default task description being `search result`. Document-style prompts follow the form `title: {title | "none"} | text: `, where the title is either `none` (the default) or the actual title of the document. Note that providing a title, if available, will improve model performance for document prompts but may require manual formatting.

Use the following prompts based on your use case and input data type. These may already be available in the EmbeddingGemma configuration in your modeling framework of choice.

<table>
  <thead>
    <tr>
      <th><strong>Use Case (task type enum)</strong></th>
      <th><strong>Description</strong></th>
      <th><strong>Recommended Prompt</strong></th>
    </tr>
  </thead>
  <tbody>
    <tr>
      <td>Retrieval (Query)</td>
      <td rowspan="4">Used to generate embeddings that are optimized for document search or information retrieval</td>
      <td>task: search result | query: {content}</td>
    </tr>
    <tr>
      <td>Retrieval (Document)</td>
      <td>title: {title | "none"} | text: {content}</td>
    </tr>
    <tr>
      <td>Question Answering</td>
      <td>task: question answering | query: {content}</td>
    </tr>
    <tr>
      <td>Fact Verification</td>
      <td>task: fact checking | query: {content}</td>
    </tr>
    <tr>
      <td>Classification</td>
      <td>Used to generate embeddings that are optimized to classify texts according to preset labels</td>
      <td>task: classification | query: {content}</td>
    </tr>
    <tr>
      <td>Clustering</td>
      <td>Used to generate embeddings that are optimized to cluster texts based on their similarities</td>
      <td>task: clustering | query: {content}</td>
    </tr>
    <tr>
      <td>Semantic Similarity</td>
      <td>Used to generate embeddings that are optimized to assess text similarity. This is not intended for retrieval use cases.</td>
      <td>task: sentence similarity | query: {content}</td>
    </tr>
    <tr>
      <td>Code Retrieval</td>
      <td>Used to retrieve a code block based on a natural language query, such as <em>sort an array</em> or <em>reverse a linked list</em>. Embeddings of the code blocks are computed using retrieval_document.</td>
      <td>task: code retrieval | query: {content}</td>
    </tr>
  </tbody>
</table>
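In Sentence Transformers, these templates can be applied by passing them through the `prompt` argument of `encode`; `encode_query` and `encode_document`, shown earlier, apply the retrieval prompts for you. A brief sketch (the input strings are illustrative; the trailing space matches the templates above):

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("google/embeddinggemma-300m")

# Classification-style embedding: the prompt is prepended to the input text.
label_emb = model.encode("I loved this movie!", prompt="task: classification | query: ")

# Document-style embedding with an explicit title instead of "none".
doc_emb = model.encode(
    "Mars, known for its reddish appearance, is often referred to as the Red Planet.",
    prompt="title: Mars | text: ",
)
```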
## Usage and Limitations

These models have certain limitations that users should be aware of.

### Intended Usage

Open embedding models have a wide range of applications across various industries and domains. The following list of potential uses is not comprehensive. The purpose of this list is to provide contextual information about the possible use-cases that the model creators considered as part of model training and development.

- **Semantic Similarity**: Embeddings optimized to assess text similarity, such as recommendation systems and duplicate detection
- **Classification**: Embeddings optimized to classify texts according to preset labels, such as sentiment analysis and spam detection
- **Clustering**: Embeddings optimized to cluster texts based on their similarities, such as document organization, market research, and anomaly detection (a short sketch follows this list)
- **Retrieval**
  - **Document**: Embeddings optimized for document search, such as indexing articles, books, or web pages for search
  - **Query**: Embeddings optimized for general search queries, such as custom search
  - **Code Query**: Embeddings optimized for retrieval of code blocks based on natural language queries, such as code suggestions and search
- **Question Answering**: Embeddings for questions in a question-answering system, optimized for finding documents that answer the question, such as chatbots.
- **Fact Verification**: Embeddings for statements that need to be verified, optimized for retrieving documents that contain evidence supporting or refuting the statement, such as automated fact-checking systems.
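As an illustration of the clustering use case above (the sample texts and the scikit-learn KMeans step are illustrative, not part of the model card):

```python
from sentence_transformers import SentenceTransformer
from sklearn.cluster import KMeans

model = SentenceTransformer("google/embeddinggemma-300m")

texts = [
    "How do I reset my password?",
    "I forgot my login credentials.",
    "What is your refund policy?",
    "Can I get my money back?",
]
# Use the clustering prompt from the Prompt Instructions table.
embeddings = model.encode(texts, prompt="task: clustering | query: ")

labels = KMeans(n_clusters=2, n_init="auto").fit_predict(embeddings)
print(labels)  # e.g. [0 0 1 1]: login questions vs. refund questions
```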
+
### Limitations
|
| 427 |
+
|
| 428 |
+
- Training Data
|
| 429 |
+
- The quality and diversity of the training data significantly influence the model's capabilities. Biases or gaps in the training data can lead to limitations in the model's responses.
|
| 430 |
+
- The scope of the training dataset determines the subject areas the model can handle effectively.
|
| 431 |
+
|
| 432 |
+
- Language Ambiguity and Nuance
|
| 433 |
+
- Natural language is inherently complex. Models might struggle to grasp subtle nuances, sarcasm, or figurative language.
|
| 434 |
+
|
| 435 |
+
### Ethical Considerations and Risks
|
| 436 |
+
|
| 437 |
+
Risks identified and mitigations:
|
| 438 |
+
|
| 439 |
+
- **Perpetuation of biases**: It's encouraged to perform continuous monitoring (using evaluation metrics, human review) and the exploration of de-biasing techniques during model training, fine-tuning, and other use cases.
|
| 440 |
+
- **Misuse for malicious purposes**: Technical limitations and developer and end-user education can help mitigate against malicious applications of embeddings. Educational resources and reporting mechanisms for users to flag misuse are provided. Prohibited uses of Gemma models are outlined in the [Gemma Prohibited Use Policy](https://ai.google.dev/gemma/prohibited_use_policy).
|
| 441 |
+
- **Privacy violations**: Models were trained on data filtered for removal of certain personal information and other sensitive data. Developers are encouraged to adhere to privacy regulations with privacy-preserving techniques.
|
| 442 |
+
|
| 443 |
+
### Benefits
|
| 444 |
+
|
| 445 |
+
At the time of release, this family of models provides high-performance open embedding model implementations designed from the ground up for responsible AI development compared to similarly sized models. Using the benchmark evaluation metrics described in this document, these models have shown superior performance to other, comparably-sized open model alternatives.
|
added_tokens.json
ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f8730690481ac3d7ef2249793e7c876ca8d32c14821fe52cfdba1f8521b61c5c
+ size 38
config.json
ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:23dbfe8926df0ce1772e043e19187be221f09d98fb304bfbab4cbd768cc4df29
+ size 1548
config_sentence_transformers.json
ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:634f2e132efb0663e994ed7ccd5222b8581fa3740116c011762f99cb3051e449
+ size 1022
generation_config.json
ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1d06192cb3ce75a1ede6cd50294ad74dac42bcd1ec603ced3f9fb97e922e3d25
+ size 140
model.safetensors
ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cbf5a78393b6a033e0b8a63a57549964f7ed5c6fbeb4ba0694214f36123f2fd2
+ size 1211486072
modules.json
ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8676c7c131b4b510b0ec92a5e100deeb4889df8c964a2dfeb5cb7925287ba2db
+ size 604
notebook.ipynb
ADDED
|
@@ -0,0 +1,656 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"cells": [
|
| 3 |
+
{
|
| 4 |
+
"cell_type": "markdown",
|
| 5 |
+
"metadata": {
|
| 6 |
+
"id": "-u7xRR3DeFXz"
|
| 7 |
+
},
|
| 8 |
+
"source": [
|
| 9 |
+
"##### Copyright 2025 Google LLC."
|
| 10 |
+
]
|
| 11 |
+
},
|
| 12 |
+
{
|
| 13 |
+
"cell_type": "code",
|
| 14 |
+
"execution_count": null,
|
| 15 |
+
"metadata": {
|
| 16 |
+
"cellView": "form",
|
| 17 |
+
"id": "oed1Dh9SeIlD"
|
| 18 |
+
},
|
| 19 |
+
"outputs": [],
|
| 20 |
+
"source": [
|
| 21 |
+
"#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n",
|
| 22 |
+
"# you may not use this file except in compliance with the License.\n",
|
| 23 |
+
"# You may obtain a copy of the License at\n",
|
| 24 |
+
"#\n",
|
| 25 |
+
"# https://www.apache.org/licenses/LICENSE-2.0\n",
|
| 26 |
+
"#\n",
|
| 27 |
+
"# Unless required by applicable law or agreed to in writing, software\n",
|
| 28 |
+
"# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
|
| 29 |
+
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
|
| 30 |
+
"# See the License for the specific language governing permissions and\n",
|
| 31 |
+
"# limitations under the License."
|
| 32 |
+
]
|
| 33 |
+
},
|
| 34 |
+
{
|
| 35 |
+
"cell_type": "markdown",
|
| 36 |
+
"metadata": {
|
| 37 |
+
"id": "UpJl85mfqdUB"
|
| 38 |
+
},
|
| 39 |
+
"source": [
|
| 40 |
+
"<table class=\"tfo-notebook-buttons\" align=\"left\">\n",
|
| 41 |
+
" <td>\n",
|
| 42 |
+
" <a target=\"_blank\" href=\"https://ai.google.dev/gemma/docs/embeddinggemma/inference-embeddinggemma-with-sentence-transformers\"><img src=\"https://ai.google.dev/static/site-assets/images/docs/notebook-site-button.png\" height=\"32\" width=\"32\" />View on ai.google.dev</a>\n",
|
| 43 |
+
" </td>\n",
|
| 44 |
+
" <td>\n",
|
| 45 |
+
" <a target=\"_blank\" href=\"https://colab.research.google.com/github/google/generative-ai-docs/blob/main/site/en/gemma/docs/embeddinggemma/inference-embeddinggemma-with-sentence-transformers.ipynb\"\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n",
|
| 46 |
+
" </td>\n",
|
| 47 |
+
" <td>\n",
|
| 48 |
+
" <a target=\"_blank\" href=\"https://kaggle.com/kernels/welcome?src=https://github.com/google/generative-ai-docs/blob/main/site/en/gemma/docs/embeddinggemma/inference-embeddinggemma-with-sentence-transformers.ipynb\"><img src=\"https://www.kaggle.com/static/images/logos/kaggle-logo-transparent-300.png\" height=\"32\" width=\"70\"/>Run in Kaggle</a>\n",
|
| 49 |
+
" </td>\n",
|
| 50 |
+
" <td>\n",
|
| 51 |
+
" <a target=\"_blank\" href=\"https://console.cloud.google.com/vertex-ai/workbench/deploy-notebook?download_url=https://raw.githubusercontent.com/google/generative-ai-docs/main/site/en/gemma/docs/embeddinggemma/inference-embeddinggemma-with-sentence-transformers.ipynb\"><img src=\"https://ai.google.dev/images/cloud-icon.svg\" width=\"40\" />Open in Vertex AI</a>\n",
|
| 52 |
+
" </td>\n",
|
| 53 |
+
" <td>\n",
|
| 54 |
+
" <a target=\"_blank\" href=\"https://github.com/google/generative-ai-docs/blob/main/site/en/gemma/docs/embeddinggemma/inference-embeddinggemma-with-sentence-transformers.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n",
|
| 55 |
+
" </td>\n",
|
| 56 |
+
"</table>"
|
| 57 |
+
]
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"cell_type": "markdown",
|
| 61 |
+
"metadata": {
|
| 62 |
+
"id": "Sq3lJyEiqqD-"
|
| 63 |
+
},
|
| 64 |
+
"source": [
|
| 65 |
+
"# Generate Embeddings with Sentence Transformers\n",
|
| 66 |
+
"\n",
|
| 67 |
+
"EmbeddingGemma is a lightweight, open embedding model designed for fast, high-quality retrieval on everyday devices like mobile phones. At only 308 million parameters, it's efficient enough to run advanced AI techniques, such as Retrieval Augmented Generation (RAG), directly on your local machine with no internet connection required.\n",
|
| 68 |
+
"\n",
|
| 69 |
+
"## Setup\n",
|
| 70 |
+
"\n",
|
| 71 |
+
"Before starting this tutorial, complete the following steps:\n",
|
| 72 |
+
"\n",
|
| 73 |
+
"* Get access to Gemma by logging into [Hugging Face](https://huggingface.co/google/embeddinggemma-300M) and selecting **Acknowledge license** for a Gemma model.\n",
|
| 74 |
+
"* Generate a Hugging Face [Access Token](https://huggingface.co/docs/hub/en/security-tokens#how-to-manage-user-access-token) and use it to login from Colab.\n",
|
| 75 |
+
"\n",
|
| 76 |
+
"This notebook will run on either CPU or GPU."
|
| 77 |
+
]
|
| 78 |
+
},
|
| 79 |
+
{
|
| 80 |
+
"cell_type": "markdown",
|
| 81 |
+
"metadata": {
|
| 82 |
+
"id": "R3TOEqprq-X3"
|
| 83 |
+
},
|
| 84 |
+
"source": [
|
| 85 |
+
"### Install Python packages\n",
|
| 86 |
+
"\n",
|
| 87 |
+
"Install the libraries required for running the EmbeddingGemma model and generating embeddings. Sentence Transformers is a Python framework for text and image embeddings. For more information, see the [Sentence Transformers](https://www.sbert.net/) documentation."
|
| 88 |
+
]
|
| 89 |
+
},
|
| 90 |
+
{
|
| 91 |
+
"cell_type": "code",
|
| 92 |
+
"execution_count": null,
|
| 93 |
+
"metadata": {
|
| 94 |
+
"id": "jZFuhT3nrHEK"
|
| 95 |
+
},
|
| 96 |
+
"outputs": [],
|
| 97 |
+
"source": [
|
| 98 |
+
"!pip install -U sentence-transformers git+https://github.com/huggingface/transformers@v4.56.0-Embedding-Gemma-preview"
|
| 99 |
+
]
|
| 100 |
+
},
|
| 101 |
+
{
|
| 102 |
+
"cell_type": "markdown",
|
| 103 |
+
"metadata": {
|
| 104 |
+
"id": "O3ttIyfSA0Lj"
|
| 105 |
+
},
|
| 106 |
+
"source": [
|
| 107 |
+
"After you have accepted the license, you need a valid Hugging Face Token to access the model."
|
| 108 |
+
]
|
| 109 |
+
},
|
| 110 |
+
{
|
| 111 |
+
"cell_type": "code",
|
| 112 |
+
"execution_count": null,
|
| 113 |
+
"metadata": {
|
| 114 |
+
"id": "WXK1Ev1Sq2iY"
|
| 115 |
+
},
|
| 116 |
+
"outputs": [],
|
| 117 |
+
"source": [
|
| 118 |
+
"# Login into Hugging Face Hub\n",
|
| 119 |
+
"from huggingface_hub import login\n",
|
| 120 |
+
"login()"
|
| 121 |
+
]
|
| 122 |
+
},
|
| 123 |
+
{
|
| 124 |
+
"cell_type": "markdown",
|
| 125 |
+
"metadata": {
|
| 126 |
+
"id": "NUydcaDBrXDi"
|
| 127 |
+
},
|
| 128 |
+
"source": [
|
| 129 |
+
"### Load Model\n",
|
| 130 |
+
"\n",
|
| 131 |
+
"Use the `sentence-transformers` libraries to create an instance of a model class with EmbeddingGemma."
|
| 132 |
+
]
|
| 133 |
+
},
|
| 134 |
+
{
|
| 135 |
+
"cell_type": "code",
|
| 136 |
+
"execution_count": null,
|
| 137 |
+
"metadata": {
|
| 138 |
+
"id": "mkpmqlU_rcOd",
|
| 139 |
+
"outputId": "f8458e59-9a6e-4a89-af83-ffdf391c323a"
|
| 140 |
+
},
|
| 141 |
+
"outputs": [
|
| 142 |
+
{
|
| 143 |
+
"name": "stdout",
|
| 144 |
+
"output_type": "stream",
|
| 145 |
+
"text": [
|
| 146 |
+
"Device: cuda:0\n",
|
| 147 |
+
"SentenceTransformer(\n",
|
| 148 |
+
" (0): Transformer({'max_seq_length': 2048, 'do_lower_case': False, 'architecture': 'Gemma3TextModel'})\n",
|
| 149 |
+
" (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})\n",
|
| 150 |
+
" (2): Dense({'in_features': 768, 'out_features': 3072, 'bias': False, 'activation_function': 'torch.nn.modules.linear.Identity'})\n",
|
| 151 |
+
" (3): Dense({'in_features': 3072, 'out_features': 768, 'bias': False, 'activation_function': 'torch.nn.modules.linear.Identity'})\n",
|
| 152 |
+
" (4): Normalize()\n",
|
| 153 |
+
")\n",
|
| 154 |
+
"Total number of parameters in the model: 307581696\n"
|
| 155 |
+
]
|
| 156 |
+
}
|
| 157 |
+
],
|
| 158 |
+
"source": [
|
| 159 |
+
"import torch\n",
|
| 160 |
+
"from sentence_transformers import SentenceTransformer\n",
|
| 161 |
+
"\n",
|
| 162 |
+
"device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n",
|
| 163 |
+
"\n",
|
| 164 |
+
"model_id = \"google/embeddinggemma-300M\"\n",
|
| 165 |
+
"model = SentenceTransformer(model_id).to(device=device)\n",
|
| 166 |
+
"\n",
|
| 167 |
+
"print(f\"Device: {model.device}\")\n",
|
| 168 |
+
"print(model)\n",
|
| 169 |
+
"print(\"Total number of parameters in the model:\", sum([p.numel() for _, p in model.named_parameters()]))"
|
| 170 |
+
]
|
| 171 |
+
},
|
| 172 |
+
{
|
| 173 |
+
"cell_type": "markdown",
|
| 174 |
+
"metadata": {
|
| 175 |
+
"id": "JxrZ8na0A7Hv"
|
| 176 |
+
},
|
| 177 |
+
"source": [
|
| 178 |
+
"## Generating Embedding\n",
|
| 179 |
+
"\n",
|
| 180 |
+
"An embedding is a numerical representation of text, like a word or sentence, that captures its semantic meaning. Essentially, it's a list of numbers (a vector) that allows computers to understand the relationships and context of words.\n",
|
| 181 |
+
"\n",
|
| 182 |
+
"Let's see how EmbeddingGemma would process three different words `[\"apple\", \"banana\", \"car\"]`.\n",
|
| 183 |
+
"\n",
|
| 184 |
+
"EmbeddingGemma has been trained on vast amounts of text and has learned the relationships between words and concepts."
|
| 185 |
+
]
|
| 186 |
+
},
|
| 187 |
+
{
|
| 188 |
+
"cell_type": "code",
|
| 189 |
+
"execution_count": null,
|
| 190 |
+
"metadata": {
|
| 191 |
+
"id": "o0UK8UVAA9b7",
|
| 192 |
+
"outputId": "37c91847-57de-4a47-9c1a-0adffacd1867"
|
| 193 |
+
},
|
| 194 |
+
"outputs": [
|
| 195 |
+
{
|
| 196 |
+
"name": "stdout",
|
| 197 |
+
"output_type": "stream",
|
| 198 |
+
"text": [
|
| 199 |
+
"[[-0.18476306 0.00167681 0.03773484 ... -0.07996225 -0.02348064\n",
|
| 200 |
+
" 0.00976741]\n",
|
| 201 |
+
" [-0.21189538 -0.02657359 0.02513712 ... -0.08042689 -0.01999852\n",
|
| 202 |
+
" 0.00512146]\n",
|
| 203 |
+
" [-0.18924113 -0.02551468 0.04486253 ... -0.06377774 -0.03699806\n",
|
| 204 |
+
" 0.03973572]]\n",
|
| 205 |
+
"Embedding 1: (768,)\n",
|
| 206 |
+
"Embedding 2: (768,)\n",
|
| 207 |
+
"Embedding 3: (768,)\n"
|
| 208 |
+
]
|
| 209 |
+
}
|
| 210 |
+
],
|
| 211 |
+
"source": [
|
| 212 |
+
"words = [\"apple\", \"banana\", \"car\"]\n",
|
| 213 |
+
"\n",
|
| 214 |
+
"# Calculate embeddings by calling model.encode()\n",
|
| 215 |
+
"embeddings = model.encode(words)\n",
|
| 216 |
+
"\n",
|
| 217 |
+
"print(embeddings)\n",
|
| 218 |
+
"for idx, embedding in enumerate(embeddings):\n",
|
| 219 |
+
" print(f\"Embedding {idx+1} (shape): {embedding.shape}\")"
|
| 220 |
+
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "inuWOAuMBAR7"
},
"source": [
"The model outputs a numerical vector for each input. The actual vectors are very long (768 dimensions), but for readability only a few values are printed here.\n",
"\n",
"The key isn't the individual numbers themselves, but **the distance between the vectors**. If we were to plot these vectors in a multi-dimensional space, the vectors for `apple` and `banana` would be very close to each other, while the vector for `car` would be far away from the other two.\n",
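"\n",
"As a quick check, a minimal sketch (reusing the `model` and `embeddings` variables from the code cell above) scores every word against every other word:\n",
"\n",
"```python\n",
"# Pairwise similarity matrix for the three word embeddings.\n",
"# The apple-banana entry should come out higher than apple-car.\n",
"scores = model.similarity(embeddings, embeddings)\n",
"print(scores)\n",
"```"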
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "2oCpMMJUr4RT"
},
"source": [
"## Determining Similarity\n",
"\n",
"In this section, we use embeddings to determine how semantically similar different sentences are. Here we show examples with high, medium, and low similarity scores.\n",
"\n",
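"By default, Sentence Transformers scores a pair of embeddings with cosine similarity, $\\text{sim}(a, b) = \\frac{a \\cdot b}{\\lVert a \\rVert \\, \\lVert b \\rVert}$. Because EmbeddingGemma ends with a `Normalize()` module, the vectors already have unit length, so this is equivalent to a plain dot product.\n",
"\n",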
"- High Similarity:\n",
|
| 244 |
+
" - Sentence A: \"The chef prepared a delicious meal for the guests.\"\n",
|
| 245 |
+
" - Sentence B: \"A tasty dinner was cooked by the chef for the visitors.\"\n",
|
| 246 |
+
" - Reasoning: Both sentences describe the same event using different words and grammatical structures (active vs. passive voice). They convey the same core meaning.\n",
|
| 247 |
+
"\n",
|
| 248 |
+
"- Medium Similarity:\n",
|
| 249 |
+
" - Sentence A: \"She is an expert in machine learning.\"\n",
|
| 250 |
+
" - Sentence B: \"He has a deep interest in artificial intelligence.\"\n",
|
| 251 |
+
" - Reasoning: The sentences are related as machine learning is a subfield of artificial intelligence. However, they talk about different people with different levels of engagement (expert vs. interest).\n",
|
| 252 |
+
"\n",
|
| 253 |
+
"- Low Similarity:\n",
|
| 254 |
+
" - Sentence A: \"The weather in Tokyo is sunny today.\"\n",
|
| 255 |
+
" - Sentence B: \"I need to buy groceries for the week.\"\n",
|
| 256 |
+
" - Reasoning: The two sentences are on completely unrelated topics and share no semantic overlap."
|
| 257 |
+
]
|
| 258 |
+
},
|
| 259 |
+
{
|
| 260 |
+
"cell_type": "code",
|
| 261 |
+
"execution_count": null,
|
| 262 |
+
"metadata": {
|
| 263 |
+
"id": "VeTEvnTyslyq",
|
| 264 |
+
"outputId": "b387529f-aad8-4150-e4f1-daef4f30cfc0"
|
| 265 |
+
},
|
| 266 |
+
"outputs": [
|
| 267 |
+
{
|
| 268 |
+
"name": "stdout",
|
| 269 |
+
"output_type": "stream",
|
| 270 |
+
"text": [
|
| 271 |
+
"🙋♂️\n",
|
| 272 |
+
"['The chef prepared a delicious meal for the guests.', 'A tasty dinner was cooked by the chef for the visitors.']\n",
|
| 273 |
+
"`-> 🤖 score: 0.8002148\n",
|
| 274 |
+
"🙋♂️\n",
|
| 275 |
+
"['She is an expert in machine learning.', 'He has a deep interest in artificial intelligence.']\n",
|
| 276 |
+
"`-> 🤖 score: 0.45417833\n",
|
| 277 |
+
"🙋♂️\n",
|
| 278 |
+
"['The weather in Tokyo is sunny today.', 'I need to buy groceries for the week.']\n",
|
| 279 |
+
"`-> 🤖 score: 0.22262995\n"
|
| 280 |
+
]
|
| 281 |
+
}
|
| 282 |
+
],
|
| 283 |
+
"source": [
|
| 284 |
+
"# The sentences to encode\n",
|
| 285 |
+
"sentence_high = [\n",
|
| 286 |
+
" \"The chef prepared a delicious meal for the guests.\",\n",
|
| 287 |
+
" \"A tasty dinner was cooked by the chef for the visitors.\"\n",
|
| 288 |
+
"]\n",
|
| 289 |
+
"sentence_medium = [\n",
|
| 290 |
+
" \"She is an expert in machine learning.\",\n",
|
| 291 |
+
" \"He has a deep interest in artificial intelligence.\"\n",
|
| 292 |
+
"]\n",
|
| 293 |
+
"sentence_low = [\n",
|
| 294 |
+
" \"The weather in Tokyo is sunny today.\",\n",
|
| 295 |
+
" \"I need to buy groceries for the week.\"\n",
|
| 296 |
+
"]\n",
|
| 297 |
+
"\n",
|
| 298 |
+
"for sentence in [sentence_high, sentence_medium, sentence_low]:\n",
|
| 299 |
+
" print(\"🙋♂️\")\n",
|
| 300 |
+
" print(sentence)\n",
|
| 301 |
+
" embeddings = model.encode(sentence)\n",
|
| 302 |
+
" similarities = model.similarity(embeddings[0], embeddings[1])\n",
|
| 303 |
+
" print(\"`-> 🤖 score: \", similarities.numpy()[0][0])"
|
| 304 |
+
]
|
| 305 |
+
},
{
"cell_type": "markdown",
"metadata": {
"id": "obfUiizULZE0"
},
"source": [
"### Using Prompts with EmbeddingGemma\n",
"\n",
"To generate the best embeddings with EmbeddingGemma, you should add an \"instructional prompt\" or \"task\" to the beginning of your input text. These prompts optimize the embeddings for specific tasks, such as document retrieval or question answering, and help the model distinguish between different input types, like a search query versus a document.\n",
"\n",
"#### How to Apply Prompts\n",
"\n",
"You can apply a prompt during inference in three ways.\n",
"\n",
"1. **Using the `prompt` argument**<br>\n",
" Pass the full prompt string directly to the `encode` method. This gives you precise control.\n",
" ```python\n",
" embeddings = model.encode(\n",
" sentence,\n",
" prompt=\"task: sentence similarity | query: \"\n",
" )\n",
" ```\n",
"2. **Using the `prompt_name` argument**<br>\n",
" Select a predefined prompt by its name. These prompts are loaded from the model's configuration or during its initialization.\n",
" ```python\n",
" embeddings = model.encode(sentence, prompt_name=\"STS\")\n",
" ```\n",
"3. **Using the Default Prompt**<br>\n",
" If you don't specify either `prompt` or `prompt_name`, the system automatically uses the prompt set as `default_prompt_name`. If no default is set, no prompt is applied.\n",
" ```python\n",
" embeddings = model.encode(sentence)\n",
" ```\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "0p3qe3WDJV-I",
"outputId": "5fa2638e-e67b-479b-fba4-ca89a22cd10e"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Available tasks:\n",
" query: \"task: search result | query: \"\n",
" document: \"title: none | text: \"\n",
" BitextMining: \"task: search result | query: \"\n",
" Clustering: \"task: clustering | query: \"\n",
" Classification: \"task: classification | query: \"\n",
" InstructionRetrieval: \"task: code retrieval | query: \"\n",
" MultilabelClassification: \"task: classification | query: \"\n",
" PairClassification: \"task: sentence similarity | query: \"\n",
" Reranking: \"task: search result | query: \"\n",
" Retrieval: \"task: search result | query: \"\n",
" Retrieval-query: \"task: search result | query: \"\n",
" Retrieval-document: \"title: none | text: \"\n",
" STS: \"task: sentence similarity | query: \"\n",
" Summarization: \"task: summarization | query: \"\n",
"--------------------------------------------------------------------------------\n",
"🙋♂️\n",
"['The chef prepared a delicious meal for the guests.', 'A tasty dinner was cooked by the chef for the visitors.']\n",
"`-> 🤖 score: 0.9363755\n",
"🙋♂️\n",
"['She is an expert in machine learning.', 'He has a deep interest in artificial intelligence.']\n",
"`-> 🤖 score: 0.6425841\n",
"🙋♂️\n",
"['The weather in Tokyo is sunny today.', 'I need to buy groceries for the week.']\n",
"`-> 🤖 score: 0.38587403\n"
]
}
],
"source": [
"print(\"Available tasks:\")\n",
"for name, prefix in model.prompts.items():\n",
" print(f\" {name}: \\\"{prefix}\\\"\")\n",
"print(\"-\"*80)\n",
"\n",
"for sentence in [sentence_high, sentence_medium, sentence_low]:\n",
" print(\"🙋♂️\")\n",
" print(sentence)\n",
" embeddings = model.encode(sentence, prompt_name=\"STS\")\n",
" similarities = model.similarity(embeddings[0], embeddings[1])\n",
" print(\"`-> 🤖 score: \", similarities.numpy()[0][0])\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "2YAqPXDctw2w"
},
"source": [
"#### Use Case: Retrieval-Augmented Generation (RAG)\n",
"\n",
"For RAG systems, use the following `prompt_name` values to create specialized embeddings for your queries and documents:\n",
"\n",
"* **For Queries:** Use `prompt_name=\"Retrieval-query\"`.<br>\n",
" ```python\n",
" query_embedding = model.encode(\n",
" \"How do I use prompts with this model?\",\n",
" prompt_name=\"Retrieval-query\"\n",
" )\n",
" ```\n",
"\n",
"* **For Documents:** Use `prompt_name=\"Retrieval-document\"`. To further improve document embeddings, you can also include a title by using the `prompt` argument directly:<br>\n",
" * **With a title:**<br>\n",
" ```python\n",
" doc_embedding = model.encode(\n",
" \"The document text...\",\n",
" prompt=\"title: Using Prompts in RAG | text: \"\n",
" )\n",
" ```\n",
" * **Without a title:**<br>\n",
" ```python\n",
" doc_embedding = model.encode(\n",
" \"The document text...\",\n",
" prompt=\"title: none | text: \"\n",
" )\n",
" ```\n",
"\n",
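"Putting the two together, here is a minimal retrieval sketch. The tiny `documents` list below is illustrative only, not part of the original tutorial:\n",
"\n",
"```python\n",
"# Hypothetical mini-corpus; in a real RAG system these would be your document chunks.\n",
"documents = [\n",
"    \"Prompts are prepended to the input text before encoding.\",\n",
"    \"EmbeddingGemma provides multiple embedding sizes via MRL.\",\n",
"]\n",
"doc_embeddings = model.encode(documents, prompt_name=\"Retrieval-document\")\n",
"\n",
"query_embedding = model.encode(\n",
"    \"How do I use prompts with this model?\", prompt_name=\"Retrieval-query\"\n",
")\n",
"\n",
"# Rank documents by similarity to the query and print the best match.\n",
"scores = model.similarity(query_embedding, doc_embeddings)\n",
"print(documents[int(scores.argmax())])\n",
"```\n",
"\n",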
"#### Further Reading\n",
|
| 429 |
+
"\n",
|
| 430 |
+
"* For details on all available EmbeddingGemma prompts, see the [model card](http://ai.google.dev/gemma/docs/embeddinggemma/model_card#prompt_instructions).\n",
|
| 431 |
+
"* For general information on prompt templates, see the [Sentence Transformer documentation](https://sbert.net/examples/sentence_transformer/applications/computing-embeddings/README.html#prompt-templates).\n",
|
| 432 |
+
"* For a demo of RAG, see the [Simple RAG example](https://github.com/google-gemini/gemma-cookbook/blob/main/Gemma/%5BGemma_3%5DRAG_with_EmbeddingGemma.ipynb) in the Gemma Cookbook.\n"
|
| 433 |
+
]
|
| 434 |
+
},
|
| 435 |
+
{
|
| 436 |
+
"cell_type": "markdown",
|
| 437 |
+
"metadata": {
|
| 438 |
+
"id": "aQh-QFAPsswb"
|
| 439 |
+
},
|
| 440 |
+
"source": [
|
| 441 |
+
"## Classification\n",
|
| 442 |
+
"\n",
|
| 443 |
+
"Classification is the task of assigning a piece of text to one or more predefined categories or labels. It's one of the most fundamental tasks in Natural Language Processing (NLP).\n",
|
| 444 |
+
"\n",
|
| 445 |
+
"A practical application of text classification is customer support ticket routing. This process automatically directs customer queries to the correct department, saving time and reducing manual work."
|
| 446 |
+
]
|
| 447 |
+
},
|
| 448 |
+
{
|
| 449 |
+
"cell_type": "code",
|
| 450 |
+
"execution_count": null,
|
| 451 |
+
"metadata": {
|
| 452 |
+
"id": "C2Ufawl-tXvr",
|
| 453 |
+
"outputId": "347bd68c-dfee-470d-eef7-e3af5d096e91"
|
| 454 |
+
},
|
| 455 |
+
"outputs": [
|
| 456 |
+
{
|
| 457 |
+
"name": "stdout",
|
| 458 |
+
"output_type": "stream",
|
| 459 |
+
"text": [
|
| 460 |
+
"tensor([[0.4673, 0.5145, 0.3604],\n",
|
| 461 |
+
" [0.4191, 0.5010, 0.5966]])\n",
|
| 462 |
+
"tensor([1, 2])\n",
|
| 463 |
+
"🙋♂️ Excuse me, the app freezes on the login screen. It won't work even when I try to reset my password. -> 🤖 Technical Support\n",
|
| 464 |
+
"🙋♂️ I would like to inquire about your enterprise plan pricing and features for a team of 50 people. -> 🤖 Sales Inquiry\n"
|
| 465 |
+
]
|
| 466 |
+
}
|
| 467 |
+
],
|
| 468 |
+
"source": [
|
| 469 |
+
"labels = [\"Billing Issue\", \"Technical Support\", \"Sales Inquiry\"]\n",
|
| 470 |
+
"\n",
|
| 471 |
+
"sentence = [\n",
|
| 472 |
+
" \"Excuse me, the app freezes on the login screen. It won't work even when I try to reset my password.\",\n",
|
| 473 |
+
" \"I would like to inquire about your enterprise plan pricing and features for a team of 50 people.\",\n",
|
| 474 |
+
"]\n",
|
| 475 |
+
"\n",
|
| 476 |
+
"# Calculate embeddings by calling model.encode()\n",
|
| 477 |
+
"label_embeddings = model.encode(labels, prompt_name=\"Classification\")\n",
|
| 478 |
+
"embeddings = model.encode(sentence, prompt_name=\"Classification\")\n",
|
| 479 |
+
"\n",
|
| 480 |
+
"# Calculate the embedding similarities\n",
|
| 481 |
+
"similarities = model.similarity(embeddings, label_embeddings)\n",
|
| 482 |
+
"print(similarities)\n",
|
| 483 |
+
"\n",
|
| 484 |
+
"idx = similarities.argmax(1)\n",
|
| 485 |
+
"print(idx)\n",
|
| 486 |
+
"\n",
|
| 487 |
+
"for example in sentence:\n",
|
| 488 |
+
" print(\"🙋♂️\", example, \"-> 🤖\", labels[idx[sentence.index(example)]])"
|
| 489 |
+
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "IRUU2EIDPSmW"
},
"source": [
"## Matryoshka Representation Learning (MRL)\n",
"\n",
"EmbeddingGemma leverages MRL to provide multiple embedding sizes from one model. It's a clever training method that creates a single, high-quality embedding where the most important information is concentrated at the beginning of the vector.\n",
"\n",
"This means you can get a smaller but still very useful embedding by simply taking the first `N` dimensions of the full embedding. Using smaller, truncated embeddings is significantly cheaper to store and faster to process, but this efficiency comes at the cost of potentially lower embedding quality. MRL lets you choose the optimal balance between speed and accuracy for your application's specific needs.\n",
"\n",
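"For intuition, truncation really is just slicing off the tail of the vector. Here is a minimal sketch (the helper name and the `full_embedding` input are illustrative, not part of the Sentence Transformers API):\n",
"\n",
"```python\n",
"import numpy as np\n",
"\n",
"def truncate_and_renormalize(full_embedding: np.ndarray, n_dims: int) -> np.ndarray:\n",
"    \"\"\"Keep the first n_dims values and rescale the result to unit length.\"\"\"\n",
"    truncated = full_embedding[:n_dims]\n",
"    return truncated / np.linalg.norm(truncated)\n",
"```\n",
"\n",
"In practice you don't need to do this by hand: as shown below, `encode` accepts a `truncate_dim` argument that does the slicing for you.\n",
"\n",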
"Let's use three words `[\"apple\", \"banana\", \"car\"]` and create simplified embeddings to see how MRL works."
|
| 504 |
+
]
|
| 505 |
+
},
|
| 506 |
+
{
|
| 507 |
+
"cell_type": "code",
|
| 508 |
+
"execution_count": null,
|
| 509 |
+
"metadata": {
|
| 510 |
+
"id": "B1q1F9I5PYSq",
|
| 511 |
+
"outputId": "a5b28e04-4783-4d79-ae82-3fac7e554a7a"
|
| 512 |
+
},
|
| 513 |
+
"outputs": [
|
| 514 |
+
{
|
| 515 |
+
"name": "stdout",
|
| 516 |
+
"output_type": "stream",
|
| 517 |
+
"text": [
|
| 518 |
+
"similarity function: cosine\n",
|
| 519 |
+
"tensor([[0.7510, 0.6685]])\n",
|
| 520 |
+
"🙋♂️ apple vs. banana -> 🤖 score: 0.75102395\n",
|
| 521 |
+
"🙋♂️ apple vs. car -> 🤖 score: 0.6684626\n"
|
| 522 |
+
]
|
| 523 |
+
}
|
| 524 |
+
],
|
| 525 |
+
"source": [
|
| 526 |
+
"def check_word_similarities():\n",
|
| 527 |
+
" # Calculate the embedding similarities\n",
|
| 528 |
+
" print(\"similarity function: \", model.similarity_fn_name)\n",
|
| 529 |
+
" similarities = model.similarity(embeddings[0], embeddings[1:])\n",
|
| 530 |
+
" print(similarities)\n",
|
| 531 |
+
"\n",
|
| 532 |
+
" for idx, word in enumerate(words[1:]):\n",
|
| 533 |
+
" print(\"🙋♂️ apple vs.\", word, \"-> 🤖 score: \", similarities.numpy()[0][idx])\n",
|
| 534 |
+
"\n",
|
| 535 |
+
"# Calculate embeddings by calling model.encode()\n",
|
| 536 |
+
"embeddings = model.encode(words, prompt_name=\"STS\")\n",
|
| 537 |
+
"\n",
|
| 538 |
+
"check_word_similarities()"
|
| 539 |
+
]
|
| 540 |
+
},
|
| 541 |
+
{
|
| 542 |
+
"cell_type": "markdown",
|
| 543 |
+
"metadata": {
|
| 544 |
+
"id": "_iv1xG0TPxkm"
|
| 545 |
+
},
|
| 546 |
+
"source": [
|
| 547 |
+
"Now, for a faster application, you don't need a new model. Simply **truncate** the full embeddings to the first **512 dimensions**. For optimal results, it is also recommended to set `normalize_embeddings=True`, which scales the vectors to a unit length of 1."
|
| 548 |
+
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "9Ue4aWh8PzdL",
"outputId": "176dabd4-9d9c-4ce9-c7e5-472ba47ed55f"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Embedding 1: (512,)\n",
"Embedding 2: (512,)\n",
"Embedding 3: (512,)\n",
"--------------------------------------------------------------------------------\n",
"similarity function: cosine\n",
"tensor([[0.7674, 0.7041]])\n",
"🙋♂️ apple vs. banana -> 🤖 score: 0.767427\n",
"🙋♂️ apple vs. car -> 🤖 score: 0.7040509\n"
]
}
],
"source": [
"embeddings = model.encode(words, truncate_dim=512, normalize_embeddings=True)\n",
"\n",
"for idx, embedding in enumerate(embeddings):\n",
" print(f\"Embedding {idx+1}: {embedding.shape}\")\n",
"\n",
"print(\"-\"*80)\n",
"check_word_similarities()"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "lgkmgzfVP24M"
},
"source": [
"In extremely constrained environments, you can further shorten the embeddings to just **256 dimensions**. You can also use the more efficient **dot-product** for similarity calculations instead of the standard **cosine** similarity. Since the embeddings are normalized to unit length, the dot product produces the same ranking as cosine similarity while skipping the normalization step."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "Gi4NlPv-P4RS",
"outputId": "656d8d6a-1e79-41be-f17a-cab136bf27ea"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Embedding 1: (256,)\n",
"Embedding 2: (256,)\n",
"Embedding 3: (256,)\n",
"--------------------------------------------------------------------------------\n",
"similarity function: dot\n",
"tensor([[0.7855, 0.7382]])\n",
"🙋♂️ apple vs. banana -> 🤖 score: 0.7854644\n",
"🙋♂️ apple vs. car -> 🤖 score: 0.7382126\n"
]
}
],
"source": [
"model = SentenceTransformer(model_id, truncate_dim=256, similarity_fn_name=\"dot\").to(device=device)\n",
"embeddings = model.encode(words, prompt_name=\"STS\", normalize_embeddings=True)\n",
"\n",
"for idx, embedding in enumerate(embeddings):\n",
" print(f\"Embedding {idx+1}: {embedding.shape}\")\n",
"\n",
"print(\"-\"*80)\n",
"check_word_similarities()"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "RYr9uSI_t3fm"
},
"source": [
"## Summary and next steps\n",
"\n",
"You are now equipped to generate high-quality text embeddings using EmbeddingGemma and the Sentence Transformers library. Apply these skills to build powerful features like semantic similarity, text classification, and Retrieval-Augmented Generation (RAG) systems, and continue exploring what's possible with Gemma models.\n",
"\n",
"Check out the following docs next:\n",
"\n",
"* [Fine-tune EmbeddingGemma](https://ai.google.dev/gemma/docs/embeddinggemma/fine-tuning-embeddinggemma-with-sentence-transformers)\n",
"* [Simple RAG example](https://github.com/google-gemini/gemma-cookbook/blob/main/Gemma/%5BGemma_3%5DRAG_with_EmbeddingGemma.ipynb) in the Gemma Cookbook\n"
]
}
],
"metadata": {
"colab": {
"name": "inference-embeddinggemma-with-sentence-transformers.ipynb",
"provenance": [],
"toc_visible": true
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
}
},
"nbformat": 4,
"nbformat_minor": 0
}
sentence_bert_config.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:77c0aa81fa7c3841c03c150cbac7d613a82043a8768532db55af3763b1fbcfe3
size 61
special_tokens_map.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:88d16a8f41b25d29d30b7865c65404fa9527bbc9c5a652f68a1c25c4b0ae0fd1
size 695
tokenizer.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6852f8d561078cc0cebe70ca03c5bfdd0d60a45f9d2e0e1e4cc05b68e9ec329e
size 33385008
tokenizer.model
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1299c11d7cf632ef3b4e11937501358ada021bbdf7c47638d13c0ee982f2e79c
size 4689074
tokenizer_config.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:24479813a1b6505ea9e95be3021b1c18f7d93e770207dcfb6c25d99631de2130
size 1206691