Upload 5 files

- README.md +24 -0
- config.json +14 -0
- model.safetensors +3 -0
- modules.json +14 -0
- tokenizer.json +0 -0

README.md
ADDED
@@ -0,0 +1,24 @@
+---
+library_name: model2vec
+license: mit
+model_name: cnmoro/custom-model2vec-tokenlearn-medium
+tags:
+- embeddings
+- static-embeddings
+- sentence-transformers
+---
+A custom model2vec model, trained using a modified version of the [tokenlearn](https://github.com/MinishLab/tokenlearn) library.
+
+The output dimension is 256, and the vocabulary size is 249,999.
+
+The training process used a mix of English (10%) and Portuguese (90%) texts.
+
+```python
+from sentence_transformers import SentenceTransformer
+
+# Load a pretrained Sentence Transformer model
+model = SentenceTransformer("cnmoro/custom-model2vec-tokenlearn-medium")
+
+# Compute text embeddings
+embeddings = model.encode(["Example sentence"])
+```
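The README's usage snippet goes through sentence-transformers, but a static model2vec model can also be loaded with the model2vec package directly. A minimal sketch, assuming the model2vec package's `StaticModel.from_pretrained` and `encode` APIs and the repository id given in the README:

```python
from model2vec import StaticModel

# Load the same repository without sentence-transformers
model = StaticModel.from_pretrained("cnmoro/custom-model2vec-tokenlearn-medium")

# One static embedding per input text; 256 dimensions per the README
embeddings = model.encode(["Uma frase de exemplo", "An example sentence"])
print(embeddings.shape)  # expected: (2, 256)
```
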
config.json
ADDED
@@ -0,0 +1,14 @@
+{
+  "model_type": "model2vec",
+  "architectures": [
+    "StaticModel"
+  ],
+  "tokenizer_name": "nomic-ai/nomic-embed-text-v2-moe",
+  "apply_pca": 256,
+  "sif_coefficient": 0.0001,
+  "hidden_dim": 256,
+  "seq_length": 1000000,
+  "normalize": true,
+  "pooling": "mean",
+  "embedding_dtype": "float32"
+}
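For context on how a config.json like the one above is typically produced: model2vec distillation from the Sentence Transformer named in `tokenizer_name`, with PCA down to 256 dimensions, yields matching `apply_pca` / `hidden_dim` values. A minimal sketch, assuming the `model2vec.distill.distill` API; the actual model was trained with a modified tokenlearn pipeline per the README, so this is illustrative only:

```python
from model2vec.distill import distill

# Illustrative only: distill static embeddings from the model named in
# "tokenizer_name", reducing to 256 dimensions with PCA
# (matches "apply_pca" / "hidden_dim" in the config above).
m2v_model = distill(
    model_name="nomic-ai/nomic-embed-text-v2-moe",
    pca_dims=256,
)

# Persist in the same layout as this repository (config.json, model.safetensors, ...)
m2v_model.save_pretrained("custom-model2vec")
```
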
model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ccec285a91720334f3f80e0a551c29dfca821d0dc3c6f75b4593da745eccd400
+size 255999064
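As a sanity check, the pointer's size field is consistent with the figures in the README: a float32 embedding matrix of 249,999 tokens × 256 dimensions takes 249,999 × 256 × 4 = 255,998,976 bytes, and the remaining 88 bytes are plausibly the safetensors header.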
modules.json
ADDED
@@ -0,0 +1,14 @@
+[
+  {
+    "idx": 0,
+    "name": "0",
+    "path": ".",
+    "type": "sentence_transformers.models.StaticEmbedding"
+  },
+  {
+    "idx": 1,
+    "name": "1",
+    "path": "1_Normalize",
+    "type": "sentence_transformers.models.Normalize"
+  }
+]
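modules.json declares a two-module Sentence Transformer: a StaticEmbedding module at the repository root followed by a Normalize module. A minimal sketch of building that same composition by hand, assuming a sentence-transformers version that ships `StaticEmbedding` and its `from_model2vec` loader:

```python
from sentence_transformers import SentenceTransformer
from sentence_transformers.models import Normalize, StaticEmbedding

# Module 0: static token embeddings loaded from the model2vec repository (path ".")
static = StaticEmbedding.from_model2vec("cnmoro/custom-model2vec-tokenlearn-medium")

# Module 1: L2 normalization, matching the "1_Normalize" entry
model = SentenceTransformer(modules=[static, Normalize()])

embeddings = model.encode(["Example sentence"])
print(embeddings.shape)  # expected: (1, 256)
```
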
tokenizer.json
ADDED
The diff for this file is too large to render. See the raw diff.