Initial model upload - clean repository
- .gitattributes +2 -0
- 1_Pooling/config.json +10 -0
- README.md +326 -0
- config.json +45 -0
- config_sentence_transformers.json +14 -0
- model.onnx +3 -0
- model.safetensors +3 -0
- model_performance_2d.png +3 -0
- modules.json +14 -0
- post_train_retrieval_2d.png +3 -0
- sentence_bert_config.json +4 -0
- special_tokens_map.json +51 -0
- tokenizer.json +0 -0
- tokenizer_config.json +59 -0
.gitattributes
CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+model_performance_2d.png filter=lfs diff=lfs merge=lfs -text
+post_train_retrieval_2d.png filter=lfs diff=lfs merge=lfs -text
1_Pooling/config.json
ADDED
@@ -0,0 +1,10 @@
{
  "word_embedding_dimension": 1024,
  "pooling_mode_cls_token": false,
  "pooling_mode_mean_tokens": true,
  "pooling_mode_max_tokens": false,
  "pooling_mode_mean_sqrt_len_tokens": false,
  "pooling_mode_weightedmean_tokens": false,
  "pooling_mode_lasttoken": false,
  "include_prompt": true
}
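
For reference, this configuration selects masked mean pooling over the 1,024-dimensional token embeddings. A minimal NumPy sketch of that operation (illustrative only, not the Sentence Transformers implementation):

```python
import numpy as np

def mean_pool(token_embeddings: np.ndarray, attention_mask: np.ndarray) -> np.ndarray:
    """Masked mean pooling: average token vectors, ignoring padding positions.

    token_embeddings: (batch, seq_len, 1024); attention_mask: (batch, seq_len)
    """
    mask = attention_mask[..., None].astype(token_embeddings.dtype)  # (batch, seq_len, 1)
    summed = (token_embeddings * mask).sum(axis=1)
    counts = np.clip(mask.sum(axis=1), 1e-9, None)  # avoid division by zero
    return summed / counts
```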
README.md
ADDED
@@ -0,0 +1,326 @@
---
language:
- tr
- en
library_name: sentence-transformers
license: apache-2.0
tags:
- sentence-transformers
- sentence-similarity
- feature-extraction
- information-retrieval
- dense-retrieval
- turkish
- legal
- turkish-legal
- mecellem
- modernbert
- TRUBA
- MN5
datasets:
- newmindai/ms-marco-turkish-triplets
- newmindai/EuroHPC-Legal
base_model: newmindai/Mursit-Large
metrics:
- ndcg@10
- mrr@10
- map@100
pipeline_tag: sentence-similarity
---

# Mursit-Large-TR-Retrieval

[GitHub](https://github.com/newmindai/mecellem-models) · [Demo Space](https://huggingface.co/spaces/newmindai/Mizan) · [License: Apache 2.0](https://opensource.org/licenses/Apache-2.0)

## Model Description

Mursit-Large-TR-Retrieval is a large-scale Turkish embedding model, pre-trained entirely from scratch on a Turkish-dominant corpus and fine-tuned for retrieval tasks. The model is based on the ModernBERT-large architecture (403M parameters) and optimized specifically for Turkish legal-domain applications. It achieves a 56.87 MTEB score and a 46.56 Legal score, ranking among the top Turkish embedding models.

**Key Features:**
- Pre-trained from scratch on approximately 112.7 billion tokens of Turkish-dominant text
- Post-trained for embedding tasks with contrastive learning on the MS MARCO-TR dataset
- Strong performance on Turkish legal retrieval benchmarks
- Custom tokenizer trained on Turkish legal documents

**Model Type:** Embedding
**Parameters:** 403M
**Base Model:** newmindai/Mursit-Large
**Architecture:** ModernBERT-large
**Embedding Dimension:** 1,024
**Max Sequence Length:** 2,048 tokens

### Architecture Details

The model follows the ModernBERT-large architecture (the local/global attention layout is sketched after this list):

- **Attention Mechanism:** Alternating local and global attention
- **Normalization:** Pre-layer normalization with RMSNorm
- **Activation:** GeGLU (Gated Linear Units with GELU) in MLP layers
- **Position Embeddings:** Rotary positional embeddings (RoPE) with θ = 20,000
- **Context Length:** 2,048 tokens
- **Layers:** 28 transformer layers
- **Hidden Size:** 1,024
- **FFN Size:** 2,624
- **Attention Heads:** 16 heads of 64 dimensions each
- **Window Size:** 128 (sliding-window attention in local layers)
- **Vocabulary Size:** 59,008 tokens
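
A small illustrative sketch of that layout (assumption: the every-third-layer rule follows `global_attn_every_n_layers: 3` and `local_attention: 128` in this repo's `config.json`; this mirrors, but is not, the actual modeling code):

```python
NUM_LAYERS = 28      # num_hidden_layers in config.json
GLOBAL_EVERY = 3     # global_attn_every_n_layers
LOCAL_WINDOW = 128   # local_attention (sliding-window size)

# Layers 0, 3, 6, ... attend globally; all others use a 128-token sliding window.
for layer in range(NUM_LAYERS):
    kind = "global" if layer % GLOBAL_EVERY == 0 else f"local (window={LOCAL_WINDOW})"
    print(f"layer {layer:2d}: {kind}")
```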

### Training Details

**Pre-training:**
- **Dataset:** Turkish-dominant corpus totaling approximately 112.7 billion tokens
- **Legal Sources:**
  - Court of Cassation (Yargıtay): 10.3M sequences, ~3.43B tokens
  - Council of State (Danıştay): 151K sequences, ~0.11B tokens
  - Academic theses (YÖKTEZ): 21.1M sequences, ~9.61B tokens (after DocsOCR processing)
- **General Turkish Sources:**
  - FineWeb2: general Turkish web data
  - CulturaX: multilingual corpus (Turkish subset)
  - Total general Turkish: 212M sequences, ~96.17B tokens
- **Data Processing:** SemHash-based semantic deduplication, FineWeb quality filtering, URL-based filtering, page-packing for YÖKTEZ documents
- **Training Method:** Masked Language Modeling (MLM) with 15% masking probability
- **Masking Strategy:** 80% [MASK], 10% random token, 10% unchanged (the 80-10-10 strategy; sketched after this list)
- **Framework:** MosaicML Composer with the Decoupled StableAdamW optimizer
- **Learning Rate:** 8×10⁻⁴ with a warmup-stable-decay schedule
- **Precision:** BF16 mixed precision
- **Hardware Infrastructure:**
  - **System:** MareNostrum 5 ACC partition at Barcelona Supercomputing Center (BSC)
  - **Compute Nodes:** 32 nodes, each with 4× NVIDIA Hopper H100 64GB GPUs, 80 CPU cores, and 512GB DDR5 memory (128 GPUs total)
  - **Interconnect:** 800 Gb/s InfiniBand between nodes; NVLink between the 4 GPUs within each node
  - **Distributed Training:** multi-node training across all 32 nodes
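
A minimal NumPy sketch of the 80-10-10 masking rule (illustrative only; the actual run used MosaicML Composer's MLM data collator, and special-token handling is omitted here):

```python
import numpy as np

rng = np.random.default_rng(0)

def mlm_mask(input_ids, mask_token_id, vocab_size, mask_prob=0.15):
    """BERT-style 80-10-10 masking: of the 15% selected positions,
    80% -> [MASK], 10% -> random token, 10% -> left unchanged."""
    input_ids = input_ids.copy()
    labels = np.full_like(input_ids, -100)            # -100 = ignored by the loss
    selected = rng.random(input_ids.shape) < mask_prob
    labels[selected] = input_ids[selected]            # predict only selected positions
    roll = rng.random(input_ids.shape)
    input_ids[selected & (roll < 0.8)] = mask_token_id
    rand_pos = selected & (roll >= 0.8) & (roll < 0.9)
    input_ids[rand_pos] = rng.integers(0, vocab_size, size=int(rand_pos.sum()))
    # the remaining 10% of selected positions stay unchanged
    return input_ids, labels
```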

**Post-training for Embeddings:**
- **Dataset:** MS MARCO-TR (920,106 triplets)
- **Loss Function:** CachedGISTEmbedLoss with a BGE-M3 guide model (568M parameters, 1,024-dimensional embeddings); a training sketch follows this list
- **Training Framework:** Sentence Transformers
- **Optimization:** Contrastive learning on a Turkish passage-ranking dataset
- **Hardware:** 4× H100 GPUs (single node, NVLink interconnect)
- **Optimizer:** AdamW (learning rate: 2×10⁻⁵, weight decay: 0.01)
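
A minimal Sentence Transformers sketch of this post-training setup (assumptions: the triplet dataset's column layout and any hyperparameters not listed above, such as `mini_batch_size`, are illustrative):

```python
from datasets import load_dataset
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses

model = SentenceTransformer("newmindai/Mursit-Large")   # base MLM checkpoint
guide = SentenceTransformer("BAAI/bge-m3")              # guide model for GIST filtering

# (anchor, positive, negative) triplets; column names assumed
train_dataset = load_dataset("newmindai/ms-marco-turkish-triplets", split="train")

# CachedGISTEmbedLoss: in-batch contrastive loss where the guide model filters
# false negatives; gradient caching allows large effective batch sizes
loss = losses.CachedGISTEmbedLoss(model, guide=guide, mini_batch_size=32)

trainer = SentenceTransformerTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
```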

## Performance on MTEB-Turkish Benchmark

The following visualization compares the model's performance with other Turkish language models:

![Model Performance](model_performance_2d.png)

*Model performance comparison: Legal score vs. MTEB score. Embedding models (green triangles) outperform MLM models. Mursit-Large-TR-Retrieval reaches a 56.87 MTEB score and a 46.56 Legal score, ranking among the top Turkish embedding models.*

The model was evaluated on the comprehensive MTEB-Turkish benchmark, which includes 17 tasks across 5 task types and covers both general Turkish NLP tasks and domain-specific legal retrieval tasks.

### Comprehensive Benchmark Results

The following table presents evaluation results for all models evaluated on the MTEB-Turkish benchmark. *This model's results are highlighted in italics.*

| Model | MTEB | Legal | Cls. | Clus. | Pair | Ret. | STS | Cont. | Reg. | Case | Params | Type |
|-------|------|-------|------|-------|------|------|-----|-------|------|------|--------|------|
| embeddinggemma-300m | **65.42** | 50.63 | **77.74** | **45.05** | **80.02** | **55.06** | 69.22 | 83.97 | **39.56** | 28.38 | 307M | Emb. |
| bge-m3 | 62.87 | **51.16** | 75.35 | 35.86 | 78.88 | 54.42 | **69.83** | **86.08** | 38.09 | **29.3** | 567M | Emb. |
| *Mursit-Large-TR-Retrieval* | 56.87 | 46.56 | 67.72 | 41.15 | 59.78 | 51.69 | 64.01 | 81.78 | 32.67 | 25.24 | 403M | Emb. |
| Mursit-Embed-Qwen3-1.7B-TR | 56.84 | 34.76 | 68.46 | 42.22 | 59.67 | 50.1 | 63.77 | 70.22 | 17.94 | 16.11 | 1.7B | CLM-E. |
| Mursit-Base-TR-Retrieval | 55.86 | 47.52 | 66.25 | 39.75 | 61.31 | 50.07 | 61.9 | 80.4 | 34.1 | 28.07 | 155M | Emb. |
| Mursit-Embed-Qwen3-4B-TR | 53.65 | 37.0 | 67.29 | 36.68 | 58.36 | 51.12 | 54.77 | 69.25 | 24.21 | 17.56 | 4B | CLM-E. |
| bert-base-turkish-uncased | 46.23 | 24.94 | 68.05 | 33.81 | 60.44 | 32.01 | 36.85 | 52.47 | 12.05 | 10.29 | 110M | MLM |
| turkish-large-bert-cased | 45.3 | 19.12 | 67.43 | 34.24 | 60.11 | 28.68 | 36.04 | 47.57 | 5.93 | 3.85 | 337M | MLM |
| bert-base-turkish-cased | 45.17 | 24.41 | 66.39 | 35.28 | 60.05 | 30.52 | 33.62 | 54.03 | 10.13 | 9.07 | 110M | MLM |
| turkish-base-bert-uncased | 44.68 | 27.58 | 66.22 | 30.23 | 58.84 | 31.4 | 36.74 | 56.6 | 13.39 | 12.74 | 110M | MLM |
| BERTurk-Legal | 42.02 | 32.63 | 60.61 | 26.24 | 59.51 | 25.8 | 37.94 | 61.4 | 15.51 | 20.99 | 184M | MLM |
| Mursit-Large | 41.75 | 23.71 | 62.95 | 25.34 | 58.04 | 27.4 | 35.01 | 42.74 | 11.29 | 17.1 | 403M | MLM |
| Mursit-Base | 40.23 | 17.93 | 59.78 | 25.48 | 58.65 | 20.82 | 36.45 | 36.0 | 7.4 | 10.4 | 155M | MLM |
| mmBERT-base | 39.65 | 12.15 | 61.84 | 26.77 | 59.25 | 15.83 | 34.56 | 34.45 | 1.33 | 0.68 | 306M | MLM |
| TabiBERT | 37.77 | 11.5 | 59.63 | 25.75 | 58.19 | 14.96 | 30.32 | 32.02 | 1.86 | 0.63 | 148M | MLM |
| ModernBERT-base | 23.8 | 2.99 | 39.06 | 2.01 | 53.95 | 2.1 | 21.91 | 7.92 | 0.62 | 0.43 | 149M | MLM |
| ModernBERT-large | 23.74 | 2.44 | 39.44 | 3.9 | 53.73 | 1.8 | 19.85 | 6.12 | 0.62 | 0.59 | 394M | MLM |

**Column abbreviations:** MTEB = mean performance across task types; Legal = weighted average of the Contracts, Regulation, and Caselaw scores; Cls. = accuracy on Turkish classification tasks; Clus. = V-measure on clustering tasks; Pair = average precision on pair-classification tasks such as NLI; Ret. = nDCG@10 on information-retrieval tasks; STS = Spearman correlation on semantic textual similarity; Cont. = nDCG@10 on legal contract retrieval; Reg. = nDCG@10 on regulatory-text retrieval; Case = nDCG@10 on case-law retrieval; Params = number of model parameters; Type = model type (Emb. = embedding, CLM-E. = CLM embedding, MLM = masked language model). **Bold values** indicate the highest score in each column.

**Key Findings:**
- With a 56.87 MTEB score and a 46.56 Legal score, the model ranks among the top Turkish embedding models
- Strong Contracts retrieval (81.78 nDCG@10) demonstrates effectiveness for legal document search
- Post-training on MS MARCO-TR substantially improves retrieval over the base MLM models

### Post-Training Performance Analysis

The following visualization shows the impact of post-training on retrieval performance:

![Post-Training Retrieval Performance](post_train_retrieval_2d.png)

*Post-training retrieval performance comparison. The post-trained models (Mursit-Base-TR-Retrieval and Mursit-Large-TR-Retrieval) show significant improvements on legal-domain retrieval tasks over the base MLM models.*

## Reproducibility

To reproduce the benchmark results and training procedures for this model, see:

- **Post-Training:** [github.com/newmindai/mecellem-models/training/post-training-retrieval](https://github.com/newmindai/mecellem-models/tree/main/training/post-training-retrieval) - code and configurations for post-training retrieval models on the MS MARCO-TR dataset
- **Embedding Benchmark Results:** [github.com/newmindai/mecellem-models/benchmark/embedding_model](https://github.com/newmindai/mecellem-models/tree/main/benchmark/embedding_model) - code and evaluation configurations for reproducing the MTEB-Turkish benchmark results

## Usage

### Installation

```bash
pip install sentence-transformers
```

### Basic Usage

```python
from sentence_transformers import SentenceTransformer

# Load the model
model = SentenceTransformer("newmindai/Mursit-Large-TR-Retrieval")

# Encode sentences
sentences = [
    "Türk hukuk sistemi medeni hukuk geleneğine dayanır",      # "The Turkish legal system is based on the civil-law tradition"
    "Anayasa Türkiye Cumhuriyeti'nin temel hukuk belgesidir"   # "The Constitution is the fundamental legal document of the Republic of Türkiye"
]

embeddings = model.encode(sentences)
print(embeddings.shape)  # (2, 1024)
```

### Information Retrieval

```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("newmindai/Mursit-Large-TR-Retrieval")

query = "Sözleşme feshi nasıl yapılır?"  # "How is a contract terminated?"
documents = [
    "Sözleşmeler yazılı olarak feshedilebilir.",  # "Contracts may be terminated in writing."
    "İş kanunu çalışma koşullarını düzenler."     # "The labor law regulates working conditions."
]

# Embed the query and documents, then rank documents by cosine similarity
query_embedding = model.encode(query, convert_to_tensor=True)
doc_embeddings = model.encode(documents, convert_to_tensor=True)
scores = util.cos_sim(query_embedding, doc_embeddings)[0]

results = [(doc, score.item()) for doc, score in zip(documents, scores)]
results.sort(key=lambda x: x[1], reverse=True)

for doc, score in results:
    print(f"Score: {score:.4f} - {doc}")
```

## ONNX Model Inference

This section demonstrates how to run the ONNX model from Hugging Face for text embedding generation.

### Exporting the Model to ONNX

To export the model to ONNX format, use the `optimum-cli` command:

```bash
optimum-cli export onnx \
  -m newmindai/Mursit-Large-TR-Retrieval \
  --task feature-extraction \
  onnx/MursitLargeTRRetrieval
```

This creates the `model.onnx` file in the specified output directory.

### Installation

```bash
pip install onnxruntime-gpu transformers huggingface_hub numpy
```

### Usage

```python
import numpy as np
import onnxruntime as ort
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer

model_id = "newmindai/Mursit-Large-TR-Retrieval"

# Load the tokenizer and download the ONNX model from the Hugging Face Hub
tokenizer = AutoTokenizer.from_pretrained(model_id)
onnx_path = hf_hub_download(repo_id=model_id, filename="model.onnx")

# Use the GPU if available, otherwise fall back to the CPU
providers = (
    ["CUDAExecutionProvider", "CPUExecutionProvider"]
    if "CUDAExecutionProvider" in ort.get_available_providers()
    else ["CPUExecutionProvider"]
)

sess = ort.InferenceSession(onnx_path, providers=providers)

texts = ["This is a test"]
inputs = tokenizer(texts, padding=True, truncation=True, return_tensors="np")

outputs = sess.run(None, {
    "input_ids": inputs["input_ids"].astype(np.int64),
    "attention_mask": inputs["attention_mask"].astype(np.int64),
})

# A feature-extraction export returns token-level hidden states as its first
# output, so apply the masked mean pooling configured in 1_Pooling/config.json.
# If your export already exposes a sentence_embedding output, use that instead.
token_embeddings = outputs[0]                                  # (batch, seq_len, 1024)
mask = inputs["attention_mask"][..., None].astype(np.float32)  # (batch, seq_len, 1)
embeddings = (token_embeddings * mask).sum(axis=1) / np.clip(mask.sum(axis=1), 1e-9, None)

print(embeddings.shape)  # (1, 1024)
print(embeddings[:1])
```

## Features

- **Automatic GPU/CPU selection:** uses CUDA if available, otherwise falls back to the CPU
- **Hugging Face integration:** downloads model files directly from the Hugging Face Hub
- **Simple API:** an easy-to-use interface for text embedding generation

## Use Cases

- Semantic search over Turkish legal documents
- Legal document retrieval and ranking
- Contract similarity and matching
- Regulation compliance checking
- Case-law research and discovery
- Question-answering systems for the legal domain

## Acknowledgments

This work was supported by the EuroHPC Joint Undertaking through project etur46, with access to the MareNostrum 5 supercomputer hosted by Barcelona Supercomputing Center (BSC), Spain. MareNostrum 5 is owned by EuroHPC JU and operated by BSC. We are grateful to the BSC support team for their assistance with job scheduling, environment configuration, and technical guidance throughout the project.

The numerical calculations reported in this work were performed in part at the TÜBİTAK ULAKBİM High Performance and Grid Computing Center (TRUBA resources). The authors gratefully acknowledge MINERVA Support for expert guidance and collaboration opportunities in HPC-AI integration.

## References

If you use this model, please cite our paper:

```bibtex
@article{mecellem2026,
  title={Mecellem Models: Turkish Models Trained from Scratch and Continually Pre-trained for the Legal Domain},
  author={Uğur, Özgür and Göksu, Mahmut and Şavirdi, Esra and Çimen, Mahmut and Yılmaz, Musa and Demir, Alp Talha and Güllüce, Rumeysa and Çetin, İclal and Sağbaş, Ömer Can},
  journal={Procedia Computer Science},
  year={2026},
  publisher={Elsevier}
}
```

### Base Model References

```bibtex
@inproceedings{modernbert2025,
  title={ModernBERT: A Modern Bidirectional Encoder Transformer},
  author={Answer.AI and LightOn},
  booktitle={Proceedings of the 2025 Conference on Language Models},
  year={2025}
}
```

```bibtex
@misc{bge-m3,
  title={BGE M3-Embedding: Multi-Lingual, Multi-Functionality, Multi-Granularity Text Embeddings Through Self-Knowledge Distillation},
  author={Chen, Jianlv and Xiao, Shitao and Zhang, Peitian and Luo, Kun and Lian, Defu and Liu, Zheng},
  year={2024},
  eprint={2402.03216},
  archivePrefix={arXiv},
  primaryClass={cs.CL}
}
```

<!-- Updated: 2026-01-15 09:38:18 -->
config.json
ADDED
@@ -0,0 +1,45 @@
{
  "architectures": [
    "ModernBertModel"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "bos_token_id": 1,
  "classifier_activation": "silu",
  "classifier_bias": false,
  "classifier_dropout": 0.0,
  "classifier_pooling": "mean",
  "cls_token_id": 1,
  "decoder_bias": true,
  "deterministic_flash_attn": false,
  "dtype": "float32",
  "embedding_dropout": 0.0,
  "eos_token_id": 2,
  "global_attn_every_n_layers": 3,
  "global_rope_theta": 20000.0,
  "gradient_checkpointing": false,
  "hidden_activation": "gelu",
  "hidden_size": 1024,
  "initializer_cutoff_factor": 2.0,
  "initializer_range": 0.02,
  "intermediate_size": 2624,
  "layer_norm_eps": 1e-05,
  "local_attention": 128,
  "local_rope_theta": 20000.0,
  "max_position_embeddings": 2048,
  "mlp_bias": false,
  "mlp_dropout": 0.0,
  "model_type": "modernbert",
  "norm_bias": false,
  "norm_eps": 1e-05,
  "num_attention_heads": 16,
  "num_hidden_layers": 28,
  "pad_token_id": 0,
  "position_embedding_type": "absolute",
  "repad_logits_with_grad": false,
  "sep_token_id": 2,
  "sparse_pred_ignore_index": -100,
  "sparse_prediction": false,
  "transformers_version": "4.57.0",
  "vocab_size": 59008
}
config_sentence_transformers.json
ADDED
@@ -0,0 +1,14 @@
{
  "model_type": "SentenceTransformer",
  "__version__": {
    "sentence_transformers": "5.1.1",
    "transformers": "4.57.0",
    "pytorch": "2.8.0+cu128"
  },
  "prompts": {
    "query": "",
    "document": ""
  },
  "default_prompt_name": null,
  "similarity_fn_name": "cosine"
}
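
Since `similarity_fn_name` is `cosine`, `SentenceTransformer.similarity` scores embedding pairs with cosine similarity by default. A brief illustrative example (the two phrases are near-synonyms for contract termination):

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("newmindai/Mursit-Large-TR-Retrieval")
emb = model.encode(["sözleşme feshi", "sözleşmenin sona ermesi"])  # "contract termination" / "end of the contract"
print(model.similarity(emb, emb))  # 2x2 cosine-similarity matrix, per similarity_fn_name
```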
model.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4b33709916375a1e3fe90463312b8da9d2fd54100fb7b9959e28018866361a20
size 1615375340
model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4188d15c1c6015778bbdea4fba35189125f46e4271dc5826d2d49f01633cbeea
size 1614533136
model_performance_2d.png
ADDED
(binary image stored with Git LFS)
modules.json
ADDED
@@ -0,0 +1,14 @@
[
  {
    "idx": 0,
    "name": "0",
    "path": "",
    "type": "sentence_transformers.models.Transformer"
  },
  {
    "idx": 1,
    "name": "1",
    "path": "1_Pooling",
    "type": "sentence_transformers.models.Pooling"
  }
]
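
For reference, a minimal sketch of how these two modules compose into the full pipeline through the Sentence Transformers `models` API (loading the repo id directly is equivalent; the mean pooling mode mirrors 1_Pooling/config.json):

```python
from sentence_transformers import SentenceTransformer, models

# Module 0: the ModernBERT transformer producing token embeddings
word = models.Transformer("newmindai/Mursit-Large-TR-Retrieval", max_seq_length=2048)

# Module 1: mean pooling over tokens, as configured in 1_Pooling/config.json
pool = models.Pooling(word.get_word_embedding_dimension(), pooling_mode="mean")

model = SentenceTransformer(modules=[word, pool])
```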
post_train_retrieval_2d.png
ADDED
(binary image stored with Git LFS)
sentence_bert_config.json
ADDED
@@ -0,0 +1,4 @@
{
  "max_seq_length": 2048,
  "do_lower_case": false
}
special_tokens_map.json
ADDED
@@ -0,0 +1,51 @@
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "cls_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "mask_token": {
    "content": "<mask>",
    "lstrip": true,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "[PAD]",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "sep_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json
ADDED
(diff too large to render)
tokenizer_config.json
ADDED
@@ -0,0 +1,59 @@
{
  "added_tokens_decoder": {
    "0": {
      "content": "[PAD]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "3": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "4": {
      "content": "<mask>",
      "lstrip": true,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": false,
  "cls_token": "<s>",
  "eos_token": "</s>",
  "extra_special_tokens": {},
  "mask_token": "<mask>",
  "model_input_names": [
    "input_ids",
    "attention_mask"
  ],
  "model_max_length": 2048,
  "pad_token": "[PAD]",
  "sep_token": "</s>",
  "tokenizer_class": "PreTrainedTokenizerFast",
  "unk_token": "<unk>"
}