license: other
task_categories:
- text-classification
- question-answering
- zero-shot-classification
- sentence-similarity
language:
- ja
pretty_name: The Lightweight Version of JMTEB
size_categories:
- 100M<n<1B
configs:
- config_name: livedoor_news
data_files:
- split: train
path: data/livedoor_news/train.parquet
- split: validation
path: data/livedoor_news/validation.parquet
- split: test
path: data/livedoor_news/test.parquet
- config_name: mewsc16_ja
data_files:
- split: validation
path: data/mewsc16_ja/validation.parquet
- split: test
path: data/mewsc16_ja/test.parquet
- config_name: sib200_japanese_clustering
data_files:
- split: train
path: data/sib200_japanese_clustering/train.parquet
- split: validation
path: data/sib200_japanese_clustering/validation.parquet
- split: test
path: data/sib200_japanese_clustering/test.parquet
- config_name: amazon_review_classification
data_files:
- split: train
path: data/amazon_review_classification/train.parquet
- split: validation
path: data/amazon_review_classification/validation.parquet
- split: test
path: data/amazon_review_classification/test.parquet
- config_name: amazon_counterfactual_classification
data_files:
- split: train
path: data/amazon_counterfactual_classification/train.parquet
- split: validation
path: data/amazon_counterfactual_classification/validation.parquet
- split: test
path: data/amazon_counterfactual_classification/test.parquet
- config_name: massive_intent_classification
data_files:
- split: train
path: data/massive_intent_classification/train.parquet
- split: validation
path: data/massive_intent_classification/validation.parquet
- split: test
path: data/massive_intent_classification/test.parquet
- config_name: massive_scenario_classification
data_files:
- split: train
path: data/massive_scenario_classification/train.parquet
- split: validation
path: data/massive_scenario_classification/validation.parquet
- split: test
path: data/massive_scenario_classification/test.parquet
- config_name: japanese_sentiment_classification
data_files:
- split: train
path: data/japanese_sentiment_classification/train.parquet
- split: validation
path: data/japanese_sentiment_classification/validation.parquet
- split: test
path: data/japanese_sentiment_classification/test.parquet
- config_name: sib200_japanese_classification
data_files:
- split: train
path: data/sib200_japanese_classification/train.parquet
- split: validation
path: data/sib200_japanese_classification/validation.parquet
- split: test
path: data/sib200_japanese_classification/test.parquet
- config_name: wrime_classification
data_files:
- split: train
path: data/wrime_classification/train.parquet
- split: validation
path: data/wrime_classification/validation.parquet
- split: test
path: data/wrime_classification/test.parquet
- config_name: jsts
data_files:
- split: train
path: data/jsts/train.parquet
- split: test
path: data/jsts/test.parquet
- config_name: jsick
data_files:
- split: train
path: data/jsick/train.parquet
- split: validation
path: data/jsick/validation.parquet
- split: test
path: data/jsick/test.parquet
- config_name: jaqket-query
data_files:
- split: train
path: data/jaqket-query/train.parquet
- split: validation
path: data/jaqket-query/validation.parquet
- split: test
path: data/jaqket-query/test.parquet
- config_name: jaqket-corpus
data_files:
- split: corpus
path: data/jaqket-corpus/corpus.parquet
- config_name: mrtydi-query
data_files:
- split: train
path: data/mrtydi-query/train.parquet
- split: validation
path: data/mrtydi-query/validation.parquet
- split: test
path: data/mrtydi-query/test.parquet
- config_name: mrtydi-corpus
data_files:
- split: corpus
path: data/mrtydi-corpus/corpus.parquet
- config_name: jagovfaqs_22k-query
data_files:
- split: train
path: data/jagovfaqs_22k-query/train.parquet
- split: validation
path: data/jagovfaqs_22k-query/validation.parquet
- split: test
path: data/jagovfaqs_22k-query/test.parquet
- config_name: jagovfaqs_22k-corpus
data_files:
- split: corpus
path: data/jagovfaqs_22k-corpus/corpus.parquet
- config_name: nlp_journal_title_abs-query
data_files:
- split: validation
path: data/nlp_journal_title_abs-query/validation.parquet
- split: test
path: data/nlp_journal_title_abs-query/test.parquet
- config_name: nlp_journal_title_abs-corpus
data_files:
- split: corpus
path: data/nlp_journal_title_abs-corpus/corpus.parquet
- config_name: nlp_journal_title_intro-query
data_files:
- split: validation
path: data/nlp_journal_title_intro-query/validation.parquet
- split: test
path: data/nlp_journal_title_intro-query/test.parquet
- config_name: nlp_journal_title_intro-corpus
data_files:
- split: corpus
path: data/nlp_journal_title_intro-corpus/corpus.parquet
- config_name: nlp_journal_abs_intro-query
data_files:
- split: validation
path: data/nlp_journal_abs_intro-query/validation.parquet
- split: test
path: data/nlp_journal_abs_intro-query/test.parquet
- config_name: nlp_journal_abs_intro-corpus
data_files:
- split: corpus
path: data/nlp_journal_abs_intro-corpus/corpus.parquet
- config_name: nlp_journal_abs_article-query
data_files:
- split: validation
path: data/nlp_journal_abs_article-query/validation.parquet
- split: test
path: data/nlp_journal_abs_article-query/test.parquet
- config_name: nlp_journal_abs_article-corpus
data_files:
- split: corpus
path: data/nlp_journal_abs_article-corpus/corpus.parquet
- config_name: jacwir-retrieval-query
data_files:
- split: validation
path: data/jacwir-retrieval-query/validation.parquet
- split: test
path: data/jacwir-retrieval-query/test.parquet
- config_name: jacwir-retrieval-corpus
data_files:
- split: corpus
path: data/jacwir-retrieval-corpus/corpus.parquet
- config_name: miracl-retrieval-query
data_files:
- split: train
path: data/miracl-retrieval-query/train.parquet
- split: validation
path: data/miracl-retrieval-query/validation.parquet
- split: test
path: data/miracl-retrieval-query/test.parquet
- config_name: miracl-retrieval-corpus
data_files:
- split: corpus
path: data/miracl-retrieval-corpus/corpus.parquet
- config_name: mldr-retrieval-query
data_files:
- split: train
path: data/mldr-retrieval-query/train.parquet
- split: validation
path: data/mldr-retrieval-query/validation.parquet
- split: test
path: data/mldr-retrieval-query/test.parquet
- config_name: mldr-retrieval-corpus
data_files:
- split: corpus
path: data/mldr-retrieval-corpus/corpus.parquet
- config_name: mintaka-retrieval-query
data_files:
- split: validation
path: data/mintaka-retrieval-query/validation.parquet
- split: test
path: data/mintaka-retrieval-query/test.parquet
- config_name: mintaka-retrieval-corpus
data_files:
- split: corpus
path: data/mintaka-retrieval-corpus/corpus.parquet
- config_name: esci-query
data_files:
- split: train
path: data/esci-query/train.parquet
- split: validation
path: data/esci-query/validation.parquet
- split: test
path: data/esci-query/test.parquet
- config_name: esci-corpus
data_files:
- split: corpus
path: data/esci-corpus/corpus.parquet
- config_name: jqara-query
data_files:
- split: validation
path: data/jqara-query/validation.parquet
- split: test
path: data/jqara-query/test.parquet
- config_name: jqara-corpus
data_files:
- split: corpus
path: data/jqara-corpus/corpus.parquet
- config_name: jacwir-reranking-query
data_files:
- split: validation
path: data/jacwir-reranking-query/validation.parquet
- split: test
path: data/jacwir-reranking-query/test.parquet
- config_name: jacwir-reranking-corpus
data_files:
- split: corpus
path: data/jacwir-reranking-corpus/corpus.parquet
- config_name: miracl-reranking-query
data_files:
- split: train
path: data/miracl-reranking-query/train.parquet
- split: validation
path: data/miracl-reranking-query/validation.parquet
- split: test
path: data/miracl-reranking-query/test.parquet
- config_name: miracl-reranking-corpus
data_files:
- split: corpus
path: data/miracl-reranking-corpus/corpus.parquet
- config_name: mldr-reranking-query
data_files:
- split: train
path: data/mldr-reranking-query/train.parquet
- split: validation
path: data/mldr-reranking-query/validation.parquet
- split: test
path: data/mldr-reranking-query/test.parquet
- config_name: mldr-reranking-corpus
data_files:
- split: corpus
path: data/mldr-reranking-corpus/corpus.parquet
JMTEB-lite: The Lightweight Version of JMTEB
JMTEB-lite is a lightweight version of JMTEB. It enables agile evaluation, achieving on average a 5x speedup compared with JMTEB. The results of JMTEB-lite have been shown to correlate highly with those of JMTEB, making it a faithful preview of JMTEB.
TL;DR
from datasets import load_dataset
dataset = load_dataset("sbintuitions/JMTEB-lite", name="<dataset_name>", split="<split>")
JMTEB_LITE_DATASET_NAMES = (
'livedoor_news',
'mewsc16_ja',
'sib200_japanese_clustering',
'amazon_review_classification',
'amazon_counterfactual_classification',
'massive_intent_classification',
'massive_scenario_classification',
'japanese_sentiment_classification',
'sib200_japanese_classification',
'wrime_classification',
'jsts',
'jsick',
'jaqket-query',
'jaqket-corpus', # lightweight
'mrtydi-query',
'mrtydi-corpus', # lightweight
'jagovfaqs_22k-query',
'jagovfaqs_22k-corpus',
'nlp_journal_title_abs-query',
'nlp_journal_title_abs-corpus',
'nlp_journal_title_intro-query',
'nlp_journal_title_intro-corpus',
'nlp_journal_abs_intro-query',
'nlp_journal_abs_intro-corpus',
'nlp_journal_abs_article-query',
'nlp_journal_abs_article-corpus',
'jacwir-retrieval-query',
'jacwir-retrieval-corpus', # lightweight
'miracl-retrieval-query',
'miracl-retrieval-corpus', # lightweight
'mldr-retrieval-query',
'mldr-retrieval-corpus',
'mintaka-retrieval-query',
'mintaka-retrieval-corpus',
'esci-query',
'esci-corpus',
'jqara-query', # lightweight
'jqara-corpus', # lightweight
'jacwir-reranking-query', # lightweight
'jacwir-reranking-corpus', # lightweight
'miracl-reranking-query',
'miracl-reranking-corpus',
'mldr-reranking-query',
'mldr-reranking-corpus',
)
Introduction
We introduced JMTEB (Japanese Massive Text Embedding Benchmark), a comprehensive evaluation benchmark of Japanese text embedding models. However, the massive size of JMTEB makes evaluation slow and resource-demanding. To address this, we now introduce JMTEB-lite, a lightweight version of JMTEB constructed by substantially reducing corpus size in retrieval and reranking tasks. We have also verified that JMTEB-lite significantly accelerates evaluation while maintaining high fidelity to the full JMTEB.
We recommend using JMTEB-lite to obtain preview evaluation results during agile development, and using JMTEB for the full, final evaluation.
JMTEB-lite is compatible with the evaluation script of JMTEB: https://github.com/sbintuitions/JMTEB.
Tasks and Datasets
Here is an overview of the tasks and datasets currently included in JMTEB-lite.
Note that only the datasets shown in bold are lightweight; the rest are identical to their counterparts in JMTEB.
| Task | Dataset | Train | Dev | Test | Document (Retrieval) |
|---|---|---|---|---|---|
| Clustering | Livedoor-News | 5,163 | 1,106 | 1,107 | - |
| MewsC-16-ja | - | 992 | 992 | - | |
| SIB200 Japanese Clustering | 701 | 99 | 204 | - | |
| Classification | AmazonCounterfactualClassification | 5,600 | 466 | 934 | - |
| AmazonReviewClassification | 200,000 | 5,000 | 5,000 | - | |
| MassiveIntentClassification | 11,514 | 2,033 | 2,974 | - | |
| MassiveScenarioClassification | 11,514 | 2,033 | 2,974 | - | |
| Japanese Sentiment Classification | 9,831 | 1,677 | 2,552 | - | |
| SIB200 Japanese Classification | 701 | 99 | 204 | - | |
| WRIME Classification | 30,000 | 2,500 | 2,500 | - | |
| STS | JSTS | 12,451 | - | 1,457 | - |
| JSICK | 5,956 | 1,985 | 1,986 | - | |
| Retrieval | JAQKET | 13,061 | 995 | 997 | 65,802 |
| Mr.TyDi-ja | 3,697 | 928 | 720 | 93,382 | |
| NLP Journal title-abs | - | 127 | 510 | 637 | |
| NLP Journal title-intro | - | 127 | 510 | 637 | |
| NLP Journal abs-intro | - | 127 | 510 | 637 | |
| NLP Journal abs-article | - | 127 | 510 | 637 | |
| JaGovFaqs-22k | 15,955 | 3,419 | 3,420 | 22,794 | |
| JaCWIR-Retrieval | - | 1,000 | 4,000 | 302,638 | |
| MIRACL-Retrieval | 2,433 | 1,044 | 860 | 105,064 | |
| MLDR-Retrieval | 2,262 | 200 | 200 | 10,000 | |
| Mintaka-Retrieval | - | 2,313[^1] | 2,313 | 2,313 | |
| Reranking | Esci | 10,141 | 1,790 | 4,206 | 149,999 |
| JaCWIR-Reranking | - | 1,000 | 4,000 | 188,033 | |
| JQaRA | 498 | 1,737 | 1,667 | 172,897 | |
| MIRACL-Reranking | 2,433 | 1,044 | 860 | 37,124 | |
| MLDR-Reranking | 2,262 | 200 | 200 | 5,339 |
[^1]: To keep consistent with MTEB where Mintaka-Retrieval doesn't have a validation set, we set our validation set the same as the test set.
Construction Process
For the 4 retrieval datasets (JAQKET, Mr.TyDi, JaCWIR-Retrieval, MIRACL-Retrieval), we use 5 highly performant models to predict hard negative documents for each query (the 50 documents in the corpus most semantically similar to the query), and merge these hard negatives along with the golden documents.
For the 2 reranking datasets (JQaRA, JaCWIR-Reranking), we use 5 highly performant models to rerank the documents for each query, and retain top-50 hard negative documents for each query. Then we merge these hard negatives with golden documents.
The remaining datasets are identical to their counterparts in JMTEB.
Reference
@misc{jmteb_lite,
author = {Li, Shengzhe and Ohagi, Masaya and Ri, Ryokan and Fukuchi, Akihiko and Shibata, Tomohide and Kawahara, Daisuke},
title = {{J}{M}{T}{E}{B}-lite: {T}he {L}ightweight {V}ersion of {JMTEB}},
howpublished = {\url{https://huggingface.co/datasets/sbintuitions/JMTEB-lite}},
year = {2025},
}
@inproceedings{li2026jmteb,
author = {Li, Shengzhe and Ohagi, Masaya and Ri, Ryokan and Fukuchi, Akihiko and Shibata, Tomohide and Kawahara, Daisuke},
title = {{JMTEB and JMTEB-lite: Japanese Massive Text Embedding Benchmark and Its Lightweight Version}},
booktitle = "Proceedings of the Fifteenth Language Resources and Evaluation Conference",
month = may,
year = "2026",
address = "Palma, Mallorca, Spain",
publisher = "European Language Resources Association",
note = "to appear",
}
Legacy Loading Scripts
The Python loading script (JMTEB-lite.py) has been moved to the legacy/ directory for historical reference. It was previously used to load the dataset with trust_remote_code=True, but is no longer required. The dataset now uses Parquet format and can be loaded directly without executing any custom code.
License
Regarding the license information of datasets, please refer to the individual datasets.
Our code is licensed under the Creative Commons Attribution-ShareAlike 4.0 International License.
