Dataset Viewer
Auto-converted to Parquet Duplicate
index
int64
0
10k
example_id
int64
-9,222,251,878,653,584,000
9,222,896,083B
question
stringlengths
6
117
gold_answers
sequencelengths
1
48
emb
sequencelengths
1.02k
1.02k
0
3,051,930,912,491,995,600
: كم من الوقت استغرق بناء البرجين التوأمين
[ "11.0 سنة", "11 سنة" ]
[ 0.034606934, 0.051849365, 0.023620605, -0.002609253, 0.02130127, 0.057403564, -0.010620117, -0.05709839, -0.008728027, -0.0024795532, 0.036712646, 0.02331543, 0.022750854, 0.014373779, 0.013183594, 0.011703491, 0.04498291, 0.009185791, 0.018920898, -0.041290283, -0.025543213,...
2
-4,766,029,169,354,683,000
من الذي يغني أحبك مثل لا يوجد غدا
[ "كريس يونغ" ]
[ -0.036895752, -0.0030441284, -0.019805908, 0.0024089813, 0.001750946, -0.025253296, -0.005836487, -0.0262146, -0.043945312, 0.0158844, 0.043182373, -0.029052734, 0.016937256, -0.03062439, 0.01902771, -0.030044556, -0.0054855347, 0.038879395, 0.005973816, -0.04067993, -0.00119...
3
-5,007,314,435,785,513,000
كم عدد مواسم ملكات الصراخ
[ "2.0", "2" ]
[ -0.016586304, 0.0027313232, 0.015182495, 0.0026302338, -0.0019397736, 0.03189087, 0.017730713, -0.032928467, -0.008049011, 0.012260437, 0.02293396, 0.011833191, 0.018569946, -0.0024757385, 0.020767212, 0.025390625, 0.035949707, 0.0062789917, 0.037322998, -0.013420105, -0.0100...
4
7,960,403,399,608,385,000
متى كانت اخر مرة خاض فريق الليكرز المباريات الاقصائية
[ "2013", "2013" ]
[ -0.016036987, 0.04168701, -0.019119263, -0.017974854, 0.03111267, 0.008285522, 0.00945282, -0.064819336, 0.0063438416, -0.017456055, -0.032073975, -0.026123047, 0.01260376, 0.017471313, -0.02508545, 0.020858765, -0.0061950684, 0.023406982, 0.008651733, 0.04046631, 0.017425537...
5
-6,261,874,607,090,674,000
كم عدد الحلقات في جزء من مسلسل سرينجر ثينجز
[ "3.0", "3" ]
[ -0.034332275, 0.018157959, -0.021530151, -0.06137085, 0.049682617, 0.018844604, -0.00749588, -0.08795166, -0.0046043396, 0.011512756, 0.05847168, 0.014404297, 0.03111267, 0.025314331, 0.028656006, 0.035888672, 0.038024902, 0.015296936, 0.0055999756, -0.03060913, -0.020751953,...
6
563,260,143,484,355,900
من يغني أسمعك يطرق لكنك لا تستطيع أن تأتي
[ "ديف إدموندز" ]
[ -0.016174316, 0.016326904, -0.0026340485, -0.013137817, 0.029037476, 0.022277832, 0.000004708767, -0.054870605, -0.010925293, 0.027236938, 0.017700195, -0.016403198, 0.044647217, -0.016647339, 0.014671326, 0.035614014, -0.026153564, 0.05508423, 0.00081443787, -0.019485474, 0....
7
5,144,765,284,694,516,000
من مبدع مسلسل ذا فويس
[ "جون دي مول" ]
[ -0.027938843, 0.03829956, -0.026351929, -0.024139404, 0.02331543, -0.033599854, -0.020385742, -0.059661865, 0.018051147, 0.010101318, 0.011482239, -0.015991211, 0.038330078, 0.009056091, -0.024017334, 0.058532715, 0.024673462, -0.031585693, -0.037017822, -0.06225586, -0.01934...
8
-2,985,814,541,728,632,300
متى بدأ وانتهى الغرب المتوحش؟
[ "1865", "1865", "1895", "1895" ]
[ -0.019302368, 0.03262329, -0.00094509125, -0.022247314, 0.05480957, 0.031051636, 0.011741638, -0.068115234, -0.015716553, -0.01235199, 0.008758545, -0.082336426, 0.024276733, -0.017410278, -0.044952393, 0.008491516, 0.0077819824, 0.004611969, 0.028030396, 0.014854431, 0.00660...
10
8,783,251,888,656,510,000
متى لعب الرام في السوبر بوا؟
[ "1999", "1999" ]
[ 0.003932953, 0.04788208, -0.023391724, 0.0058631897, 0.017608643, 0.007820129, 0.010124207, -0.05657959, 0.0038375854, -0.022064209, 0.0024585724, 0.042816162, 0.00983429, 0.0206604, -0.0055389404, 0.013267517, -0.008285522, 0.008522034, 0.007484436, 0.012390137, 0.02645874, ...
11
-1,788,552,224,102,144,300
من الذي غنى الأغنية يا له من ليلة
[ "The Four Seasons" ]
[ -0.022018433, -0.012786865, -0.0435791, 0.026016235, 0.029403687, 0.02659607, 0.0390625, -0.029891968, 0.00605011, -0.0019512177, 0.014099121, 0.05734253, 0.024032593, 0.03201294, 0.013214111, 0.035064697, 0.0046463013, 0.024215698, 0.0011987686, -0.029464722, -0.0008044243, ...
12
6,515,983,275,149,804,000
i want to be with you everywhere من غني اغنيه
[ "فليتوود ماك" ]
[ -0.028503418, -0.005306244, 0.0018148422, -0.007347107, 0.030410767, 0.029083252, 0.005832672, -0.040863037, -0.010505676, -0.02305603, 0.020553589, 0.059570312, -0.002714157, 0.0025787354, 0.006866455, -0.0039596558, 0.0234375, -0.009666443, -0.021469116, -0.035614014, -0.02...
13
5,146,730,298,316,783,000
من لعب دور ماغدلين في يسوع المسيح سوبر ستار؟
[ "إيفون إليمان" ]
[ -0.017684937, 0.02281189, -0.01876831, -0.010314941, 0.024871826, 0.007598877, 0.01838684, -0.056549072, -0.0045051575, 0.012138367, 0.047210693, 0.005874634, 0.03466797, 0.011642456, 0.014518738, 0.017623901, -0.013702393, -0.007789612, -0.00831604, -0.000016510487, 0.015792...
14
3,215,481,144,081,057,000
؟lamentations in the bibleمن الذي كتب كتاب
[ "إرميا", "النبي إرميا" ]
[ -0.024246216, 0.027679443, -0.0435791, 0.023406982, 0.0073280334, -0.045318604, 0.010406494, -0.0345459, 0.029907227, -0.07232666, 0.009338379, 0.04449463, -0.011657715, 0.03353882, -0.061798096, 0.027938843, -0.0005598068, 0.007537842, 0.024490356, 0.032440186, 0.013725281, ...
15
-4,767,536,963,343,233,000
من لديه اكثر عدد معجبين علي انستاجرام
[ "كريستيانو رونالدو", "كريستيانو رونالدو دوس سانتوس أفيرو", "كرستيانو رونالدو" ]
[ 0.038482666, 0.07006836, 0.014656067, 0.014640808, 0.04043579, 0.06323242, 0.00756073, -0.06097412, -0.023986816, 0.027267456, 0.07116699, 0.013053894, 0.025772095, -0.022399902, 0.05596924, -0.013404846, 0.0013170242, -0.019714355, -0.03729248, -0.034118652, -0.009155273, ...
16
4,412,615,293,667,766,000
ما الحلقة من المكتب هي الشواء؟
[ "تخفيف التوتر" ]
[ 0.015083313, 0.03918457, -0.08251953, 0.02357483, 0.016983032, -0.013420105, 0.010658264, -0.044830322, -0.014961243, 0.009109497, 0.028274536, 0.026397705, 0.027877808, -0.004638672, 0.019622803, 0.006011963, 0.013618469, -0.03164673, 0.015777588, -0.0029182434, -0.014945984...
End of preview. Expand in Data Studio

Dataset Description

This is the MKQA dataset augmented with query embeddings. It can be used jointly with Multilingual Embeddings for Wikipedia in 300+ Languages for multilingual passage retrieval, since the query vectors are computed with the same embedder, Cohere Embed v3.

For more details about MKQA, see the official dataset repo.

If you use these embedded queries, we kindly ask that you cite the work in which the embeddings were created:

@inproceedings{qi-etal-2025-consistency,
    title = "On the Consistency of Multilingual Context Utilization in Retrieval-Augmented Generation",
    author = "Qi, Jirui  and
      Fern{\'a}ndez, Raquel  and
      Bisazza, Arianna",
    editor = "Adelani, David Ifeoluwa  and
      Arnett, Catherine  and
      Ataman, Duygu  and
      Chang, Tyler A.  and
      Gonen, Hila  and
      Raja, Rahul  and
      Schmidt, Fabian  and
      Stap, David  and
      Wang, Jiayi",
    booktitle = "Proceedings of the 5th Workshop on Multilingual Representation Learning (MRL 2025)",
    month = nov,
    year = "2025",
    address = "Suzhou, China",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.mrl-main.15/",
    doi = "10.18653/v1/2025.mrl-main.15",
    pages = "199--225",
    ISBN = "979-8-89176-345-6",
    abstract = "Retrieval-augmented generation (RAG) with large language models (LLMs) has demonstrated strong performance in multilingual question-answering (QA) tasks by leveraging relevant passages retrieved from corpora. In multilingual RAG (mRAG), the retrieved passages can be written in languages other than that of the query entered by the user, making it challenging for LLMs to effectively utilize the provided information. Recent research suggests that retrieving passages from multilingual corpora can improve RAG performance, particularly for low-resource languages. However, the extent to which LLMs can leverage different kinds of multilingual contexts to generate accurate answers, independently from retrieval quality, remains understudied. In this paper, we conduct an extensive assessment of LLMs' ability to (i) make consistent use of a relevant passage regardless of its language, (ii) respond in the expected language, and (iii) focus on the relevant passage even when multiple `distracting passages' in different languages are provided in the context. Our experiments with four LLMs across three QA datasets covering 48 languages reveal a surprising ability of LLMs to extract relevant information from passages in a different language than the query, but a much weaker ability to produce a full answer in the correct language. Our analysis, based on both accuracy and feature attribution techniques, further shows that distracting passages negatively impact answer quality regardless of their language. However, distractors in the query language exert a slightly stronger influence. Taken together, our findings deepen the understanding of how LLMs utilize context in mRAG systems, providing directions for future improvements. All codes and data are released at https://github.com/Betswish/mRAG-Context-Consistency."
}

Please also cite the MKQA dataset as follows:

@article{longpre-etal-2021-mkqa,
    title = "{MKQA}: A Linguistically Diverse Benchmark for Multilingual Open Domain Question Answering",
    author = "Longpre, Shayne  and
      Lu, Yi  and
      Daiber, Joachim",
    editor = "Roark, Brian  and
      Nenkova, Ani",
    journal = "Transactions of the Association for Computational Linguistics",
    volume = "9",
    year = "2021",
    address = "Cambridge, MA",
    publisher = "MIT Press",
    url = "https://aclanthology.org/2021.tacl-1.82/",
    doi = "10.1162/tacl_a_00433",
    pages = "1389--1406",
    abstract = "Progress in cross-lingual modeling depends on challenging, realistic, and diverse evaluation sets. We introduce Multilingual Knowledge Questions and Answers (MKQA), an open- domain question answering evaluation set comprising 10k question-answer pairs aligned across 26 typologically diverse languages (260k question-answer pairs in total). Answers are based on heavily curated, language- independent data representation, making results comparable across languages and independent of language-specific passages. With 26 languages, this dataset supplies the widest range of languages to-date for evaluating question answering. We benchmark a variety of state- of-the-art methods and baselines for generative and extractive question answering, trained on Natural Questions, in zero shot and translation settings. Results indicate this dataset is challenging even in English, but especially in low-resource languages.1"
}
Downloads last month
74