File size: 20,856 Bytes
de1a0db
6da41a2
de1a0db
 
 
 
 
 
 
92276f5
9c6aec4
bdf9a5b
 
 
92276f5
 
 
 
f7f531a
 
 
 
67007da
 
 
 
e3cb874
 
 
 
bcc0323
 
 
 
3502c7a
 
 
 
95ff839
 
 
87672ad
 
92276f5
9c6aec4
bdf9a5b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3502c7a
92276f5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f7f531a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
67007da
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e3cb874
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bcc0323
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3502c7a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
95ff839
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
87672ad
 
 
 
de1a0db
 
 
64dd148
 
 
87672ad
de1a0db
de44841
de1a0db
de44841
de1a0db
de44841
 
f1bcb4a
de1a0db
de44841
de1a0db
de44841
de1a0db
de44841
 
 
 
 
de1a0db
de44841
de1a0db
de44841
 
 
de1a0db
de44841
de1a0db
de44841
de1a0db
de44841
de1a0db
de44841
 
 
 
 
 
 
 
 
 
 
b2eeafd
de44841
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b890066
de44841
 
 
 
 
 
 
 
de1a0db
de44841
de1a0db
de44841
de1a0db
de44841
 
b2eeafd
de44841
 
de1a0db
de44841
de1a0db
de44841
de1a0db
de44841
de1a0db
de44841
 
 
 
de1a0db
de44841
de1a0db
b890066
 
 
 
 
de1a0db
b890066
 
de1a0db
b890066
 
de1a0db
b890066
 
de1a0db
b890066
 
de1a0db
b890066
 
 
 
 
de1a0db
 
01ea91a
a99882a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
01ea91a
de1a0db
 
de44841
de1a0db
de44841
de1a0db
de44841
de1a0db
de44841
de1a0db
de44841
de1a0db
de44841
de1a0db
de44841
de1a0db
de44841
de1a0db
de44841
de1a0db
de44841
de1a0db
de44841
 
 
 
de1a0db
de44841
de1a0db
de44841
 
 
 
de1a0db
de44841
de1a0db
de44841
 
 
 
de1a0db
de44841
de1a0db
de44841
de1a0db
de44841
de1a0db
de44841
de1a0db
b2eeafd
de1a0db
de44841
de1a0db
de44841
de1a0db
de44841
 
 
0f2f49a
5174bfe
de44841
0f2f49a
de44841
 
de1a0db
0f2f49a
de1a0db
de44841
de1a0db
de44841
 
 
 
de1a0db
 
de44841
de1a0db
de44841
 
 
de1a0db
de44841
 
9ee1491
de1a0db
de44841
de1a0db
de44841
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
---
license: mit
multilinguality: multilingual
task_categories:
- multiple-choice
pretty_name: Tokenization Robustness
tags:
- multilingual
- tokenization
configs:
- config_name: farsi_tokenizer_robustness_canonical
  data_files:
  - split: test
    path: farsi_tokenizer_robustness_cannonical/test-*
- config_name: farsi_tokenizer_robustness_code_language_script_switching
  data_files:
  - split: test
    path: farsi_tokenizer_robustness_code_language_script_switching/test-*
- config_name: farsi_tokenizer_robustness_colloquial
  data_files:
  - split: test
    path: farsi_tokenizer_robustness_colloquial/test-*
- config_name: farsi_tokenizer_robustness_diacritics_presence_absence
  data_files:
  - split: test
    path: farsi_tokenizer_robustness_diacritics_presence_absence/test-*
- config_name: farsi_tokenizer_robustness_keyboard_proximity_errors
  data_files:
  - split: test
    path: farsi_tokenizer_robustness_keyboard_proximity_errors/test-*
- config_name: farsi_tokenizer_robustness_romanization
  data_files:
  - split: test
    path: farsi_tokenizer_robustness_romanization/test-*
- config_name: farsi_tokenizer_robustness_word_reordering
  data_files:
  - split: test
    path: farsi_tokenizer_robustness_word_reordering/test-*
- config_name: farsi_tokenizer_robustness_word_spacing_zero-width_characters_extra_space
  data_files:
  - split: test
    path: >-
      farsi_tokenizer_robustness_word_spacing_zero-width_characters_extra_space/test-*
dataset_info:
- config_name: farsi_tokenizer_robustness_canonical
  features:
  - name: question
    dtype: string
  - name: choices
    list: string
  - name: answer
    dtype: int64
  - name: answer_label
    dtype: string
  - name: split
    dtype: string
  - name: subcategories
    dtype: string
  - name: lang
    dtype: string
  - name: second_lang
    dtype: string
  - name: coding_lang
    dtype: string
  - name: notes
    dtype: string
  - name: id
    dtype: string
  - name: set_id
    dtype: float64
  - name: variation_id
    dtype: float64
  splits:
  - name: test
    num_bytes: 12118
    num_examples: 45
  download_size: 10753
  dataset_size: 12118
- config_name: farsi_tokenizer_robustness_code_language_script_switching
  features:
  - name: question
    dtype: string
  - name: choices
    list: string
  - name: answer
    dtype: int64
  - name: answer_label
    dtype: string
  - name: split
    dtype: string
  - name: subcategories
    dtype: string
  - name: lang
    dtype: string
  - name: second_lang
    dtype: string
  - name: coding_lang
    dtype: string
  - name: notes
    dtype: string
  - name: id
    dtype: string
  - name: set_id
    dtype: float64
  - name: variation_id
    dtype: float64
  splits:
  - name: test
    num_bytes: 10823
    num_examples: 45
  download_size: 9238
  dataset_size: 10823
- config_name: farsi_tokenizer_robustness_colloquial
  features:
  - name: question
    dtype: string
  - name: choices
    list: string
  - name: answer
    dtype: int64
  - name: answer_label
    dtype: string
  - name: split
    dtype: string
  - name: subcategories
    dtype: string
  - name: lang
    dtype: string
  - name: second_lang
    dtype: string
  - name: coding_lang
    dtype: string
  - name: notes
    dtype: string
  - name: id
    dtype: string
  - name: set_id
    dtype: float64
  - name: variation_id
    dtype: float64
  splits:
  - name: test
    num_bytes: 9788
    num_examples: 45
  download_size: 9247
  dataset_size: 9788
- config_name: farsi_tokenizer_robustness_diacritics_presence_absence
  features:
  - name: question
    dtype: string
  - name: choices
    list: string
  - name: answer
    dtype: int64
  - name: answer_label
    dtype: string
  - name: split
    dtype: string
  - name: subcategories
    dtype: string
  - name: lang
    dtype: string
  - name: second_lang
    dtype: string
  - name: coding_lang
    dtype: string
  - name: notes
    dtype: string
  - name: id
    dtype: string
  - name: set_id
    dtype: float64
  - name: variation_id
    dtype: float64
  splits:
  - name: test
    num_bytes: 12047
    num_examples: 45
  download_size: 10143
  dataset_size: 12047
- config_name: farsi_tokenizer_robustness_keyboard_proximity_errors
  features:
  - name: question
    dtype: string
  - name: choices
    list: string
  - name: answer
    dtype: int64
  - name: answer_label
    dtype: string
  - name: split
    dtype: string
  - name: subcategories
    dtype: string
  - name: lang
    dtype: string
  - name: second_lang
    dtype: string
  - name: coding_lang
    dtype: string
  - name: notes
    dtype: string
  - name: id
    dtype: string
  - name: set_id
    dtype: float64
  - name: variation_id
    dtype: float64
  splits:
  - name: test
    num_bytes: 10835
    num_examples: 45
  download_size: 9474
  dataset_size: 10835
- config_name: farsi_tokenizer_robustness_romanization
  features:
  - name: question
    dtype: string
  - name: choices
    list: string
  - name: answer
    dtype: int64
  - name: answer_label
    dtype: string
  - name: split
    dtype: string
  - name: subcategories
    dtype: string
  - name: lang
    dtype: string
  - name: second_lang
    dtype: string
  - name: coding_lang
    dtype: string
  - name: notes
    dtype: string
  - name: id
    dtype: string
  - name: set_id
    dtype: float64
  - name: variation_id
    dtype: float64
  splits:
  - name: test
    num_bytes: 8399
    num_examples: 45
  download_size: 8953
  dataset_size: 8399
- config_name: farsi_tokenizer_robustness_word_reordering
  features:
  - name: question
    dtype: string
  - name: choices
    list: string
  - name: answer
    dtype: int64
  - name: answer_label
    dtype: string
  - name: split
    dtype: string
  - name: subcategories
    dtype: string
  - name: lang
    dtype: string
  - name: second_lang
    dtype: string
  - name: coding_lang
    dtype: string
  - name: notes
    dtype: string
  - name: id
    dtype: string
  - name: set_id
    dtype: float64
  - name: variation_id
    dtype: float64
  splits:
  - name: test
    num_bytes: 10883
    num_examples: 45
  download_size: 9556
  dataset_size: 10883
- config_name: farsi_tokenizer_robustness_word_spacing_zero-width_characters_extra_space
  features:
  - name: question
    dtype: string
  - name: choices
    list: string
  - name: answer
    dtype: int64
  - name: answer_label
    dtype: string
  - name: split
    dtype: string
  - name: subcategories
    dtype: string
  - name: lang
    dtype: string
  - name: second_lang
    dtype: string
  - name: coding_lang
    dtype: string
  - name: notes
    dtype: string
  - name: id
    dtype: string
  - name: set_id
    dtype: float64
  - name: variation_id
    dtype: float64
  splits:
  - name: test
    num_bytes: 12666
    num_examples: 45
  download_size: 10010
  dataset_size: 12666
language:
- fa
size_categories:
- n<1K
---
<!-- Provide a quick summary of the dataset. -->

<img src="toksuite-logo.png" alt="TokSuite Logo" width="250" style="margin-left: auto; margin-right: auto; display: block;"/>

# TokSuite Benchmark (Farsi Collection)


## Dataset Description

This dataset is part of **TokSuite**, a comprehensive benchmark designed to measure how different tokenization strategies affect language model performance and robustness. This specific subset contains Farsi (Persian) language multiple-choice text completion questions with various real-world perturbations that test tokenizer robustness.

- **Curated by:** R3 Research Team
- **Language(s):** Farsi/Persian (fa)
- **License:** MIT License

### Dataset Summary

TokSuite addresses a fundamental challenge in language model research: understanding how tokenization choices impact model behavior in isolation. The Farsi subset specifically measures model performance on canonical questions and various perturbations including orthographic variations, diacritics, morphological challenges, and noise commonly encountered when processing Farsi text.

**Key Features:**
- 45 canonical questions covering general knowledge, geography, science, and language understanding
- Multiple perturbation types reflecting real-world text variations in Farsi
- Parallel in structure to the other TokSuite language benchmarks (English, Turkish, Italian, Chinese)
- Native speaker curation ensuring linguistic authenticity

### Supported Tasks

- **Multiple-Choice Question Answering**: Text completion format with 4 answer choices
- **Tokenizer Robustness Evaluation**: Measuring performance degradation under various text perturbations
- **Multilingual NLP Benchmarking**: Evaluating language models on Farsi text understanding

### Languages

The dataset contains text in Farsi (Persian) written in Arabic script (language code: `pes_Arab` / `fa`).

## Dataset Structure

### Data Instances

An example from the dataset:

```json
{
  "question": "رنگ آسمان",
  "choices": ["آبی است", "قرمز است", "سبز است", "زرد است"],
  "answer": 0,
  "answer_label": "A",
  "split": "test",
  "subcategories": "Canonical",
  "lang": "pes_Arab",
  "second_lang": "The color of the sky is",
  "coding_lang": "",
  "notes": "The color of the sky is",
  "id": "301",
  "set_id": 301.0,
  "variation_id": 1.0
}
```

### Data Fields

| Field | Type | Description |
|-------|------|-------------|
| `question` | `string` | The question text in Farsi (Persian Arabic script) |
| `choices` | `list[string]` | Four multiple-choice answer options in Farsi |
| `answer` | `int64` | Index of the correct answer (0-3) |
| `answer_label` | `string` | Letter label of the correct answer (A, B, C, or D) |
| `split` | `string` | Dataset split identifier (all entries are "test") |
| `subcategories` | `string` | Perturbation category |
| `lang` | `string` | Language code (pes_Arab = Persian/Farsi in Arabic script) |
| `second_lang` | `string` | English translation or description of the question |
| `coding_lang` | `string` | Not applicable for this dataset (empty string) |
| `notes` | `string` | Additional context about the question or perturbation type |
| `id` | `string` | Unique question identifier |
| `set_id` | `float64` | Question set grouping identifier (ranges from 300-344) |
| `variation_id` | `float64` | Variation number within a question set |


## Dataset Creation

### Curation Rationale

This dataset was created to:
1. Systematically evaluate how different tokenization strategies handle Farsi text
2. Measure robustness against real-world text perturbations specific to Farsi language
3. Support research into tokenization's impact on language model behavior
4. Provide standardized benchmarks for Farsi language models

The questions were designed to be straightforward with high baseline accuracy, allowing researchers to cleanly measure performance degradation when perturbations are applied.

### Source Data

#### Data Collection and Processing

- **Canonical Questions**: 45 baseline questions in English were created covering general knowledge topics
- **Translation**: Native Farsi speakers translated questions to Persian
- **Perturbations**: Each question underwent targeted perturbations designed to reflect morphological and orthographic characteristics of Farsi
- **Validation**: Model-in-the-loop process ensured high baseline accuracy across 14 different tokenizers

#### Perturbation Categories

1. **Canonical**
The baseline/standard form of Farsi text without any modifications, used as the reference point for comparing other perturbations.

2. **Code Language Script Switching**
Mixing Farsi with English language (code-switching), randomly switching between Farsi and English words mid-sentence.

3. **Colloquial**
Using informal, conversational Farsi instead of formal written language, including slang, dialectal variations, and everyday speech patterns.

4. **Diacritics Presence/Absence**
Adding diacritical marks (vowel markings and other pronunciation indicators) that can be optionally included in Farsi text, which affects how words are read.

5. **Keyboard Proximity Errors**
Typos caused by hitting adjacent keys on a keyboard, simulating common typing mistakes where the wrong character is typed due to finger placement.

6. **Romanization**
Converting Farsi text to Finglish—writing Farsi words using English/Latin letters instead of Persian script.

7. **Word Reordering**
Changing the order of words in sentences, testing whether tokenizers can handle different syntactic arrangements.

8. **Word Spacing, Zero-Width Characters, Extra Space**
Manipulating spacing between words by adding extra spaces, removing spaces, or inserting invisible zero-width characters that affect how text is segmented.


#### Model Performance Comparison
| model_name   |   canonical |   arabic_keyboard_for_farsi |   code_language_script_switching |   colloquial |   dialects |   equivalent_expressions |   keyboard_proximity_errors |   number_romanization |   optional_diacritics |   romanization |   spelled_out |   word_spacing_zero-width_characters_extra_space |
|:-------------|------------:|----------------------------:|---------------------------------:|-------------:|-----------:|-------------------------:|----------------------------:|----------------------:|----------------------:|---------------:|--------------:|-------------------------------------------------:|
| Aya          |       0.78  |                       0.346 |                            0.717 |        0.661 |      0.529 |                    0.607 |                       0.409 |                 0.744 |                 0.438 |          0.346 |         0.458 |                                            0.557 |
| BLOOM        |       0.775 |                       0.448 |                            0.77  |        0.6   |      0.505 |                    0.675 |                       0.571 |                 0.669 |                 0.505 |          0.276 |         0.542 |                                            0.589 |
| ByT5         |       0.769 |                       0.478 |                            0.719 |        0.591 |      0.531 |                    0.616 |                       0.527 |                 0.568 |                 0.446 |          0.28  |         0.337 |                                            0.476 |
| Comma        |       0.79  |                       0.471 |                            0.66  |        0.652 |      0.523 |                    0.66  |                       0.503 |                 0.617 |                 0.457 |          0.449 |         0.291 |                                            0.484 |
| GPT-2        |       0.78  |                       0.569 |                            0.672 |        0.739 |      0.545 |                    0.66  |                       0.616 |                 0.498 |                 0.436 |          0.298 |         0.449 |                                            0.573 |
| GPT-4o       |       0.75  |                       0.406 |                            0.744 |        0.669 |      0.504 |                    0.744 |                       0.588 |                 0.752 |                 0.375 |          0.306 |         0.466 |                                            0.544 |
| Gemma-2      |       0.75  |                       0.375 |                            0.569 |        0.688 |      0.475 |                    0.712 |                       0.544 |                 0.44  |                 0.431 |          0.425 |         0.446 |                                            0.5   |
| Llama-3.2    |       0.743 |                       0.355 |                            0.688 |        0.587 |      0.55  |                    0.675 |                       0.499 |                 0.907 |                 0.291 |          0.304 |         0.429 |                                            0.46  |
| Phi-3        |       0.82  |                       0.48  |                            0.675 |        0.593 |      0.501 |                    0.63  |                       0.542 |                 0.555 |                 0.493 |          0.328 |         0.469 |                                            0.593 |
| Qwen-3       |       0.857 |                       0.428 |                            0.643 |        0.545 |      0.541 |                    0.59  |                       0.534 |                 0.644 |                 0.455 |          0.252 |         0.384 |                                            0.473 |
| Tekken       |       0.842 |                       0.481 |                            0.743 |        0.594 |      0.51  |                    0.697 |                       0.561 |                 0.853 |                 0.449 |          0.318 |         0.522 |                                            0.547 |
| TokenMonster |       0.714 |                       0.533 |                            0.622 |        0.671 |      0.521 |                    0.61  |                       0.542 |                 0.728 |                 0.523 |          0.352 |         0.318 |                                            0.519 |
| XGLM         |       0.757 |                       0.499 |                            0.669 |        0.558 |      0.522 |                    0.706 |                       0.539 |                 0.644 |                 0.462 |          0.297 |         0.415 |                                            0.559 |
| mBERT        |       0.746 |                       0.377 |                            0.678 |        0.678 |      0.508 |                    0.659 |                       0.585 |                 0.402 |                 0.547 |          0.414 |         0.296 |                                            0.659 |

#### Who are the source data producers?

Native Farsi speakers curated and validated all questions and perturbations. The TokSuite research team at R3 designed the overall benchmark framework.

### Annotations

#### Annotation process

Questions were manually created and translated by native speakers. Each perturbation was carefully designed to reflect authentic variations encountered in real-world Farsi text processing.

#### Who are the annotators?

Native Farsi speakers with expertise in linguistics and NLP, working as part of the TokSuite project.

### Personal and Sensitive Information

The dataset contains only general knowledge questions and does not include any personal or sensitive information.

## Considerations for Using the Data

### Social Impact of Dataset

This dataset contributes to improving language technology for Farsi speakers by:
- Enabling better understanding of tokenization challenges in Persian
- Supporting development of more robust multilingual models
- Providing standardized evaluation for Farsi NLP research

### Discussion of Biases

- **Language variety**: The dataset uses Modern Standard Persian and may not fully represent dialectal variations
- **Script focus**: Only Arabic script is used; romanized versions are included as perturbations
- **Domain coverage**: Questions focus on general knowledge and may not represent domain-specific language use
- **Question simplicity**: Designed for high baseline accuracy, which may not reflect real-world task complexity

### Other Known Limitations

- Relatively small dataset size (designed for evaluation, not training)
- Focus on multiple-choice format may not capture all aspects of language understanding
- Perturbations are specific to Farsi's characteristics and findings may not generalize to all languages
- Models evaluated were trained at ~1B parameters; results may differ at larger scales

## Additional Information

### Dataset Curators

The dataset was curated by the TokSuite research team at R3.

### Licensing Information

MIT License

### Citation Information

If you use this dataset in your research, please cite the TokSuite paper:

```bibtex
@inproceedings{toksuite2026,
  title={TokSuite: Measuring the Impact of Tokenizer Choice on Language Model Behavior},
  author={Altıntaş, Gül Sena and Ehghaghi, Malikeh and Lester, Brian and Liu, Fengyuan and Zhao, Wanru and Ciccone, Marco and Raffel, Colin},
  booktitle={Preprint.},
  year={2026},
  url={TBD}
}
```

**Paper**: [TokSuite: Measuring the Impact of Tokenizer Choice on Language Model Behavior](TBD)

### Contributions

This dataset is part of TokSuite, which includes:
- 14 language models with identical architectures but different tokenizers
- Multilingual benchmark datasets (English, Turkish, Italian, Farsi, Chinese)
- Comprehensive analysis of tokenization's impact on model behavior


### Contact

For questions or issues related to this dataset, please refer to the TokSuite project or contact the authors through the paper submission system.

---

<div align="center">
  
**Part of the [TokSuite Project](TBD)**

*Understanding Tokenization's Role in Language Model Behavior*

</div>