Uploading tokenizer_robustness_completion_italian_phonetic_spelling subset
README.md
CHANGED
@@ -1520,6 +1520,132 @@ dataset_info:
     num_examples: 92
   download_size: 52569
   dataset_size: 54975
+- config_name: tokenizer_robustness_completion_italian_phonetic_spelling
+  features:
+  - name: question
+    dtype: string
+  - name: choices
+    list: string
+  - name: answer
+    dtype: int64
+  - name: answer_label
+    dtype: string
+  - name: split
+    dtype: string
+  - name: subcategories
+    dtype: string
+  - name: category
+    dtype: string
+  - name: lang
+    dtype: string
+  - name: second_lang
+    dtype: string
+  - name: notes
+    dtype: string
+  - name: id
+    dtype: string
+  - name: set_id
+    dtype: string
+  - name: variation_id
+    dtype: string
+  - name: perturbed_word
+    dtype: string
+  - name: vanilla_cos_sim_to_canonical
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: float64
+    - name: Qwen/Qwen3-8B
+      dtype: float64
+    - name: bigscience/bloom
+      dtype: float64
+    - name: common-pile/comma-v0.1-1t
+      dtype: float64
+    - name: facebook/xglm-564M
+      dtype: float64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: float64
+    - name: google/byt5-small
+      dtype: float64
+    - name: google/gemma-2-2b
+      dtype: float64
+    - name: gpt2
+      dtype: float64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: float64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: float64
+    - name: mistralai/tekken
+      dtype: float64
+    - name: tiktoken/gpt-4o
+      dtype: float64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: float64
+  - name: trimmed_cos_sim_to_canonical
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: float64
+    - name: Qwen/Qwen3-8B
+      dtype: float64
+    - name: bigscience/bloom
+      dtype: float64
+    - name: common-pile/comma-v0.1-1t
+      dtype: float64
+    - name: facebook/xglm-564M
+      dtype: float64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: float64
+    - name: google/byt5-small
+      dtype: float64
+    - name: google/gemma-2-2b
+      dtype: float64
+    - name: gpt2
+      dtype: float64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: float64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: float64
+    - name: mistralai/tekken
+      dtype: float64
+    - name: tiktoken/gpt-4o
+      dtype: float64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: float64
+  - name: token_counts
+    struct:
+    - name: CohereLabs/aya-expanse-8b
+      dtype: int64
+    - name: Qwen/Qwen3-8B
+      dtype: int64
+    - name: bigscience/bloom
+      dtype: int64
+    - name: common-pile/comma-v0.1-1t
+      dtype: int64
+    - name: facebook/xglm-564M
+      dtype: int64
+    - name: google-bert/bert-base-multilingual-cased
+      dtype: int64
+    - name: google/byt5-small
+      dtype: int64
+    - name: google/gemma-2-2b
+      dtype: int64
+    - name: gpt2
+      dtype: int64
+    - name: meta-llama/Llama-3.2-1B
+      dtype: int64
+    - name: microsoft/Phi-3-mini-4k-instruct
+      dtype: int64
+    - name: mistralai/tekken
+      dtype: int64
+    - name: tiktoken/gpt-4o
+      dtype: int64
+    - name: tokenmonster/englishcode-32000-consistent-v1
+      dtype: int64
+  splits:
+  - name: test
+    num_bytes: 24151
+    num_examples: 43
+  download_size: 39932
+  dataset_size: 24151
 configs:
 - config_name: tokenizer_robustness_completion_italian_abbreviations
   data_files:
@@ -1569,6 +1695,10 @@ configs:
   data_files:
   - split: test
     path: tokenizer_robustness_completion_italian_orthographic_errors/test-*
+- config_name: tokenizer_robustness_completion_italian_phonetic_spelling
+  data_files:
+  - split: test
+    path: tokenizer_robustness_completion_italian_phonetic_spelling/test-*
 ---
 
 # Dataset Card for Tokenization Robustness
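Once this commit lands, the new subset is selectable by its config name. Below is a minimal sketch of loading it with the `datasets` library; the repository id is a placeholder, since the owning namespace is not shown in this diff.

    from datasets import load_dataset

    # "<org>/<repo>" is a placeholder: the dataset's namespace does not appear in this diff.
    ds = load_dataset(
        "<org>/<repo>",
        name="tokenizer_robustness_completion_italian_phonetic_spelling",
        split="test",
    )

    print(len(ds))  # 43 examples, per the splits metadata above
    row = ds[0]
    print(row["question"], row["choices"], row["answer_label"])
    # Per-tokenizer metrics are stored as structs keyed by tokenizer name:
    print(row["token_counts"]["google/gemma-2-2b"])
    print(row["vanilla_cos_sim_to_canonical"]["gpt2"])

Note that the struct features (`vanilla_cos_sim_to_canonical`, `trimmed_cos_sim_to_canonical`, `token_counts`) carry one value per tokenizer in the schema, so each row records how every listed tokenizer handled that example.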
tokenizer_robustness_completion_italian_phonetic_spelling/test-00000-of-00001.parquet
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:ebeb29546e7dbde71c92c0b21155599e57bb3f22284edd438f2ff3172866e31b
+size 39932
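The parquet file itself is stored via Git LFS, so the repository tracks only this pointer (a sha256 oid and a byte size). A quick sketch of verifying a downloaded copy against the hash recorded above; the local path is an assumption based on the repo layout.

    import hashlib

    # Path assumes a local checkout mirroring the repo layout; adjust as needed.
    path = "tokenizer_robustness_completion_italian_phonetic_spelling/test-00000-of-00001.parquet"
    expected = "ebeb29546e7dbde71c92c0b21155599e57bb3f22284edd438f2ff3172866e31b"  # oid from the pointer

    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)

    assert h.hexdigest() == expected, "sha256 mismatch: not the file this pointer describes"
    print("ok: content matches the LFS pointer")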