File size: 4,246 Bytes
7f34860
 
887d862
7f34860
 
 
 
 
 
ddf3a8c
 
 
ad01faf
 
 
 
 
887d862
 
 
 
 
 
 
 
b5b5291
887d862
b5b5291
887d862
 
 
 
 
7905d3b
 
 
 
eb93a55
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7f34860
 
 
 
 
83ec112
 
887d862
 
 
 
eb93a55
 
 
 
7b26c6b
 
 
 
7f34860
7b26c6b
cff203d
7b26c6b
 
 
 
 
 
 
 
 
 
 
 
30af376
7b26c6b
 
 
 
 
 
 
 
06d8db8
7b26c6b
c134d9f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
06d8db8
 
7b26c6b
c134d9f
 
 
 
 
 
 
 
 
 
 
 
7b26c6b
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
---
dataset_info:
- config_name: default
  features:
  - name: utterance
    dtype: string
  - name: label
    sequence: int64
  splits:
  - name: train
    num_bytes: 7169122
    num_examples: 9042
  - name: test
    num_bytes: 450937
    num_examples: 358
  download_size: 8973442
  dataset_size: 7620059
- config_name: intents
  features:
  - name: id
    dtype: int64
  - name: name
    dtype: string
  - name: tags
    sequence: 'null'
  - name: regex_full_match
    sequence: 'null'
  - name: regex_partial_match
    sequence: 'null'
  - name: description
    dtype: 'null'
  splits:
  - name: intents
    num_bytes: 291
    num_examples: 10
  download_size: 3034
  dataset_size: 291
- config_name: intentsqwen3-32b
  features:
  - name: id
    dtype: int64
  - name: name
    dtype: string
  - name: tags
    sequence: 'null'
  - name: regex_full_match
    sequence: 'null'
  - name: regex_partial_match
    sequence: 'null'
  - name: description
    dtype: string
  splits:
  - name: intents
    num_bytes: 1282
    num_examples: 10
  download_size: 3939
  dataset_size: 1282
configs:
- config_name: default
  data_files:
  - split: train
    path: data/train-*
  - split: test
    path: data/test-*
- config_name: intents
  data_files:
  - split: intents
    path: intents/intents-*
- config_name: intentsqwen3-32b
  data_files:
  - split: intents
    path: intentsqwen3-32b/intents-*
task_categories:
- text-classification
language:
- en
---

# reuters

This is a text classification dataset. It is intended for machine learning research and experimentation.

This dataset is obtained by reformatting another publicly available dataset to be compatible with our [AutoIntent Library](https://deeppavlov.github.io/AutoIntent/index.html).

## Usage

It is intended to be used with our [AutoIntent Library](https://deeppavlov.github.io/AutoIntent/index.html):

```python
from autointent import Dataset

reuters = Dataset.from_hub("AutoIntent/reuters")
```

## Source

This dataset is taken from `ucirvine/reuters21578` and formatted with our [AutoIntent Library](https://deeppavlov.github.io/AutoIntent/index.html):

```python
from autointent import Dataset
import datasets



def get_intents_info(ds: datasets.DatasetDict) -> list[str]:
    """Return the sorted list of unique topic names found in the train split."""
    unique_topics = {topic for sample_topics in ds["train"]["topics"] for topic in sample_topics}
    return sorted(unique_topics)

def parse(ds: datasets.Dataset, intent_names: list[str]) -> list[dict]:
    """Convert raw samples into ``{"utterance", "label"}`` records.

    The label is a multi-hot vector aligned with ``intent_names``: position i
    is 1 iff ``intent_names[i]`` appears in the sample's topics.
    """
    records = []
    for example in ds:
        topics = example["topics"]
        multi_hot = [int(name in topics) for name in intent_names]
        records.append({"utterance": example["text"], "label": multi_hot})
    return records


def get_low_resource_classes_mask(ds: list[dict], intent_names: list[str], fraction_thresh: float = 0.01) -> list[bool]:
    """Flag classes that occur in fewer than ``fraction_thresh`` of the samples.

    Args:
        ds: Parsed samples, each with a multi-hot ``"label"`` list aligned
            with ``intent_names``.
        intent_names: Class names; only its length is used here.
        fraction_thresh: Minimum fraction of positive samples a class needs
            to avoid being flagged.

    Returns:
        A boolean mask aligned with ``intent_names``; ``True`` marks a
        low-resource class.
    """
    # Guard the empty dataset: the original division by len(ds) would raise
    # ZeroDivisionError; with no data, no class can be judged low-resource.
    if not ds:
        return [False] * len(intent_names)
    counts = [0] * len(intent_names)
    for sample in ds:
        for i, indicator in enumerate(sample["label"]):
            counts[i] += indicator
    total = len(ds)
    return [count / total < fraction_thresh for count in counts]

def remove_low_resource_classes(ds: datasets.Dataset, mask: list[bool]) -> list[dict]:
    res = []
    for sample in ds:
        if sum(sample["label"]) == 1 and mask[sample["label"].index(1)]:
            continue
        sample["label"] = [
            indicator for indicator, low_resource in
            zip(sample["label"], mask, strict=True) if not low_resource
        ]
        res.append(sample)
    return res

def remove_oos(ds: list[dict]) -> list[dict]:
    """Drop out-of-scope samples, i.e. those whose multi-hot label has no positive class.

    Adds the return annotation the sibling helpers already carry, and uses
    ``any`` instead of ``sum(...) != 0`` — equivalent for 0/1 indicators.
    """
    return [sample for sample in ds if any(sample["label"])]


if __name__ == "__main__":
    # Load the ModHayes split of the Reuters-21578 corpus from the Hub
    # (requires network; trust_remote_code runs the dataset's loading script).
    reuters = datasets.load_dataset("ucirvine/reuters21578", "ModHayes", trust_remote_code=True)
    # Full sorted list of topic names seen in the train split.
    intent_names = get_intents_info(reuters)
    # Convert both splits into {"utterance", "label"} records with multi-hot labels.
    train_parsed = parse(reuters["train"], intent_names)
    test_parsed = parse(reuters["test"], intent_names)
    # Classes rarer than the default 1% threshold (computed on train only)
    # are dropped from both splits to keep the label space learnable.
    mask = get_low_resource_classes_mask(train_parsed, intent_names)
    intent_names = [name for i, name in enumerate(intent_names) if not mask[i]]
    train_filtered = remove_oos(remove_low_resource_classes(train_parsed, mask))
    test_filtered = remove_oos(remove_low_resource_classes(test_parsed, mask))

    # Assemble the AutoIntent dataset; intent ids are positions in the
    # filtered intent_names list, matching the label vector columns.
    intents = [{"id": i, "name": name} for i, name in enumerate(intent_names)]
    reuters_converted = Dataset.from_dict({"intents": intents, "train": train_filtered, "test": test_filtered})
```