momentino committed
Commit 5a96ab6 · verified · 1 parent: 761cd90

Upload working-memory.py

Files changed (1)
  1. working-memory.py +127 -0
working-memory.py ADDED
@@ -0,0 +1,127 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+
+ import json
+ from pathlib import Path
+
+ import datasets
+
+
+ _CITATION = """\
+ @inproceedings{gong2024working,
+   title={Working memory capacity of ChatGPT: An empirical study},
+   author={Gong, Dongyu and Wan, Xingchen and Wang, Dingmin},
+   booktitle={Proceedings of the AAAI Conference on Artificial Intelligence},
+   volume={38},
+   number={9},
+   pages={10048--10056},
+   year={2024}
+ }
+ """
+
+ _DESCRIPTION = """\
+ A benchmark for evaluating working memory capabilities in LLMs. Only the data for the three base 'verbal' (n-back) experiments are provided here."""
+
+ _HOMEPAGE = "https://github.com/Daniel-Gong/ChatGPT-WM"
+
+ _LICENSE = "MIT"
+
+ _URLS_PREFIX = {
+     "verbal": "https://raw.githubusercontent.com/momentino/playpen_eval/main/frameworks/playpen_eval_benchmarks/tasks/wm/data/json/verbal",
+ }
+ _URLS = {
+     "verbal_1back": {
+         "test": _URLS_PREFIX["verbal"] + "/1back.json"
+     },
+     "verbal_2back": {
+         "test": _URLS_PREFIX["verbal"] + "/2back.json"
+     },
+     "verbal_3back": {
+         "test": _URLS_PREFIX["verbal"] + "/3back.json"
+     },
+ }
+
+ class WorkingMemory(datasets.GeneratorBasedBuilder):
+     """Verbal n-back working-memory tasks from Gong et al. (2024)."""
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(
+             name=config_name,
+             version=datasets.Version("0.0.1"),
+             description=f"{config_name} task from WorkingMemory",
+         )
+         for config_name in _URLS
+     ]
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "stimuli": datasets.Value("string"),
+                 "target": datasets.Value("string"),
+             }
+         )
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             citation=_CITATION,
+             license=_LICENSE,
+         )
+
+     # An earlier, unused draft of _split_generators that read subsets from a
+     # local folder, kept here commented out:
+     """def _split_generators(self, dl_manager):
+         data_dir = Path("path/to/your/local/folder")  # Use Path object
+         subset_dirs = [d for d in data_dir.iterdir() if d.is_dir()]  # Get only directories
+
+         split_generators = []
+         for subset_dir in subset_dirs:
+             for i in range(50):  # Create at least 50 splits per subset
+                 split_generators.append(
+                     datasets.SplitGenerator(
+                         name=f"{subset_dir.name}_split_{i}",
+                         gen_kwargs={
+                             "filepath": str(subset_dir),
+                             "split": f"{subset_dir.name}_split_{i}",
+                         },
+                     )
+                 )
+
+         return split_generators"""
+
+     def _split_generators(self, dl_manager):
+         urls = _URLS[self.config.name]
+         data_dir = dl_manager.download_and_extract(urls)
+
+         with open(data_dir["test"], encoding="utf-8") as fin:
+             data = json.load(fin)
+
+         # Create one split per top-level entry in the JSON file, naming them uniquely
+         splits = []
+         for idx in range(len(data)):
+             splits.append(
+                 datasets.SplitGenerator(
+                     # Splits are named "0", "1", "2", ...
+                     name=f"{idx}",
+                     gen_kwargs={
+                         "filepath": data_dir["test"],
+                         "index": idx,
+                     },
+                 )
+             )
+         return splits
+
+
+     def _generate_examples(self, filepath, index):
+         # Open the JSON file and load the entry at the provided index;
+         # each element of it is a record with "stimuli" and "target" fields
+         with open(filepath, encoding="utf-8") as fin:
+             data = json.load(fin)
+         for key, instance in enumerate(data[index]):
+             # Yield with the enumeration index as key, which is unique within this split
+             yield key, instance
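
For context, a minimal usage sketch for the script above. The repo id below is a placeholder (hypothetical, not taken from this commit); the config names come from _URLS and the per-entry split names ("0", "1", ...) from _split_generators, and trust_remote_code=True is required to execute script-based datasets with recent versions of the datasets library.

# Minimal usage sketch; the repo id "momentino/working-memory" is hypothetical.
from datasets import load_dataset

# Pick one n-back config; each split ("0", "1", ...) holds one block of trials.
ds = load_dataset(
    "momentino/working-memory",  # hypothetical repo id
    "verbal_2back",
    split="0",
    trust_remote_code=True,  # needed to run a script-based dataset
)
print(ds[0])  # {"stimuli": "...", "target": "..."}

Because each block of trials is exposed as its own split, an evaluation harness can iterate over blocks by requesting split="0", split="1", and so on, rather than slicing a single test split.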