Datasets:

Modalities:
Image
Text
Formats:
parquet
ArXiv:
Libraries:
Datasets
pandas
License:
zygg commited on
Commit
272c919
·
verified ·
1 Parent(s): cdb48aa

Upload 3 files

Browse files
Code_Error_Correction/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c8cf5f9beb7ce0e4e3e0aae7206b68f064163413fa3c47a4ce8b9ea28ef94701
3
+ size 1359206
Code_Function_Editing/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5371387773cc4d81fb271bdf7fd615de060d688e9f99fa262430b260b7695b1c
3
+ size 993887
webuibench.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import datasets
3
+ import pandas as pd
4
+
5
class WebUIBench(datasets.GeneratorBasedBuilder):
    """Dataset builder exposing each sibling directory as a named subset.

    Every directory next to this script (unless its name starts with "." or
    "_") that contains a ``test-00000-of-00001.parquet`` shard becomes one
    ``BuilderConfig``; each subset provides a single TEST split read with
    pandas.
    """

    BUILDER_CONFIG_CLASS = datasets.BuilderConfig

    # One config per qualifying sibling directory.  sorted() makes the config
    # order — and therefore DEFAULT_CONFIG_NAME below — deterministic; raw
    # os.listdir() order is filesystem-dependent.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=subset,
            version=datasets.Version("1.0.0"),
            description=f"Subset: {subset}",
        )
        for subset in sorted(os.listdir(os.path.dirname(__file__)))
        if os.path.isdir(os.path.join(os.path.dirname(__file__), subset))
        and not subset.startswith((".", "_"))
        and os.path.exists(
            os.path.join(os.path.dirname(__file__), subset, "test-00000-of-00001.parquet")
        )
    ]

    # First discovered subset (alphabetically) is the default config.
    DEFAULT_CONFIG_NAME = BUILDER_CONFIGS[0].name if BUILDER_CONFIGS else None

    def _info(self):
        """Return dataset metadata.

        ``features=None`` lets the `datasets` library infer the schema from
        the generated examples; the previous ``datasets.Features({})`` was an
        *empty* explicit schema, which would encode every example against
        zero columns instead of auto-inferring them.
        """
        return datasets.DatasetInfo(
            description="WebUIBench dataset with multiple subsets.",
            features=None,  # infer features automatically from the examples
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        """Expose a single TEST split backed by the active subset's parquet shard."""
        subset_path = os.path.join(
            os.path.dirname(__file__), self.config.name, "test-00000-of-00001.parquet"
        )
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": subset_path},
            )
        ]

    def _generate_examples(self, filepath):
        """Yield ``(index, example_dict)`` pairs from the parquet file at *filepath*."""
        df = pd.read_parquet(filepath)
        for idx, row in df.iterrows():
            yield idx, row.to_dict()