dbogdan committed on
Commit
3f6d2c1
·
verified ·
1 Parent(s): 66e0bbd

Add files using upload-large-folder tool

Browse files
Files changed (1) hide show
  1. dataset.py +83 -0
dataset.py ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import json
import os

from datasets import DatasetInfo, Features, GeneratorBasedBuilder, Split, SplitGenerator, Value
6
class WikiTableQuestions(GeneratorBasedBuilder):
    """
    A simple Hugging Face dataset builder for evaluating question-answering (QA)
    over tabular data, using file paths as context (CSV, HTML, TSV).

    The dataset is loaded from JSON files (one per split) containing QA samples
    and context file paths.
    """

    def _info(self):
        """
        Returns the metadata and schema of the dataset.

        Returns:
            DatasetInfo: Contains description, features (schema), and supervised keys.
        """
        return DatasetInfo(
            description="QA over tabular data with file paths as context",
            features=Features({
                "id": Value("string"),
                "utterance": Value("string"),
                "target_value": Value("string"),
                # Context holds paths to the same table serialized three ways.
                "context": Features({
                    "csv": Value("string"),
                    "html": Value("string"),
                    "tsv": Value("string"),
                }),
            }),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        """
        Downloads the split files and defines dataset splits.

        Args:
            dl_manager (DownloadManager): The Hugging Face datasets download manager.

        Returns:
            List[SplitGenerator]: Train and test split generators, each pointing
            at its downloaded JSON examples file.
        """
        downloaded_files = dl_manager.download({
            "train": "examples/examples-train.json",
            "test": "examples/examples-test.json"
        })
        return [
            SplitGenerator(name=Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
            SplitGenerator(name=Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
        ]

    def _generate_examples(self, filepath):
        """
        Yields examples from the dataset JSON file.

        Each example consists of a question (utterance), a target value, and
        paths to context files (CSV, HTML, TSV). The paths are passed through
        exactly as stored in the JSON file — no resolution to absolute paths
        is performed here; consumers must resolve them if needed.

        Args:
            filepath (str): Path to the JSON file containing dataset examples.
                The file must hold a JSON array of objects with keys
                "id", "utterance", "target_value", and "context"
                (itself with "csv", "html", "tsv" keys).

        Yields:
            Tuple[int, dict]: A tuple of the example index and the sample dictionary.
        """
        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)

        for i, item in enumerate(data):
            yield i, {
                "id": item["id"],
                "utterance": item["utterance"],
                "target_value": item["target_value"],
                "context": {
                    "csv": item["context"]["csv"],
                    "html": item["context"]["html"],
                    "tsv": item["context"]["tsv"],
                },
            }