ameyhengle commited on
Commit
ec4c18d
·
1 Parent(s): 63b0aac

add setup file

Browse files
Files changed (1) hide show
  1. data_setup.py +87 -0
data_setup.py ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import csv
import logging
import os

import datasets
4
+
5
class DatasetConfig(datasets.BuilderConfig):
    """BuilderConfig that carries the data subdirectory for one dataset subset."""

    def __init__(self, name: str, subset_dir: str, **kwargs):
        """
        Args:
            name: Configuration name registered with `datasets` (e.g. "4k", "8k");
                this is how users select the subset.
            subset_dir: Subdirectory under "data/" holding this subset's files
                (e.g. "4k").
            **kwargs: Forwarded unchanged to `datasets.BuilderConfig`
                (e.g. `description`).
        """
        super().__init__(name, **kwargs)
        self.subset_dir = subset_dir
17
+
18
+
19
# Main dataset builder class
class MLNeedle(datasets.GeneratorBasedBuilder):
    """Builder for the Multilingual Needle-in-a-Haystack dataset.

    Each config (baseline, 4k, 8k, 16k, 32k) names a subdirectory under
    "data/" holding CSV files; `_generate_examples` yields one example per
    CSV row.

    NOTE(review): `datasets.GeneratorBasedBuilder` requires a
    `_split_generators` method, which is not defined in this file —
    `download_and_prepare` will fail until it is added. Confirm whether the
    splits (per-language, per the description below) are wired up elsewhere.
    """

    VERSION = datasets.Version("1.0.0")

    # One config per context length; `subset_dir` points at the matching
    # subdirectory under "data/".
    BUILDER_CONFIGS = [
        DatasetConfig(name="baseline", subset_dir="baseline", description="Baseline subset data."),
        DatasetConfig(name="4k", subset_dir="4k", description="Dataset subset with 4k context size."),
        DatasetConfig(name="8k", subset_dir="8k", description="Dataset subset with 8k context size."),
        DatasetConfig(name="16k", subset_dir="16k", description="Dataset subset with 16k context size."),
        DatasetConfig(name="32k", subset_dir="32k", description="Dataset subset with 32k context size."),
    ]

    DEFAULT_CONFIG_NAME = "baseline"

    # Convenience lookup: config name -> config object.
    _BUILDER_CONFIGS_GROUPED_BY_DATASET_NAME = {
        config.name: config for config in BUILDER_CONFIGS
    }

    def _info(self):
        """Defines the dataset schema and metadata."""
        return datasets.DatasetInfo(
            description="""
            Multilingual Needle in a Haystack dataset for evaluating large language models
            on their ability to retrieve specific information from long contexts across multiple languages.
            Each subset (e.g., 4k, 8k) corresponds to different context lengths, and each split
            (e.g., en, es) represents a language.
            """,
            features=datasets.Features({
                "id": datasets.Value("string"),
                "needle_lang": datasets.Value("string"),
                "question_lang": datasets.Value("string"),
                "distractor_lang": datasets.Value("string"),
                "needle_position": datasets.Value("string"),
                "answer_text_format": datasets.Value("string"),
                "answer_start_index": datasets.Value("int32"),
                "answer_sentence": datasets.Value("string"),
                "prompt": datasets.Value("string"),
            }),
            supervised_keys=None,
            homepage="https://huggingface.co/datasets/ameyhengle/Multilingual-Needle-in-a-Haystack",
            license="MIT",
        )

    def _generate_examples(self, filepath):
        """Yield (index, example) pairs from one CSV file.

        Args:
            filepath: The full path to the CSV file to be processed.

        Yields:
            Tuples of (row index, example dict) matching the features in
            `_info`.
        """
        logger = logging.getLogger(__name__)
        with open(filepath, encoding="utf-8") as f:
            reader = csv.DictReader(f)
            for i, row in enumerate(reader):
                # A missing column (KeyError) is treated the same as an
                # unparsable value: warn and fall back to 0 rather than
                # aborting the whole split on one bad row.
                try:
                    answer_start_index = int(row["answer_start_index"])
                except (KeyError, ValueError, TypeError):
                    logger.warning(
                        "Could not read 'answer_start_index' as int for row %d in %s. Defaulting to 0.",
                        i, filepath,
                    )
                    answer_start_index = 0

                # Yield the example, mapping CSV columns to dataset features
                yield i, {
                    "id": row["id"],
                    "needle_lang": row["needle_lang"],
                    "question_lang": row["question_lang"],
                    "distractor_lang": row["distractor_lang"],
                    "needle_position": row["needle_position"],
                    "answer_text_format": row["answer_text_format"],
                    "answer_start_index": answer_start_index,
                    "answer_sentence": row["answer_sentence"],
                    "prompt": row["prompt"],
                }