lcalvobartolome committed
Commit 1dc7cc3 · 1 Parent(s): 6ade985

added custom script for loading data

Files changed (2)
  1. README.md +44 -14
  2. proxann_data.py +0 -170
README.md CHANGED
@@ -88,35 +88,65 @@ Each file is a JSON mapping of **token -> integer index** (0–14,999).
  | `data_with_embeddings/vocabs/wiki_vocab.json` | Vocabulary for the Wiki corpus. Keys are tokens, values are integer indices. |

  ## Usage Example
+ The dataset contains four Parquet files:
+
+ - `bills_train`
+ - `bills_test`
+ - `wiki_train`
+ - `wiki_test`
+
+ Because the Bills and Wiki splits use different schemas, you should load each split
+ directly from its Parquet file using the generic `parquet` loader from 🤗 Datasets:

  ```python
  from datasets import load_dataset

- # Bills
+ # ------------------------------
+ # Bills Dataset
+ # ------------------------------
  bills_train = load_dataset(
-     "lcalvobartolome/proxann_data",
-     name="bills",
+     "parquet",
+     data_files={
+         "train": "hf://datasets/lcalvobartolome/proxann_data@main/"
+                  "bills_train.metadata.embeddings.jsonl.all-MiniLM-L6-v2.parquet"
+     },
      split="train",
-     trust_remote_code=True,
  )
- print(len(bills_train))  # 32661
+ print("Bills train size:", len(bills_train))  # 32661

  bills_test = load_dataset(
-     "lcalvobartolome/proxann_data",
-     name="bills",
+     "parquet",
+     data_files={
+         "test": "hf://datasets/lcalvobartolome/proxann_data@main/"
+                 "bills_test.metadata.parquet"
+     },
      split="test",
-     trust_remote_code=True,
  )
- print(bills_test.column_names)  # 15242
+ print("Bills test size:", len(bills_test))  # 15242
+

- # Wiki
+ # ------------------------------
+ # Wiki Dataset
+ # ------------------------------
  wiki_train = load_dataset(
-     "lcalvobartolome/proxann_data",
-     name="wiki",
+     "parquet",
+     data_files={
+         "train": "hf://datasets/lcalvobartolome/proxann_data@main/"
+                  "wiki_train.metadata.embeddings.jsonl.all-MiniLM-L6-v2.parquet"
+     },
      split="train",
-     trust_remote_code=True,
  )
- print(len(wiki_train))  # 14290
+ print("Wiki train size:", len(wiki_train))  # 14290
+
+ wiki_test = load_dataset(
+     "parquet",
+     data_files={
+         "test": "hf://datasets/lcalvobartolome/proxann_data@main/"
+                 "wiki_test.metadata.parquet"
+     },
+     split="test",
+ )
+ print("Wiki test size:", len(wiki_test))
  ```

  ## Related Resources
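
The Parquet files referenced in the updated README are plain files on the Hub, so they can also be read without the `datasets` library. A minimal sketch, assuming `pandas` and `huggingface_hub` are installed so the `hf://` fsspec filesystem is available:

```python
import pandas as pd

# Read the Bills test split straight into a DataFrame via the hf:// protocol
# (registered by huggingface_hub's fsspec integration).
bills_test = pd.read_parquet(
    "hf://datasets/lcalvobartolome/proxann_data@main/bills_test.metadata.parquet"
)
print(len(bills_test))  # 15242 rows, per the README
```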
proxann_data.py DELETED
@@ -1,170 +0,0 @@
- # proxann_data.py
- #
- # Dataset script for `lcalvobartolome/proxann_data`.
- # Provides two configs:
- #   - "bills": US Congressional bills
- #   - "wiki": Wikipedia articles
- #
- # Usage:
- #   from datasets import load_dataset
- #
- #   bills_train = load_dataset(
- #       "lcalvobartolome/proxann_data",
- #       name="bills",
- #       split="train",
- #       trust_remote_code=True,
- #   )
- #
- #   wiki_train = load_dataset(
- #       "lcalvobartolome/proxann_data",
- #       name="wiki",
- #       split="train",
- #       trust_remote_code=True,
- #   )
-
- import os
- from typing import Dict, Any, Iterator, Tuple
-
- import datasets
- import pyarrow.parquet as pq
-
-
- _CITATION = """\
- @inproceedings{soldaini2016proxann,
-   title = {ProxANN: ...},
-   author = {...},
-   year = {2016},
- }
- """
-
- _DESCRIPTION = """\
- ProxANN dataset with two components:
-
- - bills: US Congressional bills with summaries and topics
- - wiki: Wikipedia articles with category hierarchy
-
- Stored as Parquet files with pre-computed MiniLM embeddings.
- """
-
- _HOMEPAGE = "https://huggingface.co/datasets/lcalvobartolome/proxann_data"
- _LICENSE = "mit"
-
- _BASE_URL = (
-     "https://huggingface.co/datasets/"
-     "lcalvobartolome/proxann_data/resolve/main"
- )
-
- _BILLS_FILES = {
-     "train": f"{_BASE_URL}/bills_train.metadata.embeddings.jsonl.all-MiniLM-L6-v2.parquet",
-     "test": f"{_BASE_URL}/bills_test.metadata.parquet",
- }
-
- _WIKI_FILES = {
-     "train": f"{_BASE_URL}/wiki_train.metadata.embeddings.jsonl.all-MiniLM-L6-v2.parquet",
-     "test": f"{_BASE_URL}/wiki_test.metadata.parquet",
- }
-
-
- class ProxannConfig(datasets.BuilderConfig):
-     """BuilderConfig for ProxANN (bills / wiki)."""
-
-     def __init__(self, *, features: datasets.Features, **kwargs):
-         super().__init__(**kwargs)
-         self.features = features
-
-
- class ProxannData(datasets.GeneratorBasedBuilder):
-     """ProxANN dataset with two configs: bills and wiki."""
-
-     VERSION = datasets.Version("1.0.0")
-     BUILDER_CONFIG_CLASS = ProxannConfig
-     DEFAULT_CONFIG_NAME = "bills"
-
-     BUILDER_CONFIGS = [
-         ProxannConfig(
-             name="bills",
-             version=VERSION,
-             description="US Congressional bills with summaries and topics.",
-             features=datasets.Features(
-                 {
-                     "id": datasets.Value("string"),
-                     "summary": datasets.Value("string"),
-                     "topic": datasets.Value("string"),
-                     "subtopic": datasets.Value("string"),
-                     "subjects_top_term": datasets.Value("string"),
-                     "date": datasets.Value("timestamp[ns]"),
-                     "tokenized_text": datasets.Value("string"),
-                     "embeddings": datasets.Value("string"),
-                 }
-             ),
-         ),
-         ProxannConfig(
-             name="wiki",
-             version=VERSION,
-             description="Wikipedia articles with category hierarchy.",
-             features=datasets.Features(
-                 {
-                     "id": datasets.Value("string"),
-                     "text": datasets.Value("string"),
-                     "supercategory": datasets.Value("string"),
-                     "category": datasets.Value("string"),
-                     "subcategory": datasets.Value("string"),
-                     "page_name": datasets.Value("string"),
-                     "tokenized_text": datasets.Value("string"),
-                     "embeddings": datasets.Value("string"),
-                 }
-             ),
-         ),
-     ]
-
-     def _info(self) -> datasets.DatasetInfo:
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=self.config.features,
-             homepage=_HOMEPAGE,
-             license=_LICENSE,
-             citation=_CITATION,
-         )
-
-     def _split_generators(
-         self, dl_manager: datasets.DownloadManager
-     ) -> list[datasets.SplitGenerator]:
-         """Download the appropriate files and define splits."""
-
-         if self.config.name == "bills":
-             data_files = dl_manager.download_and_extract(_BILLS_FILES)
-         elif self.config.name == "wiki":
-             data_files = dl_manager.download_and_extract(_WIKI_FILES)
-         else:
-             raise ValueError(f"Unknown config name: {self.config.name}")
-
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={"filepath": data_files["train"]},
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={"filepath": data_files["test"]},
-             ),
-         ]
-
-     def _generate_examples(
-         self, filepath: str
-     ) -> Iterator[Tuple[int, Dict[str, Any]]]:
-         """Yield (key, example) pairs from a Parquet file."""
-
-         table = pq.read_table(filepath)
-
-         feature_names = list(self.config.features.keys())
-         table = table.select(feature_names)
-
-         for batch in table.to_batches():
-             batch_table = batch.to_pydict()
-             num_rows = len(next(iter(batch_table.values())))
-             for i in range(num_rows):
-                 row = {
-                     col: batch_table[col][i]
-                     for col in feature_names
-                 }
-                 yield i, row
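
The deleted script exposed the data through `name="bills"` / `name="wiki"` configs that required `trust_remote_code=True`. The same call pattern can be kept on top of the generic `parquet` loader with a thin wrapper; a minimal sketch assembled from the file paths above (the `load_proxann` helper and its file map are illustrative, not part of the repository):

```python
from datasets import load_dataset

_BASE = "hf://datasets/lcalvobartolome/proxann_data@main"

# Per-config file names taken from the repository; "train" files carry the
# pre-computed all-MiniLM-L6-v2 embeddings, "test" files only metadata.
_FILES = {
    "bills": {
        "train": f"{_BASE}/bills_train.metadata.embeddings.jsonl.all-MiniLM-L6-v2.parquet",
        "test": f"{_BASE}/bills_test.metadata.parquet",
    },
    "wiki": {
        "train": f"{_BASE}/wiki_train.metadata.embeddings.jsonl.all-MiniLM-L6-v2.parquet",
        "test": f"{_BASE}/wiki_test.metadata.parquet",
    },
}


def load_proxann(name: str, split: str):
    """Hypothetical helper mimicking the removed script's name/split interface."""
    return load_dataset(
        "parquet",
        data_files={split: _FILES[name][split]},
        split=split,
    )


bills_train = load_proxann("bills", "train")
print(len(bills_train))  # 32661, per the README
```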