davanstrien HF Staff commited on
Commit
a090310
·
1 Parent(s): a34d94f
Files changed (1) hide show
  1. semantic-dedupe.py +270 -0
semantic-dedupe.py ADDED
@@ -0,0 +1,270 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # /// script
2
+ # requires-python = ">=3.9"
3
+ # dependencies = [
4
+ # "semhash",
5
+ # "datasets",
6
+ # "huggingface-hub",
7
+ # "hf-transfer",
8
+ # ]
9
+ # ///
10
+
11
+ """
12
+ Semantic deduplication for Hugging Face datasets using SemHash.
13
+
14
+ This script removes duplicate or near-duplicate text samples from datasets based on
15
+ semantic similarity, helping to clean training data and prevent train/test leakage.
16
+
17
+ Example usage:
18
+ # Basic deduplication
19
+ uv run semantic-dedupe.py username/dataset text username/dataset-deduped
20
+
21
+ # With custom threshold and max samples for testing
22
+ uv run semantic-dedupe.py username/dataset text username/dataset-deduped \\
23
+ --threshold 0.85 --max-samples 1000
24
+
25
+ # Using HF Jobs with GPU
26
+ hf jobs uv run --flavor a10 \\
27
+ -e HF_TOKEN=$(python3 -c "from huggingface_hub import get_token; print(get_token())") \\
28
+ https://huggingface.co/datasets/uv-scripts/deduplication/raw/main/semantic-dedupe.py \\
29
+ username/dataset text username/dataset-deduped
30
+ """
31
+
32
+ import argparse
33
+ import os
34
+ import sys
35
+ from datetime import datetime
36
+ from typing import Optional
37
+
38
+ from datasets import Dataset, load_dataset
39
+ from huggingface_hub import DatasetCard, login
40
+ from semhash import SemHash
41
+
42
# Enable fast transfers
# NOTE(review): this is assigned *after* `huggingface_hub` is imported above.
# If the library captures HF_HUB_ENABLE_HF_TRANSFER in its constants at import
# time, this line has no effect and should move above the imports — confirm
# against the installed huggingface_hub version.
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
44
+
45
+
46
+ def parse_args():
47
+ parser = argparse.ArgumentParser(
48
+ description="Deduplicate a dataset using semantic similarity",
49
+ formatter_class=argparse.RawDescriptionHelpFormatter,
50
+ epilog="""
51
+ Examples:
52
+ # Basic usage
53
+ uv run semantic-dedupe.py imdb text imdb-deduped
54
+
55
+ # With options
56
+ uv run semantic-dedupe.py squad question squad-deduped --threshold 0.85 --method duplicates
57
+
58
+ # Test with small sample
59
+ uv run semantic-dedupe.py large-dataset text test-dedup --max-samples 100
60
+ """,
61
+ )
62
+
63
+ parser.add_argument("dataset", help="Input dataset ID (e.g., 'imdb' or 'username/dataset')")
64
+ parser.add_argument("column", help="Text column to deduplicate on")
65
+ parser.add_argument("output_repo", help="Output dataset repository name")
66
+
67
+ parser.add_argument(
68
+ "--split",
69
+ default="train",
70
+ help="Dataset split to process (default: train)",
71
+ )
72
+ parser.add_argument(
73
+ "--method",
74
+ choices=["duplicates", "outliers", "representatives"],
75
+ default="duplicates",
76
+ help="Deduplication method (default: duplicates)",
77
+ )
78
+ parser.add_argument(
79
+ "--threshold",
80
+ type=float,
81
+ default=0.9,
82
+ help="Similarity threshold for duplicates (default: 0.9)",
83
+ )
84
+ parser.add_argument(
85
+ "--batch-size",
86
+ type=int,
87
+ default=64,
88
+ help="Batch size for processing (default: 64)",
89
+ )
90
+ parser.add_argument(
91
+ "--max-samples",
92
+ type=int,
93
+ help="Maximum number of samples to process (for testing)",
94
+ )
95
+ parser.add_argument(
96
+ "--private",
97
+ action="store_true",
98
+ help="Create private dataset repository",
99
+ )
100
+ parser.add_argument(
101
+ "--hf-token",
102
+ default=os.environ.get("HF_TOKEN"),
103
+ help="Hugging Face API token (defaults to HF_TOKEN env var)",
104
+ )
105
+
106
+ return parser.parse_args()
107
+
108
+
109
+ def create_dataset_card(
110
+ original_dataset: str,
111
+ column: str,
112
+ method: str,
113
+ threshold: float,
114
+ original_size: int,
115
+ deduped_size: int,
116
+ ) -> str:
117
+ """Create a dataset card with deduplication information."""
118
+ reduction_pct = ((original_size - deduped_size) / original_size) * 100
119
+
120
+ return f"""---
121
+ viewer: false
122
+ tags:
123
+ - deduplication
124
+ - semhash
125
+ - uv-script
126
+ ---
127
+
128
+ # Deduplicated {original_dataset}
129
+
130
+ This dataset is a deduplicated version of [{original_dataset}](https://huggingface.co/datasets/{original_dataset}).
131
+
132
+ ## Deduplication Details
133
+
134
+ - **Method**: {method}
135
+ - **Column**: `{column}`
136
+ - **Threshold**: {threshold}
137
+ - **Original size**: {original_size:,} samples
138
+ - **Deduplicated size**: {deduped_size:,} samples
139
+ - **Reduction**: {reduction_pct:.1f}%
140
+ - **Date**: {datetime.now().strftime("%Y-%m-%d %H:%M UTC")}
141
+
142
+ ## Method Description
143
+
144
+ {get_method_description(method)}
145
+
146
+ ## How to Use
147
+
148
+ ```python
149
+ from datasets import load_dataset
150
+
151
+ # Load the deduplicated dataset
152
+ dataset = load_dataset("{os.environ.get('HF_USERNAME', 'username')}/{os.path.basename(original_dataset)}-deduped")
153
+ ```
154
+
155
+ ## Reproduce
156
+
157
+ This dataset was created using the [uv-scripts/deduplication](https://huggingface.co/datasets/uv-scripts/deduplication) tool:
158
+
159
+ ```bash
160
+ uv run https://huggingface.co/datasets/uv-scripts/deduplication/raw/main/semantic-dedupe.py \\
161
+ {original_dataset} {column} {os.path.basename(original_dataset)}-deduped \\
162
+ --method {method} --threshold {threshold}
163
+ ```
164
+
165
+ Generated with 🤖 UV Scripts
166
+ """
167
+
168
+
169
+ def get_method_description(method: str) -> str:
170
+ """Get description for deduplication method."""
171
+ descriptions = {
172
+ "duplicates": "Removes semantic duplicates by finding samples with high similarity scores above the threshold.",
173
+ "outliers": "Removes outlier samples that have low similarity to other samples in the dataset.",
174
+ "representatives": "Keeps only representative samples, removing both duplicates and outliers.",
175
+ }
176
+ return descriptions.get(method, "Unknown method")
177
+
178
+
179
+ def main():
180
+ args = parse_args()
181
+
182
+ # Authenticate
183
+ if args.hf_token:
184
+ login(args.hf_token)
185
+ else:
186
+ print("Warning: No HF token provided. Using cached credentials or anonymous mode.")
187
+
188
+ # Load dataset
189
+ print(f"Loading dataset: {args.dataset}")
190
+ dataset = load_dataset(args.dataset, split=args.split)
191
+
192
+ # Apply max samples if specified
193
+ if args.max_samples:
194
+ dataset = dataset.select(range(min(args.max_samples, len(dataset))))
195
+ print(f"Limited to {len(dataset)} samples for testing")
196
+
197
+ original_size = len(dataset)
198
+
199
+ # Check if column exists
200
+ if args.column not in dataset.column_names:
201
+ print(f"Error: Column '{args.column}' not found in dataset")
202
+ print(f"Available columns: {', '.join(dataset.column_names)}")
203
+ sys.exit(1)
204
+
205
+ # Initialize SemHash
206
+ print("Initializing SemHash...")
207
+ semhash = SemHash(
208
+ batch_size=args.batch_size,
209
+ show_progress=True,
210
+ )
211
+
212
+ # Perform deduplication
213
+ print(f"Performing {args.method} deduplication on '{args.column}' column...")
214
+
215
+ if args.method == "duplicates":
216
+ result = semhash.deduplicate(
217
+ dataset,
218
+ text_column=args.column,
219
+ threshold=args.threshold,
220
+ )
221
+ elif args.method == "outliers":
222
+ result = semhash.filter_outliers(
223
+ dataset,
224
+ text_column=args.column,
225
+ threshold=args.threshold,
226
+ )
227
+ elif args.method == "representatives":
228
+ result = semhash.get_representatives(
229
+ dataset,
230
+ text_column=args.column,
231
+ threshold=args.threshold,
232
+ )
233
+ else:
234
+ raise ValueError(f"Unknown method: {args.method}")
235
+
236
+ deduped_size = len(result)
237
+
238
+ # Print statistics
239
+ print(f"\nDeduplication complete!")
240
+ print(f"Original size: {original_size:,}")
241
+ print(f"Deduplicated size: {deduped_size:,}")
242
+ print(f"Removed: {original_size - deduped_size:,} ({((original_size - deduped_size) / original_size) * 100:.1f}%)")
243
+
244
+ # Create dataset card
245
+ card = create_dataset_card(
246
+ args.dataset,
247
+ args.column,
248
+ args.method,
249
+ args.threshold,
250
+ original_size,
251
+ deduped_size,
252
+ )
253
+
254
+ # Push to hub
255
+ print(f"\nPushing to hub: {args.output_repo}")
256
+ result.push_to_hub(
257
+ args.output_repo,
258
+ private=args.private,
259
+ commit_message=f"Deduplicated using {args.method} method",
260
+ )
261
+
262
+ # Create and push dataset card
263
+ dataset_card = DatasetCard(card)
264
+ dataset_card.push_to_hub(args.output_repo)
265
+
266
+ print(f"✅ Dataset successfully pushed to: https://huggingface.co/datasets/{args.output_repo}")
267
+
268
+
269
+ if __name__ == "__main__":
270
+ main()