File size: 9,449 Bytes
7408778
 
 
 
 
 
 
 
 
 
 
 
 
 
2096779
7408778
 
 
 
 
6408098
 
 
 
 
 
 
 
7408778
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6408098
 
 
 
 
 
 
 
 
 
 
 
2096779
6408098
 
4c012bc
6408098
 
 
 
 
 
 
 
4c012bc
6408098
 
 
 
 
 
 
 
 
 
 
 
4c012bc
6408098
4c012bc
6408098
4c012bc
 
6408098
 
4c012bc
6408098
 
 
 
 
 
 
 
 
7408778
6408098
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c2f723f
 
 
 
6408098
 
 
 
 
 
 
 
 
 
7408778
 
c2f723f
 
7408778
c2f723f
 
 
 
7408778
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6408098
 
7408778
 
 
 
 
6408098
7408778
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4c012bc
6408098
7408778
 
 
 
 
 
 
 
6408098
 
 
 
7408778
6408098
 
 
 
 
7408778
6408098
 
 
7408778
6408098
 
 
7408778
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
#!/usr/bin/env -S uv run
# /// script
# requires-python = ">=3.10"
# dependencies = [
#     "datasets",
#     "huggingface_hub",
# ]
# ///

"""
Deduplicate MCP clients by name and capabilities.

This script takes the existing mcp-clients dataset and creates a deduplicated view
where rows are unique by (name, capabilities). When multiple versions exist for
the same (name, capabilities), they are concatenated with "," in a versions column.

It also strips " (via mcp-remote X.Y.Z)" suffixes from client names and reports
the mcp-remote versions encountered.

Usage:
    # Push deduplicated data to the same repo (evalstate/mcp-clients) under 'deduplicated' split
    uv run deduplicate_clients.py --push-to-hub

    # Read from raw, push to dedup split in same repo
    uv run deduplicate_clients.py --source-split raw --target-split deduplicated --push-to-hub

    # Output to local file
    uv run deduplicate_clients.py -o deduplicated.ndjson
"""

import sys
import json
import re
import argparse
from datetime import datetime
from collections import defaultdict

from datasets import Dataset, Features, Value, load_dataset


# Matches a " (via mcp-remote X.Y.Z)" suffix, capturing the version number.
MCP_REMOTE_PATTERN = re.compile(r'\s*\(via mcp-remote ([\d.]+)\)')


def strip_mcp_remote_suffix(name):
    """Remove the '(via mcp-remote X.Y.Z)' suffix from a client name.

    Args:
        name: Raw client name, possibly carrying the mcp-remote suffix.

    Returns:
        Tuple of (clean_name, version): the name without the suffix and the
        captured mcp-remote version string, or (name, None) when no suffix
        is present.
    """
    match = MCP_REMOTE_PATTERN.search(name)
    if match is None:
        return name, None
    # Single scan: the version is captured by the pattern itself, so no
    # second regex pass (and no dead fallthrough returning the raw name).
    return MCP_REMOTE_PATTERN.sub('', name).strip(), match.group(1)


def normalize_capabilities(caps):
    """Return capabilities in a canonical form suitable for grouping.

    None passes through, JSON strings are parsed into objects (with '{}'
    short-circuited to an empty dict), unparseable strings are kept as-is,
    and any other value is returned unchanged.
    """
    if caps is None:
        return None
    if not isinstance(caps, str):
        return caps
    if caps == '{}':
        return {}
    try:
        return json.loads(caps)
    except Exception:
        # Not valid JSON; keep the raw string so the record is not lost.
        return caps


def capabilities_to_string(caps):
    """Serialize capabilities to a stable string for storage/keying.

    None becomes '', dicts become deterministic (key-sorted) JSON, and
    anything else falls back to str().
    """
    if caps is None:
        return ''
    if not isinstance(caps, dict):
        return str(caps)
    # sort_keys makes equal dicts produce identical strings.
    return json.dumps(caps, sort_keys=True)


def create_dataset_from_deduplicated(deduplicated_list, features=None):
    """Build a Hugging Face Dataset from the deduplicated client records.

    Args:
        deduplicated_list: Records with 'name', 'versions', 'capabilities',
            and 'last_seen' keys.
        features: Optional explicit Features schema; defaults to four string
            columns, using 'versions' (plural) for the deduplicated split.
    """
    if features is None:
        features = Features({
            'name': Value('string'),
            'versions': Value('string'),
            'capabilities': Value('string'),
            'last_seen': Value('string'),
        })

    rows = [
        {
            'name': entry['name'],
            'versions': entry['versions'],
            # Capabilities may still be a dict here; serialize for storage.
            'capabilities': capabilities_to_string(entry['capabilities']),
            'last_seen': entry['last_seen'],
        }
        for entry in deduplicated_list
    ]

    return Dataset.from_list(rows, features=features)


def push_to_hub(deduplicated_list, repo_id, split=None, token=None, private=False):
    """Push the deduplicated dataset to the Hugging Face Hub.

    Args:
        deduplicated_list: Deduplicated client records to upload.
        repo_id: Target dataset repository (e.g. 'evalstate/mcp-clients').
        split: Optional split name within the 'deduplicated' config.
        token: HF auth token; falls back to environment/cached credentials.
        private: Whether to create/push to a private repository.
    """
    dataset = create_dataset_from_deduplicated(deduplicated_list)

    target = f"https://huggingface.co/datasets/{repo_id}"
    if split:
        print(f"Pushing dataset to {target} (config: deduplicated, split: {split})", file=sys.stderr)
    else:
        print(f"Pushing dataset to {target} (config: deduplicated)", file=sys.stderr)

    # Explicit config_name keeps the data in the "deduplicated" config
    # instead of letting the Hub create a "default" config.
    dataset.push_to_hub(
        repo_id=repo_id,
        config_name="deduplicated",
        split=split,
        token=token,
        private=private,
        commit_message=f"Update deduplicated clients ({datetime.now().strftime('%Y-%m-%d %H:%M')})",
    )

    print(f"Successfully pushed {len(deduplicated_list):,} deduplicated records to {repo_id}", file=sys.stderr)


def main():
    """CLI entry point: load, deduplicate, and emit/push MCP client records."""
    parser = argparse.ArgumentParser(
        description="Deduplicate MCP clients by name and capabilities",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Push deduplicated data to same repo under 'deduplicated' split
  %(prog)s --push-to-hub

  # Use custom source/target splits
  %(prog)s --source-split raw --target-split dedup --push-to-hub

  # Output to local file
  %(prog)s -o deduplicated.ndjson

  # Push to a different repo
  %(prog)s --target-repo my-org/mcp-clients-dedup --push-to-hub
        """
    )
    parser.add_argument("-o", "--output", help="Output file path (default: stdout)")
    parser.add_argument("--source-repo", default="evalstate/mcp-clients",
                        help="Source repository ID (default: evalstate/mcp-clients)")
    parser.add_argument("--source-config", default="raw",
                        help="Source config name (default: raw)")
    parser.add_argument("--source-split", default=None,
                        help="Source split name (optional, defaults to config name)")
    parser.add_argument("--target-repo", default="evalstate/mcp-clients",
                        help="Target repository ID (default: evalstate/mcp-clients)")
    parser.add_argument("--target-split", default="deduplicated",
                        help="Target split name (default: deduplicated)")
    parser.add_argument("--push-to-hub", action="store_true",
                        help="Push the resulting dataset to Hugging Face Hub")
    parser.add_argument("--token", default=None,
                        help="HF token (defaults to HF_TOKEN env var or cached token)")
    parser.add_argument("--private", action="store_true",
                        help="Create/push to a private repository")
    args = parser.parse_args()

    # Use config name as split if not specified
    source_split = args.source_split or args.source_config

    print(f"[{datetime.now().isoformat()}] Loading dataset from {args.source_repo} (config: {args.source_config}, split: {source_split})...", file=sys.stderr)

    # Load the source dataset using config name (required for multi-config datasets)
    ds = load_dataset(args.source_repo, args.source_config, split=source_split)

    print(f"[{datetime.now().isoformat()}] Loaded {len(ds):,} client records", file=sys.stderr)

    # Track mcp-remote versions found
    mcp_remote_versions = defaultdict(int)

    # Group by (name, capabilities)
    groups = defaultdict(list)

    for row in ds:
        # Strip mcp-remote suffix from name
        clean_name, mcp_version = strip_mcp_remote_suffix(row['name'])

        if mcp_version:
            mcp_remote_versions[mcp_version] += 1

        # Normalize capabilities for grouping
        caps = normalize_capabilities(row['capabilities'])
        caps_key = capabilities_to_string(caps)

        key = (clean_name, caps_key)
        groups[key].append({
            'name': clean_name,
            'version': row['version'],
            'capabilities': caps,
            'last_seen': row['last_seen'],
        })

    print(f"[{datetime.now().isoformat()}] Found {len(groups):,} unique (name, capabilities) combinations", file=sys.stderr)

    # Print mcp-remote version summary
    if mcp_remote_versions:
        total_with_mcp_remote = sum(mcp_remote_versions.values())
        print(f"[{datetime.now().isoformat()}] MCP-remote version summary:", file=sys.stderr)
        print(f"  Total records with mcp-remote suffix: {total_with_mcp_remote:,}", file=sys.stderr)
        print(f"  Unique mcp-remote versions: {len(mcp_remote_versions)}", file=sys.stderr)
        print(f"  Versions encountered:", file=sys.stderr)
        for version in sorted(mcp_remote_versions.keys()):
            print(f"    - mcp-remote {version}: {mcp_remote_versions[version]:,} records", file=sys.stderr)
        # BUGFIX: the trailing blank line previously went to stdout, which
        # corrupted the NDJSON stream when no --output file was given.
        print(file=sys.stderr)

    # Create deduplicated records
    deduplicated = []

    for (name, caps_key), records in groups.items():
        # Keep the capabilities/last_seen of the most recently seen record
        latest_record = max(records, key=lambda x: x['last_seen'])

        # Collect all unique versions (sorted)
        versions = sorted(set(r['version'] for r in records))
        versions_str = ','.join(versions)

        deduplicated.append({
            'name': name,
            'versions': versions_str,
            'capabilities': latest_record['capabilities'],
            'last_seen': latest_record['last_seen'],
        })

    # Sort by last_seen descending (so latest are at the beginning)
    deduplicated.sort(key=lambda x: x['last_seen'], reverse=True)

    print(f"[{datetime.now().isoformat()}] Created {len(deduplicated):,} deduplicated records", file=sys.stderr)

    # Handle push to hub
    if args.push_to_hub:
        push_to_hub(deduplicated, args.target_repo, split=args.target_split, token=args.token, private=args.private)
        return

    # Handle local output as NDJSON. BUGFIX: the output file is now managed
    # by a context manager so it is closed even if a write fails (the old
    # code leaked the handle on error), and encoding is pinned to UTF-8.
    if args.output:
        with open(args.output, 'w', encoding='utf-8') as out_file:
            for client in deduplicated:
                out_file.write(json.dumps(client) + '\n')
        print(f"Output written to: {args.output}", file=sys.stderr)
    else:
        for client in deduplicated:
            sys.stdout.write(json.dumps(client) + '\n')


if __name__ == '__main__':
    main()