# Source: evalstate/mcp-clients — deduplicate_clients.py
# (uploaded with huggingface_hub by evalstate, commit 4c012bc)
#!/usr/bin/env -S uv run
# /// script
# requires-python = ">=3.10"
# dependencies = [
# "datasets",
# "huggingface_hub",
# ]
# ///
"""
Deduplicate MCP clients by name and capabilities.
This script takes the existing mcp-clients dataset and creates a deduplicated view
where rows are unique by (name, capabilities). When multiple versions exist for
the same (name, capabilities), they are concatenated with "," in a versions column.
It also strips " (via mcp-remote X.Y.Z)" suffixes from client names and reports
the mcp-remote versions encountered.
Usage:
# Push deduplicated data to the same repo (evalstate/mcp-clients) under 'deduplicated' split
uv run deduplicate_clients.py --push-to-hub
# Read from raw, push to dedup split in same repo
uv run deduplicate_clients.py --source-split raw --target-split deduplicated --push-to-hub
# Output to local file
uv run deduplicate_clients.py -o deduplicated.ndjson
"""
import sys
import json
import re
import argparse
from datetime import datetime
from collections import defaultdict
from datasets import Dataset, Features, Value, load_dataset
# Matches the " (via mcp-remote X.Y.Z)" suffix that some client names carry,
# e.g. "Claude Desktop (via mcp-remote 0.1.2)".  Leading whitespace is
# consumed so stripping the suffix leaves no trailing gap in the name.
MCP_REMOTE_PATTERN = re.compile(r'\s*\(via mcp-remote [\d.]+\)')
def strip_mcp_remote_suffix(name):
    """Remove the '(via mcp-remote X.Y.Z)' suffix from a client name.

    Args:
        name: Raw client name string, possibly carrying the suffix.

    Returns:
        Tuple of (clean_name, version):
        - clean_name: the name with every mcp-remote suffix removed and
          surrounding whitespace stripped; unchanged if no suffix present.
        - version: the mcp-remote version string (e.g. "0.1.2"), or None
          when the name carried no suffix.
    """
    # One pattern with a capture group replaces the original pair of
    # regexes (an outer match plus a second scan for the version), which
    # had to be kept in sync by hand.  re caches compiled patterns, so
    # compiling inline here is cheap.
    pattern = r'\s*\(via mcp-remote ([\d.]+)\)'
    match = re.search(pattern, name)
    if match:
        # The capture group holds the version; strip *all* occurrences of
        # the suffix from the name, matching the original sub() behavior.
        return re.sub(pattern, '', name).strip(), match.group(1)
    return name, None
def normalize_capabilities(caps):
    """Normalize a capabilities value for grouping/comparison.

    Strings are decoded as JSON when possible; anything else (including
    None and already-parsed dicts) passes through untouched.
    """
    # Non-strings (None, dicts, ...) are already in canonical form.
    if not isinstance(caps, str):
        return caps
    # Fast path for the common empty-capabilities marker.
    if caps == '{}':
        return {}
    try:
        return json.loads(caps)
    except Exception:
        # Not valid JSON — keep the raw string as the comparison key.
        return caps
def capabilities_to_string(caps):
    """Serialize a capabilities value to a stable string for storage.

    None becomes the empty string; dicts are dumped as JSON with sorted
    keys (so equal dicts always map to the same string); everything else
    falls back to str().
    """
    if caps is None:
        return ''
    return json.dumps(caps, sort_keys=True) if isinstance(caps, dict) else str(caps)
def create_dataset_from_deduplicated(deduplicated_list, features=None):
    """Build a Hugging Face Dataset from the deduplicated client records.

    Args:
        deduplicated_list: list of dicts with 'name', 'versions',
            'capabilities', and 'last_seen' keys.
        features: optional datasets.Features schema; defaults to four
            string columns matching the deduplicated split layout.

    Returns:
        datasets.Dataset with one row per deduplicated client.
    """
    if features is None:
        # All four columns are stored as strings; note 'versions' is
        # plural here, unlike the raw split's 'version' column.
        features = Features(
            {col: Value('string') for col in ('name', 'versions', 'capabilities', 'last_seen')}
        )
    rows = [
        {
            'name': entry['name'],
            'versions': entry['versions'],
            # Capabilities may still be a dict at this point; serialize
            # to a canonical JSON string for storage.
            'capabilities': capabilities_to_string(entry['capabilities']),
            'last_seen': entry['last_seen'],
        }
        for entry in deduplicated_list
    ]
    return Dataset.from_list(rows, features=features)
def push_to_hub(deduplicated_list, repo_id, split=None, token=None, private=False):
    """Push the deduplicated client records to the Hugging Face Hub.

    Args:
        deduplicated_list: records as produced by main()'s dedup pass.
        repo_id: target dataset repository, e.g. "evalstate/mcp-clients".
        split: optional split name within the 'deduplicated' config.
        token: HF auth token (falls back to env/cached credentials).
        private: whether to create/push a private repository.
    """
    dataset = create_dataset_from_deduplicated(deduplicated_list)
    url = f"https://huggingface.co/datasets/{repo_id}"
    if split:
        print(f"Pushing dataset to {url} (config: deduplicated, split: {split})", file=sys.stderr)
    else:
        print(f"Pushing dataset to {url} (config: deduplicated)", file=sys.stderr)
    # Explicit config_name keeps the data in the "deduplicated" config
    # instead of letting the Hub create a "default" config.
    dataset.push_to_hub(
        repo_id=repo_id,
        config_name="deduplicated",  # Explicitly set config name to avoid "default" config creation
        split=split,
        token=token,
        private=private,
        commit_message=f"Update deduplicated clients ({datetime.now().strftime('%Y-%m-%d %H:%M')})",
    )
    print(f"Successfully pushed {len(deduplicated_list):,} deduplicated records to {repo_id}", file=sys.stderr)
def main():
    """CLI entry point: load the raw client dataset, deduplicate rows by
    (name, capabilities), and either push the result to the Hub or write
    it out as NDJSON (stdout or a file).
    """
    parser = argparse.ArgumentParser(
        description="Deduplicate MCP clients by name and capabilities",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Push deduplicated data to same repo under 'deduplicated' split
  %(prog)s --push-to-hub
  # Use custom source/target splits
  %(prog)s --source-split raw --target-split dedup --push-to-hub
  # Output to local file
  %(prog)s -o deduplicated.ndjson
  # Push to a different repo
  %(prog)s --target-repo my-org/mcp-clients-dedup --push-to-hub
"""
    )
    parser.add_argument("-o", "--output", help="Output file path (default: stdout)")
    parser.add_argument("--source-repo", default="evalstate/mcp-clients",
                        help="Source repository ID (default: evalstate/mcp-clients)")
    parser.add_argument("--source-config", default="raw",
                        help="Source config name (default: raw)")
    parser.add_argument("--source-split", default=None,
                        help="Source split name (optional, defaults to config name)")
    parser.add_argument("--target-repo", default="evalstate/mcp-clients",
                        help="Target repository ID (default: evalstate/mcp-clients)")
    parser.add_argument("--target-split", default="deduplicated",
                        help="Target split name (default: deduplicated)")
    parser.add_argument("--push-to-hub", action="store_true",
                        help="Push the resulting dataset to Hugging Face Hub")
    parser.add_argument("--token", default=None,
                        help="HF token (defaults to HF_TOKEN env var or cached token)")
    parser.add_argument("--private", action="store_true",
                        help="Create/push to a private repository")
    args = parser.parse_args()

    # Fall back to the config name when no explicit source split was given.
    source_split = args.source_split or args.source_config

    print(f"[{datetime.now().isoformat()}] Loading dataset from {args.source_repo} (config: {args.source_config}, split: {source_split})...", file=sys.stderr)
    # The config name argument is required for multi-config datasets.
    ds = load_dataset(args.source_repo, args.source_config, split=source_split)
    print(f"[{datetime.now().isoformat()}] Loaded {len(ds):,} client records", file=sys.stderr)

    # Count how many records carried each mcp-remote version suffix.
    mcp_remote_versions = defaultdict(int)
    # Group rows by (clean name, canonical capabilities string).
    groups = defaultdict(list)
    for row in ds:
        clean_name, mcp_version = strip_mcp_remote_suffix(row['name'])
        if mcp_version:
            mcp_remote_versions[mcp_version] += 1
        caps = normalize_capabilities(row['capabilities'])
        caps_key = capabilities_to_string(caps)
        groups[(clean_name, caps_key)].append({
            'name': clean_name,
            'version': row['version'],
            'capabilities': caps,
            'last_seen': row['last_seen'],
        })
    print(f"[{datetime.now().isoformat()}] Found {len(groups):,} unique (name, capabilities) combinations", file=sys.stderr)

    # Report the mcp-remote versions encountered (stderr only, so stdout
    # stays clean for NDJSON output).
    if mcp_remote_versions:
        total_with_mcp_remote = sum(mcp_remote_versions.values())
        print(f"[{datetime.now().isoformat()}] MCP-remote version summary:", file=sys.stderr)
        print(f"  Total records with mcp-remote suffix: {total_with_mcp_remote:,}", file=sys.stderr)
        print(f"  Unique mcp-remote versions: {len(mcp_remote_versions)}", file=sys.stderr)
        print(f"  Versions encountered:", file=sys.stderr)
        for version in sorted(mcp_remote_versions.keys()):
            print(f"    - mcp-remote {version}: {mcp_remote_versions[version]:,} records", file=sys.stderr)
        # Bug fix: this blank separator previously went to stdout, which
        # corrupted the NDJSON stream when no --output file was given.
        print(file=sys.stderr)

    # Collapse each group into one record: latest last_seen wins for
    # capabilities/timestamp; all distinct versions are joined with ','.
    deduplicated = []
    for (name, caps_key), records in groups.items():
        latest_record = max(records, key=lambda r: r['last_seen'])
        versions_str = ','.join(sorted(set(r['version'] for r in records)))
        deduplicated.append({
            'name': name,
            'versions': versions_str,
            'capabilities': latest_record['capabilities'],
            'last_seen': latest_record['last_seen'],
        })
    # Most recently seen clients first.
    deduplicated.sort(key=lambda r: r['last_seen'], reverse=True)
    print(f"[{datetime.now().isoformat()}] Created {len(deduplicated):,} deduplicated records", file=sys.stderr)

    if args.push_to_hub:
        push_to_hub(deduplicated, args.target_repo, split=args.target_split, token=args.token, private=args.private)
        return

    # NDJSON output.  Bug fix: the file handle is now context-managed, so
    # it is closed even if a write raises (previously it leaked).
    if args.output:
        with open(args.output, 'w', encoding='utf-8') as out_file:
            for client in deduplicated:
                out_file.write(json.dumps(client) + '\n')
        print(f"Output written to: {args.output}", file=sys.stderr)
    else:
        for client in deduplicated:
            sys.stdout.write(json.dumps(client) + '\n')
# Run the CLI only when executed as a script, not when imported.
if __name__ == '__main__':
    main()