"""
Extract unique MCP client combinations (name, version, capabilities) from the
evalstate/hf-mcp-logs dataset.

Only "initialize" method calls are processed; "session_delete" events are
ignored because they lack complete client information. For each unique client
configuration, the most recent "last seen" timestamp is kept. Batched
streaming keeps I/O efficient on large datasets (~7M rows).

Usage:
    # Process all data and save to a local file
    python3.10 extract_mcp_clients.py -o clients.ndjson

    # Push to the Hugging Face Hub (creates/updates evalstate/mcp-clients)
    python3.10 extract_mcp_clients.py --push-to-hub

    # Push to a specific split (for scheduled pipelines)
    python3.10 extract_mcp_clients.py --push-to-hub --split raw

    # Process a sample for testing
    python3.10 extract_mcp_clients.py --limit 10000 -o sample.ndjson

Output fields:
    - name: MCP client name (e.g., "Cursor", "Anthropic/ClaudeAI", "chat-ui-mcp")
    - version: client version
    - capabilities: client capabilities (JSON object or null)
    - last_seen: most recent timestamp when this client was seen
"""
| |
|
import argparse
import json
import sys
from datetime import datetime
from pathlib import Path

from datasets import Dataset, Features, Value
from huggingface_hub import HfApi
| |
|
| |
|
def normalize_capabilities(caps):
    """Normalize a raw capabilities value for comparison.

    Args:
        caps: None, a dict, or a (possibly JSON-encoded) string.

    Returns:
        None if caps is None; the parsed object if caps is a valid JSON
        string (so '{}' becomes {}); the original string when it is not
        valid JSON; any other value unchanged.
    """
    if caps is None:
        return None
    if isinstance(caps, str):
        try:
            # Valid JSON strings (including '{}') parse to their object form.
            return json.loads(caps)
        except ValueError:
            # json.JSONDecodeError subclasses ValueError; anything that is
            # not JSON (e.g. a bare label) is kept as the raw string.
            return caps
    return caps
| |
|
| |
|
def capabilities_to_string(caps):
    """Serialize a capabilities value to a string for storage.

    None -> '' ; dict -> canonical JSON (sorted keys) ; anything else -> str().
    """
    if isinstance(caps, dict):
        # sort_keys yields a canonical form so equal dicts serialize equally.
        return json.dumps(caps, sort_keys=True)
    return '' if caps is None else str(caps)
| |
|
| |
|
def create_dataset_from_clients(clients_list, features=None):
    """Build a Hugging Face Dataset from a list of client record dicts.

    Args:
        clients_list: iterable of dicts with 'name', 'version',
            'capabilities', and 'last_seen' keys.
        features: optional Features schema; defaults to all-string columns
            with capabilities stored as serialized JSON.

    Returns:
        A datasets.Dataset with one row per client record.
    """
    if features is None:
        features = Features({
            'name': Value('string'),
            'version': Value('string'),
            'capabilities': Value('string'),
            'last_seen': Value('string'),
        })

    rows = [
        {
            'name': entry['name'],
            'version': entry['version'],
            'capabilities': capabilities_to_string(entry['capabilities']),
            'last_seen': entry['last_seen'],
        }
        for entry in clients_list
    ]
    return Dataset.from_list(rows, features=features)
| |
|
| |
|
def push_to_hub(clients_list, repo_id, split=None, token=None, private=False):
    """Push the clients list to the Hugging Face Hub as the 'raw' config.

    Args:
        clients_list: list of client record dicts.
        repo_id: target Hub dataset repository (e.g. 'evalstate/mcp-clients').
        split: optional split name to push into.
        token: HF token; falls back to the cached/env token when None.
        private: create/push to a private repository when True.
    """
    ds = create_dataset_from_clients(clients_list)

    target = f"https://huggingface.co/datasets/{repo_id}"
    if split:
        print(f"Pushing dataset to {target} (config: raw, split: {split})", file=sys.stderr)
    else:
        print(f"Pushing dataset to {target} (config: raw)", file=sys.stderr)

    stamp = datetime.now().strftime('%Y-%m-%d %H:%M')
    ds.push_to_hub(
        repo_id=repo_id,
        config_name="raw",
        split=split,
        token=token,
        private=private,
        commit_message=f"Update MCP clients dataset ({stamp})",
    )
    print(f"Successfully pushed {len(clients_list):,} clients to {repo_id}", file=sys.stderr)
| |
|
| |
|
def _collect_unique_clients(args):
    """Stream the sessions split and dedupe 'initialize' events.

    Args:
        args: parsed CLI namespace (uses batch_size and limit).

    Returns:
        Dict keyed by (name, version, canonical-capabilities) tuples whose
        values are client record dicts carrying the most recent 'last_seen'
        timestamp. Progress and summary statistics go to stderr.
    """
    print(f"Loading dataset: evalstate/hf-mcp-logs (sessions split)", file=sys.stderr)
    print(f"Using batch size: {args.batch_size}", file=sys.stderr)

    # Local import as in the original script; only needed on this path.
    from datasets import load_dataset
    ds = load_dataset('evalstate/hf-mcp-logs', 'sessions', streaming=True)
    sessions_ds = ds['sessions']

    unique_clients = {}
    total_rows = 0
    skipped_deletes = 0
    skipped_other = 0
    # Start the clock before streaming: previously the timer was first set at
    # the initial 100k checkpoint, so the first reported rows/sec used a
    # near-zero elapsed time and was meaningless.
    start_time = datetime.now()
    next_report = 100_000  # next progress-heartbeat threshold

    for batch in sessions_ds.iter(batch_size=args.batch_size):
        batch_len = len(batch['name'])
        total_rows += batch_len

        # Threshold crossing (not modulo) so progress still prints when
        # batch_size does not divide 100,000 evenly.
        if total_rows >= next_report:
            elapsed = (datetime.now() - start_time).total_seconds()
            rate = total_rows / elapsed if elapsed > 0 else 0
            print(f"Processed {total_rows:,} rows ({rate:.0f} rows/sec), "
                  f"found {len(unique_clients):,} unique clients, "
                  f"skipped {skipped_deletes:,} delete events...", file=sys.stderr)
            next_report = (total_rows // 100_000 + 1) * 100_000

        # Stop once the limit is crossed; the crossing batch is counted but
        # not processed (matches the original behavior).
        if args.limit and total_rows > args.limit:
            break

        has_method = 'methodName' in batch  # invariant for the whole batch
        for i in range(batch_len):
            method_name = batch['methodName'][i] if has_method else None

            if method_name == 'session_delete':
                # Delete events lack client info -- count and skip.
                skipped_deletes += 1
                continue
            if method_name != 'initialize':
                skipped_other += 1
                continue

            name = batch['name'][i]
            version = batch['version'][i]
            capabilities = normalize_capabilities(batch['capabilities'][i])
            time_str = batch['time'][i]

            # Rows missing any identifying field cannot be deduped usefully.
            if not all([name, version, time_str]):
                continue

            # Canonicalize capabilities so equal dicts map to the same key.
            if isinstance(capabilities, dict):
                cap_key = json.dumps(capabilities, sort_keys=True)
            else:
                cap_key = str(capabilities) if capabilities is not None else None

            key = (name, version, cap_key)

            # Keep only the most recent sighting. NOTE(review): assumes the
            # 'time' column is lexicographically ordered (e.g. ISO 8601) --
            # verify against the dataset schema.
            if key not in unique_clients or time_str > unique_clients[key]['last_seen']:
                unique_clients[key] = {
                    'name': name,
                    'version': version,
                    'capabilities': capabilities,
                    'last_seen': time_str,
                }

    print(f"\nProcessing complete: {total_rows:,} rows processed", file=sys.stderr)
    print(f"Skipped {skipped_deletes:,} session_delete events", file=sys.stderr)
    if skipped_other > 0:
        print(f"Skipped {skipped_other:,} other non-initialize events", file=sys.stderr)
    print(f"Found {len(unique_clients):,} unique client configurations", file=sys.stderr)
    return unique_clients


def _write_output(sorted_clients, args):
    """Write client records to args.output (or stdout) as ndjson or csv."""
    if args.output:
        # Explicit encoding for portability; newline='' keeps the csv module
        # from doubling line endings on Windows (harmless for ndjson).
        out_file = open(args.output, 'w', encoding='utf-8', newline='')
    else:
        out_file = sys.stdout

    try:
        if args.format == 'ndjson':
            for client in sorted_clients:
                # Copy before tweaking so the caller's records stay intact
                # (the original mutated sorted_clients in place).
                record = dict(client)
                if record['capabilities'] == '{}':
                    record['capabilities'] = {}
                out_file.write(json.dumps(record) + '\n')
        elif args.format == 'csv':
            import csv
            writer = csv.writer(out_file)
            writer.writerow(['name', 'version', 'capabilities', 'last_seen'])
            for client in sorted_clients:
                caps = client['capabilities']
                if isinstance(caps, dict):
                    caps_str = json.dumps(caps)
                elif caps is None:
                    caps_str = ''
                else:
                    caps_str = str(caps)
                writer.writerow([client['name'], client['version'], caps_str, client['last_seen']])
    finally:
        # Close only files we opened; never close sys.stdout.
        if args.output:
            out_file.close()

    if args.output:
        print(f"Output written to: {args.output}", file=sys.stderr)


def main():
    """Parse CLI arguments, collect unique clients, then push or write them."""
    parser = argparse.ArgumentParser(
        description="Extract unique MCP clients with last seen timestamp from evalstate/hf-mcp-logs",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Process all data and save to local file
  %(prog)s -o clients.ndjson

  # Push to Hugging Face Hub (requires authentication: `hf auth login`)
  %(prog)s --push-to-hub

  # Push to a specific split (ideal for scheduled jobs/pipelines)
  %(prog)s --push-to-hub --split raw

  # Process with limit (for testing)
  %(prog)s --limit 10000 -o sample.ndjson

  # Push to a private repo
  %(prog)s --push-to-hub --private
"""
    )
    parser.add_argument("-o", "--output", help="Output file path (default: stdout)")
    parser.add_argument("--limit", type=int,
                        help="Limit processing to N rows (useful for testing)")
    parser.add_argument("--format", choices=["ndjson", "csv"], default="ndjson",
                        help="Output format (default: ndjson)")
    parser.add_argument("--batch-size", type=int, default=1000,
                        help="Batch size for streaming (default: 1000). Larger values may "
                             "improve I/O efficiency but use more memory.")
    parser.add_argument("--push-to-hub", action="store_true",
                        help="Push the resulting dataset to Hugging Face Hub")
    parser.add_argument("--split", default=None,
                        help="Split name when pushing to Hub (e.g., 'raw' for scheduled pipelines)")
    parser.add_argument("--repo-id", default="evalstate/mcp-clients",
                        help="HF Hub repository ID (default: evalstate/mcp-clients)")
    parser.add_argument("--token", default=None,
                        help="HF token (defaults to HF_TOKEN env var or cached token)")
    parser.add_argument("--private", action="store_true",
                        help="Create/push to a private repository")
    args = parser.parse_args()

    unique_clients = _collect_unique_clients(args)

    # Most recently seen clients first.
    sorted_clients = sorted(
        unique_clients.values(),
        key=lambda x: x['last_seen'],
        reverse=True,
    )

    if args.push_to_hub:
        push_to_hub(sorted_clients, args.repo_id, split=args.split,
                    token=args.token, private=args.private)
        return

    _write_output(sorted_clients, args)


if __name__ == '__main__':
    main()
| |
|