Duplicate from evalstate/mcp-clients
Browse filesCo-authored-by: shaun smith <evalstate@users.noreply.huggingface.co>
- .gitattributes +59 -0
- LICENSE +9 -0
- README.md +123 -0
- data/deduplicated-00000-of-00001.parquet +3 -0
- data/raw-00000-of-00001.parquet +3 -0
- deduplicate_clients.py +257 -0
- deduplicated/deduplicated-00000-of-00001.parquet +3 -0
- extract_mcp_clients.py +293 -0
- pipeline.py +75 -0
- raw/raw-00000-of-00001.parquet +3 -0
.gitattributes
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 11 |
+
*.lz4 filter=lfs diff=lfs merge=lfs -text
|
| 12 |
+
*.mds filter=lfs diff=lfs merge=lfs -text
|
| 13 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 14 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
| 15 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 16 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 17 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 18 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 19 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 20 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 21 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 22 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 23 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 24 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 25 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 26 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 27 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 28 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 29 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 30 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
| 31 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 32 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 33 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 34 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 35 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 37 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 38 |
+
# Audio files - uncompressed
|
| 39 |
+
*.pcm filter=lfs diff=lfs merge=lfs -text
|
| 40 |
+
*.sam filter=lfs diff=lfs merge=lfs -text
|
| 41 |
+
*.raw filter=lfs diff=lfs merge=lfs -text
|
| 42 |
+
# Audio files - compressed
|
| 43 |
+
*.aac filter=lfs diff=lfs merge=lfs -text
|
| 44 |
+
*.flac filter=lfs diff=lfs merge=lfs -text
|
| 45 |
+
*.mp3 filter=lfs diff=lfs merge=lfs -text
|
| 46 |
+
*.ogg filter=lfs diff=lfs merge=lfs -text
|
| 47 |
+
*.wav filter=lfs diff=lfs merge=lfs -text
|
| 48 |
+
# Image files - uncompressed
|
| 49 |
+
*.bmp filter=lfs diff=lfs merge=lfs -text
|
| 50 |
+
*.gif filter=lfs diff=lfs merge=lfs -text
|
| 51 |
+
*.png filter=lfs diff=lfs merge=lfs -text
|
| 52 |
+
*.tiff filter=lfs diff=lfs merge=lfs -text
|
| 53 |
+
# Image files - compressed
|
| 54 |
+
*.jpg filter=lfs diff=lfs merge=lfs -text
|
| 55 |
+
*.jpeg filter=lfs diff=lfs merge=lfs -text
|
| 56 |
+
*.webp filter=lfs diff=lfs merge=lfs -text
|
| 57 |
+
# Video files - compressed
|
| 58 |
+
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 59 |
+
*.webm filter=lfs diff=lfs merge=lfs -text
|
LICENSE
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
MIT License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2025 Hugging Face
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
| 6 |
+
|
| 7 |
+
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
| 8 |
+
|
| 9 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
README.md
ADDED
|
@@ -0,0 +1,123 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
configs:
|
| 3 |
+
- config_name: deduplicated
|
| 4 |
+
data_files:
|
| 5 |
+
- split: deduplicated
|
| 6 |
+
path: deduplicated/deduplicated-*
|
| 7 |
+
- config_name: raw
|
| 8 |
+
data_files:
|
| 9 |
+
- split: raw
|
| 10 |
+
path: raw/raw-*
|
| 11 |
+
license: mit
|
| 12 |
+
tags:
|
| 13 |
+
- code
|
| 14 |
+
- agent
|
| 15 |
+
size_categories:
|
| 16 |
+
- 1K<n<10K
|
| 17 |
+
dataset_info:
|
| 18 |
+
- config_name: deduplicated
|
| 19 |
+
features:
|
| 20 |
+
- name: name
|
| 21 |
+
dtype: string
|
| 22 |
+
- name: versions
|
| 23 |
+
dtype: string
|
| 24 |
+
- name: capabilities
|
| 25 |
+
dtype: string
|
| 26 |
+
- name: last_seen
|
| 27 |
+
dtype: string
|
| 28 |
+
splits:
|
| 29 |
+
- name: deduplicated
|
| 30 |
+
num_bytes: 49101
|
| 31 |
+
num_examples: 486
|
| 32 |
+
download_size: 21632
|
| 33 |
+
dataset_size: 49101
|
| 34 |
+
- config_name: raw
|
| 35 |
+
features:
|
| 36 |
+
- name: name
|
| 37 |
+
dtype: string
|
| 38 |
+
- name: version
|
| 39 |
+
dtype: string
|
| 40 |
+
- name: capabilities
|
| 41 |
+
dtype: string
|
| 42 |
+
- name: last_seen
|
| 43 |
+
dtype: string
|
| 44 |
+
splits:
|
| 45 |
+
- name: raw
|
| 46 |
+
num_bytes: 135102
|
| 47 |
+
num_examples: 1586
|
| 48 |
+
download_size: 40277
|
| 49 |
+
dataset_size: 135102
|
| 50 |
+
---
|
| 51 |
+
|
| 52 |
+
# MCP Clients Dataset
|
| 53 |
+
|
| 54 |
+
This data contains MCP Client identity and capability information logged by the MCP Server at `huggingface.co/mcp`.
|
| 55 |
+
|
| 56 |
+
The data is updated daily (~4AM UTC), and represents the last ~7M sessions.
|
| 57 |
+
|
| 58 |
+
## Dataset Contents
|
| 59 |
+
|
| 60 |
+
The dataset contains two configurations, `raw` and `deduplicated`.
|
| 61 |
+
|
| 62 |
+
Raw contains distinct (name,version,capabilities) and a timestamp of the latest that distinct configuration was seen.
|
| 63 |
+
|
| 64 |
+
Deduplicated removes `mcp-remote` suffixes, groups by distinct (name,capabilities), concatenates the version numbers found and retains the last_seen timestamp. Note that further post-processing may be added to handle specific clients such as `postman`.
|
| 65 |
+
|
| 66 |
+
### Raw Split (`raw`)
|
| 67 |
+
- **name**: MCP client name (e.g., "Cursor", "Anthropic/ClaudeAI", "chat-ui-mcp")
|
| 68 |
+
- **version**: Client version
|
| 69 |
+
- **capabilities**: Client capabilities (JSON string)
|
| 70 |
+
- **last_seen**: Most recent timestamp when this distinct client configuration was seen
|
| 71 |
+
|
| 72 |
+
### Deduplicated Split (`deduplicated`)
|
| 73 |
+
- **name**: MCP client name (normalized, mcp-remote suffix stripped)
|
| 74 |
+
- **versions**: All versions seen for this client (comma-separated)
|
| 75 |
+
- **capabilities**: Client capabilities (JSON string)
|
| 76 |
+
- **last_seen**: Most recent timestamp when this distinct client configuration was seen
|
| 77 |
+
|
| 78 |
+
## Notes about the Data
|
| 79 |
+
|
| 80 |
+
- Some Clients only send capabilities that have been configured (for example, `fast-agent` only sends the `roots` capability if roots are configured)
|
| 81 |
+
- There are a number of capabilities out of specification that are helpful to track (e.g. MCP-UI, Skybridge, Apps SDK). We encourage the use of `experimental` to advertise these capabilities for tracking.
|
| 82 |
+
- The `deduplicated` split groups clients by (name, capabilities), concatenating all versions together.
|
| 83 |
+
- The Protocol Version is not currently captured - future datasets may include this
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
## Usage
|
| 87 |
+
|
| 88 |
+
```python
|
| 89 |
+
from datasets import load_dataset
|
| 90 |
+
|
| 91 |
+
# Load raw clients
|
| 92 |
+
raw_ds = load_dataset('evalstate/mcp-clients', 'raw')
|
| 93 |
+
for client in raw_ds['raw']:
|
| 94 |
+
print(f"{client['name']} v{client['version']}")
|
| 95 |
+
|
| 96 |
+
# Load deduplicated clients
|
| 97 |
+
dedup_ds = load_dataset('evalstate/mcp-clients', 'deduplicated')
|
| 98 |
+
for client in dedup_ds['deduplicated']:
|
| 99 |
+
print(f"{client['name']}: {client['versions']}")
|
| 100 |
+
```
|
| 101 |
+
|
| 102 |
+
## Updating the Dataset
|
| 103 |
+
|
| 104 |
+
The dataset is kept up-to-date via Hugging Face Jobs:
|
| 105 |
+
|
| 106 |
+
1. **Extract step** (`raw` split):
|
| 107 |
+
```bash
|
| 108 |
+
uv run https://huggingface.co/datasets/evalstate/mcp-clients/resolve/main/extract_mcp_clients.py --push-to-hub --split raw
|
| 109 |
+
```
|
| 110 |
+
|
| 111 |
+
2. **Deduplicate step** (`deduplicated` split):
|
| 112 |
+
```bash
|
| 113 |
+
uv run https://huggingface.co/datasets/evalstate/mcp-clients/resolve/main/deduplicate_clients.py --push-to-hub
|
| 114 |
+
```
|
| 115 |
+
|
| 116 |
+
Or run the full pipeline:
|
| 117 |
+
```bash
|
| 118 |
+
uv run https://huggingface.co/datasets/evalstate/mcp-clients/resolve/main/pipeline.py
|
| 119 |
+
```
|
| 120 |
+
|
| 121 |
+
## License
|
| 122 |
+
|
| 123 |
+
MIT
|
data/deduplicated-00000-of-00001.parquet
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0022e0aa74f26cdad480557f76e8e3aa8beb9ff6ae925db1d3a2df638b713850
|
| 3 |
+
size 19027
|
data/raw-00000-of-00001.parquet
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:2f4c75203c4976394ccdeada2697d660957abe801871a5e331fe39b0716a5247
|
| 3 |
+
size 35096
|
deduplicate_clients.py
ADDED
|
@@ -0,0 +1,257 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env -S uv run
|
| 2 |
+
# /// script
|
| 3 |
+
# requires-python = ">=3.10"
|
| 4 |
+
# dependencies = [
|
| 5 |
+
# "datasets",
|
| 6 |
+
# "huggingface_hub",
|
| 7 |
+
# ]
|
| 8 |
+
# ///
|
| 9 |
+
|
| 10 |
+
"""
|
| 11 |
+
Deduplicate MCP clients by name and capabilities.
|
| 12 |
+
|
| 13 |
+
This script takes the existing mcp-clients dataset and creates a deduplicated view
|
| 14 |
+
where rows are unique by (name, capabilities). When multiple versions exist for
|
| 15 |
+
the same (name, capabilities), they are concatenated with "," in a versions column.
|
| 16 |
+
|
| 17 |
+
It also strips " (via mcp-remote X.Y.Z)" suffixes from client names and reports
|
| 18 |
+
the mcp-remote versions encountered.
|
| 19 |
+
|
| 20 |
+
Usage:
|
| 21 |
+
# Push deduplicated data to the same repo (evalstate/mcp-clients) under 'deduplicated' split
|
| 22 |
+
uv run deduplicate_clients.py --push-to-hub
|
| 23 |
+
|
| 24 |
+
# Read from raw, push to dedup split in same repo
|
| 25 |
+
uv run deduplicate_clients.py --source-split raw --target-split deduplicated --push-to-hub
|
| 26 |
+
|
| 27 |
+
# Output to local file
|
| 28 |
+
uv run deduplicate_clients.py -o deduplicated.ndjson
|
| 29 |
+
"""
|
| 30 |
+
|
| 31 |
+
import sys
|
| 32 |
+
import json
|
| 33 |
+
import re
|
| 34 |
+
import argparse
|
| 35 |
+
from datetime import datetime
|
| 36 |
+
from collections import defaultdict
|
| 37 |
+
|
| 38 |
+
from datasets import Dataset, Features, Value, load_dataset
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
# Matches the " (via mcp-remote X.Y.Z)" marker that mcp-remote proxies append.
MCP_REMOTE_PATTERN = re.compile(r'\s*\(via mcp-remote [\d.]+\)')


def strip_mcp_remote_suffix(name):
    """Remove the '(via mcp-remote X.Y.Z)' suffix from client name.

    Returns a ``(clean_name, version)`` pair: ``version`` is the mcp-remote
    version string when the suffix was present; otherwise the name is
    returned unchanged with ``None``.
    """
    found = MCP_REMOTE_PATTERN.search(name)
    if found is None:
        return name, None
    version_hit = re.search(r'mcp-remote ([\d.]+)', found.group(0))
    if version_hit is None:
        # Defensive: the outer pattern always carries a version, but keep
        # the original fall-through of returning the name untouched.
        return name, None
    cleaned = MCP_REMOTE_PATTERN.sub('', name).strip()
    return cleaned, version_hit.group(1)
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def normalize_capabilities(caps):
    """Normalize capabilities for comparison.

    Strings are decoded as JSON when possible (with '{}' short-circuited
    to an empty dict); anything that is not a string — including None —
    passes through untouched.
    """
    if not isinstance(caps, str):
        return caps
    if caps == '{}':
        return {}
    try:
        decoded = json.loads(caps)
    except Exception:
        # Not valid JSON: keep the raw string so nothing is lost.
        return caps
    return decoded
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def capabilities_to_string(caps):
    """Serialize capabilities to a stable string for storage and grouping.

    None becomes the empty string; dicts become canonical (key-sorted)
    JSON so equal capability sets compare equal; anything else falls back
    to ``str()``.
    """
    if caps is None:
        return ''
    return json.dumps(caps, sort_keys=True) if isinstance(caps, dict) else str(caps)
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def create_dataset_from_deduplicated(deduplicated_list, features=None):
    """Create a Hugging Face Dataset from the deduplicated client list.

    When *features* is omitted, a default all-string schema is used; note
    the column is 'versions' (plural) because each row aggregates every
    version seen for one (name, capabilities) combination.
    """
    if features is None:
        features = Features({
            'name': Value('string'),
            'versions': Value('string'),
            'capabilities': Value('string'),
            'last_seen': Value('string'),
        })

    rows = [
        {
            'name': entry['name'],
            'versions': entry['versions'],
            'capabilities': capabilities_to_string(entry['capabilities']),
            'last_seen': entry['last_seen'],
        }
        for entry in deduplicated_list
    ]
    return Dataset.from_list(rows, features=features)
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
def push_to_hub(deduplicated_list, repo_id, split=None, token=None, private=False):
    """Push the deduplicated dataset to Hugging Face Hub.

    Builds a Dataset from *deduplicated_list* and uploads it under the
    explicit 'deduplicated' config of *repo_id*; all progress messages go
    to stderr.
    """
    dataset = create_dataset_from_deduplicated(deduplicated_list)

    if split:
        print(f"Pushing dataset to https://huggingface.co/datasets/{repo_id} (config: deduplicated, split: {split})", file=sys.stderr)
    else:
        print(f"Pushing dataset to https://huggingface.co/datasets/{repo_id} (config: deduplicated)", file=sys.stderr)

    # An explicit config_name keeps the data in the "deduplicated" config
    # rather than an implicit "default" config on the Hub.
    stamp = datetime.now().strftime('%Y-%m-%d %H:%M')
    dataset.push_to_hub(
        repo_id=repo_id,
        config_name="deduplicated",
        split=split,
        token=token,
        private=private,
        commit_message=f"Update deduplicated clients ({stamp})",
    )

    print(f"Successfully pushed {len(deduplicated_list):,} deduplicated records to {repo_id}", file=sys.stderr)
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
def main():
    """CLI entry point: load the raw split, deduplicate, then push or dump.

    Reads the source config/split, groups rows by (clean name, canonical
    capabilities), concatenates versions per group, and either pushes the
    result to the Hub or writes NDJSON to a file/stdout. All status output
    goes to stderr so stdout stays a clean NDJSON stream.
    """
    parser = argparse.ArgumentParser(
        description="Deduplicate MCP clients by name and capabilities",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Push deduplicated data to same repo under 'deduplicated' split
  %(prog)s --push-to-hub

  # Use custom source/target splits
  %(prog)s --source-split raw --target-split dedup --push-to-hub

  # Output to local file
  %(prog)s -o deduplicated.ndjson

  # Push to a different repo
  %(prog)s --target-repo my-org/mcp-clients-dedup --push-to-hub
"""
    )
    parser.add_argument("-o", "--output", help="Output file path (default: stdout)")
    parser.add_argument("--source-repo", default="evalstate/mcp-clients",
                        help="Source repository ID (default: evalstate/mcp-clients)")
    parser.add_argument("--source-config", default="raw",
                        help="Source config name (default: raw)")
    parser.add_argument("--source-split", default=None,
                        help="Source split name (optional, defaults to config name)")
    parser.add_argument("--target-repo", default="evalstate/mcp-clients",
                        help="Target repository ID (default: evalstate/mcp-clients)")
    parser.add_argument("--target-split", default="deduplicated",
                        help="Target split name (default: deduplicated)")
    parser.add_argument("--push-to-hub", action="store_true",
                        help="Push the resulting dataset to Hugging Face Hub")
    parser.add_argument("--token", default=None,
                        help="HF token (defaults to HF_TOKEN env var or cached token)")
    parser.add_argument("--private", action="store_true",
                        help="Create/push to a private repository")
    args = parser.parse_args()

    # Use config name as split if not specified
    source_split = args.source_split or args.source_config

    print(f"[{datetime.now().isoformat()}] Loading dataset from {args.source_repo} (config: {args.source_config}, split: {source_split})...", file=sys.stderr)

    # Load the source dataset using config name (required for multi-config datasets)
    ds = load_dataset(args.source_repo, args.source_config, split=source_split)

    print(f"[{datetime.now().isoformat()}] Loaded {len(ds):,} client records", file=sys.stderr)

    # Track mcp-remote versions found while stripping their name suffixes.
    mcp_remote_versions = defaultdict(int)

    # Group by (clean name, canonical capabilities string).
    groups = defaultdict(list)

    for row in ds:
        clean_name, mcp_version = strip_mcp_remote_suffix(row['name'])

        if mcp_version:
            mcp_remote_versions[mcp_version] += 1

        # Normalize capabilities so textually-different but equal JSON
        # groups together (capabilities_to_string sorts dict keys).
        caps = normalize_capabilities(row['capabilities'])
        caps_key = capabilities_to_string(caps)

        groups[(clean_name, caps_key)].append({
            'name': clean_name,
            'version': row['version'],
            'capabilities': caps,
            'last_seen': row['last_seen'],
        })

    print(f"[{datetime.now().isoformat()}] Found {len(groups):,} unique (name, capabilities) combinations", file=sys.stderr)

    # Print mcp-remote version summary
    if mcp_remote_versions:
        total_with_mcp_remote = sum(mcp_remote_versions.values())
        print(f"[{datetime.now().isoformat()}] MCP-remote version summary:", file=sys.stderr)
        print(f"  Total records with mcp-remote suffix: {total_with_mcp_remote:,}", file=sys.stderr)
        print(f"  Unique mcp-remote versions: {len(mcp_remote_versions)}", file=sys.stderr)
        print(f"  Versions encountered:", file=sys.stderr)
        for version in sorted(mcp_remote_versions.keys()):
            print(f"    - mcp-remote {version}: {mcp_remote_versions[version]:,} records", file=sys.stderr)
        # BUGFIX: this blank separator previously went to stdout, polluting
        # the NDJSON stream when no --output file was given.
        print(file=sys.stderr)

    # Create deduplicated records: one row per group, versions concatenated.
    deduplicated = []
    for (name, caps_key), records in groups.items():
        # Retain the capabilities/timestamp of the most recently seen record.
        latest_record = max(records, key=lambda x: x['last_seen'])

        # Collect all unique versions (sorted) as a comma-separated string.
        versions_str = ','.join(sorted(set(r['version'] for r in records)))

        deduplicated.append({
            'name': name,
            'versions': versions_str,
            'capabilities': latest_record['capabilities'],
            'last_seen': latest_record['last_seen'],
        })

    # Sort by last_seen descending (so latest are at the beginning)
    deduplicated.sort(key=lambda x: x['last_seen'], reverse=True)

    print(f"[{datetime.now().isoformat()}] Created {len(deduplicated):,} deduplicated records", file=sys.stderr)

    # Handle push to hub
    if args.push_to_hub:
        push_to_hub(deduplicated, args.target_repo, split=args.target_split, token=args.token, private=args.private)
        return

    # Handle local output as NDJSON. BUGFIX: the output file is now managed
    # by a context manager (previously open() without close on error paths).
    if args.output:
        with open(args.output, 'w', encoding='utf-8') as out_file:
            for client in deduplicated:
                out_file.write(json.dumps(client) + '\n')
        print(f"Output written to: {args.output}", file=sys.stderr)
    else:
        for client in deduplicated:
            sys.stdout.write(json.dumps(client) + '\n')


if __name__ == '__main__':
    main()
|
deduplicated/deduplicated-00000-of-00001.parquet
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:282bd453cc15756598a88d941192cfe38ef784d3ff105568193c26d4c5f39f48
|
| 3 |
+
size 21632
|
extract_mcp_clients.py
ADDED
|
@@ -0,0 +1,293 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env -S uv run
|
| 2 |
+
# /// script
|
| 3 |
+
# requires-python = ">=3.10"
|
| 4 |
+
# dependencies = [
|
| 5 |
+
# "datasets",
|
| 6 |
+
# "huggingface_hub",
|
| 7 |
+
# ]
|
| 8 |
+
# ///
|
| 9 |
+
"""
|
| 10 |
+
Extract unique MCP client combinations (name, version, capabilities) from evalstate/hf-mcp-logs dataset.
|
| 11 |
+
Only processes "initialize" method calls - ignores "session_delete" events which lack complete client information.
|
| 12 |
+
Tracks most recent "last seen" timestamp for each unique client configuration.
|
| 13 |
+
Uses batched streaming for efficient I/O on large datasets (~7M rows).
|
| 14 |
+
|
| 15 |
+
Usage:
|
| 16 |
+
# Process all data and save to local file
|
| 17 |
+
python3.10 extract_mcp_clients.py -o clients.ndjson
|
| 18 |
+
|
| 19 |
+
# Push to Hugging Face Hub (creates/updates evalstate/mcp-clients dataset)
|
| 20 |
+
python3.10 extract_mcp_clients.py --push-to-hub
|
| 21 |
+
|
| 22 |
+
# Push to a specific split (for scheduled pipelines)
|
| 23 |
+
python3.10 extract_mcp_clients.py --push-to-hub --split raw
|
| 24 |
+
|
| 25 |
+
# Process a sample for testing
|
| 26 |
+
python3.10 extract_mcp_clients.py --limit 10000 -o sample.ndjson
|
| 27 |
+
|
| 28 |
+
Output fields:
|
| 29 |
+
- name: MCP client name (e.g., "Cursor", "Anthropic/ClaudeAI", "chat-ui-mcp")
|
| 30 |
+
- version: Client version
|
| 31 |
+
- capabilities: Client capabilities (JSON object or null)
|
| 32 |
+
- last_seen: Most recent timestamp when this client was seen
|
| 33 |
+
"""
|
| 34 |
+
|
| 35 |
+
import argparse
|
| 36 |
+
import json
|
| 37 |
+
import sys
|
| 38 |
+
from datetime import datetime
|
| 39 |
+
from pathlib import Path
|
| 40 |
+
|
| 41 |
+
from datasets import Dataset, Features, Value
|
| 42 |
+
from huggingface_hub import HfApi
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def normalize_capabilities(caps):
    """Normalize capabilities for comparison.

    A string value is treated as stringified JSON and decoded when
    possible ('{}' maps straight to an empty dict); non-string values,
    including None, are returned as-is.
    """
    if not isinstance(caps, str):
        return caps
    if caps == '{}':
        return {}
    try:
        parsed = json.loads(caps)
    except Exception:
        # Undecodable payload — preserve the original string.
        return caps
    return parsed
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
def capabilities_to_string(caps):
    """Convert capabilities to a JSON string for storage.

    None maps to '', dicts to key-sorted JSON (stable across runs), and
    any other value to its str() representation.
    """
    if caps is None:
        return ''
    if not isinstance(caps, dict):
        return str(caps)
    return json.dumps(caps, sort_keys=True)
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
def create_dataset_from_clients(clients_list, features=None):
    """Create a Hugging Face Dataset from the raw clients list.

    When *features* is omitted, all four columns ('name', 'version',
    'capabilities', 'last_seen') default to string — capabilities are
    stored as JSON strings for consistency across rows.
    """
    if features is None:
        features = Features({
            'name': Value('string'),
            'version': Value('string'),
            'capabilities': Value('string'),
            'last_seen': Value('string'),
        })

    rows = [
        {
            'name': entry['name'],
            'version': entry['version'],
            'capabilities': capabilities_to_string(entry['capabilities']),
            'last_seen': entry['last_seen'],
        }
        for entry in clients_list
    ]
    return Dataset.from_list(rows, features=features)
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
def push_to_hub(clients_list, repo_id, split=None, token=None, private=False):
    """Push the raw clients dataset to Hugging Face Hub.

    Builds a Dataset from *clients_list* and uploads it under the explicit
    'raw' config of *repo_id*; progress messages go to stderr.
    """
    dataset = create_dataset_from_clients(clients_list)

    if split:
        print(f"Pushing dataset to https://huggingface.co/datasets/{repo_id} (config: raw, split: {split})", file=sys.stderr)
    else:
        print(f"Pushing dataset to https://huggingface.co/datasets/{repo_id} (config: raw)", file=sys.stderr)

    # An explicit config_name keeps the data in the "raw" config rather
    # than an implicit "default" config on the Hub.
    stamp = datetime.now().strftime('%Y-%m-%d %H:%M')
    dataset.push_to_hub(
        repo_id=repo_id,
        config_name="raw",
        split=split,
        token=token,
        private=private,
        commit_message=f"Update MCP clients dataset ({stamp})",
    )
    print(f"Successfully pushed {len(clients_list):,} clients to {repo_id}", file=sys.stderr)
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
def main() -> None:
    """Extract unique MCP clients from the evalstate/hf-mcp-logs sessions split.

    Streams the source dataset, keeps the most recent observation per unique
    (name, version, capabilities) triple, then either pushes the result to the
    Hub or writes it locally as NDJSON/CSV.
    """
    parser = argparse.ArgumentParser(
        description="Extract unique MCP clients with last seen timestamp from evalstate/hf-mcp-logs",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Process all data and save to local file
  %(prog)s -o clients.ndjson

  # Push to Hugging Face Hub (requires authentication: `hf auth login`)
  %(prog)s --push-to-hub

  # Push to a specific split (ideal for scheduled jobs/pipelines)
  %(prog)s --push-to-hub --split raw

  # Process with limit (for testing)
  %(prog)s --limit 10000 -o sample.ndjson

  # Push to a private repo
  %(prog)s --push-to-hub --private
"""
    )
    parser.add_argument("-o", "--output", help="Output file path (default: stdout)")
    parser.add_argument("--limit", type=int,
                        help="Limit processing to N rows (useful for testing)")
    parser.add_argument("--format", choices=["ndjson", "csv"], default="ndjson",
                        help="Output format (default: ndjson)")
    parser.add_argument("--batch-size", type=int, default=1000,
                        help="Batch size for streaming (default: 1000). Larger values may "
                             "improve I/O efficiency but use more memory.")
    parser.add_argument("--push-to-hub", action="store_true",
                        help="Push the resulting dataset to Hugging Face Hub")
    parser.add_argument("--split", default=None,
                        help="Split name when pushing to Hub (e.g., 'raw' for scheduled pipelines)")
    parser.add_argument("--repo-id", default="evalstate/mcp-clients",
                        help="HF Hub repository ID (default: evalstate/mcp-clients)")
    parser.add_argument("--token", default=None,
                        help="HF token (defaults to HF_TOKEN env var or cached token)")
    parser.add_argument("--private", action="store_true",
                        help="Create/push to a private repository")
    args = parser.parse_args()

    # Dictionary to track unique (name, version, capabilities) -> data
    unique_clients = {}

    # Load dataset in streaming mode with batch processing
    print(f"Loading dataset: evalstate/hf-mcp-logs (sessions split)", file=sys.stderr)
    print(f"Using batch size: {args.batch_size}", file=sys.stderr)

    from datasets import load_dataset
    ds = load_dataset('evalstate/hf-mcp-logs', 'sessions', streaming=True)
    # Access sessions split
    sessions_ds = ds['sessions']

    total_rows = 0
    skipped_deletes = 0
    skipped_other = 0
    # Lazily initialized on the first progress checkpoint, so the reported
    # rate covers only rows processed after that first 100k milestone.
    start_time = None

    # Process in batches for better I/O efficiency
    for batch in sessions_ds.iter(batch_size=args.batch_size):
        batch_len = len(batch['name'])  # All columns have same length
        total_rows += batch_len

        # Progress indicator every 100k rows
        # NOTE(review): triggers only when total_rows lands exactly on a
        # multiple of 100000, which depends on batch size dividing evenly.
        if total_rows % 100000 == 0:
            if start_time is None:
                start_time = datetime.now()
            elapsed = (datetime.now() - start_time).total_seconds()
            rate = total_rows / elapsed if elapsed > 0 else 0
            print(f"Processed {total_rows:,} rows ({rate:.0f} rows/sec), "
                  f"found {len(unique_clients):,} unique clients, "
                  f"skipped {skipped_deletes:,} delete events...", file=sys.stderr)

        # Stop once the limit is exceeded; the batch that crossed the limit
        # is counted in total_rows but its rows are not processed.
        if args.limit and total_rows > args.limit:
            break

        # Process each row in batch
        # Batch is a dict where keys are column names and values are lists
        for i in range(batch_len):
            # Check methodName field - only process 'initialize' events
            method_name = batch['methodName'][i] if 'methodName' in batch else None

            # Skip session_delete events - they don't have meaningful client information
            if method_name == 'session_delete':
                skipped_deletes += 1
                continue

            # Only process initialize events
            if method_name != 'initialize':
                skipped_other += 1
                continue

            name = batch['name'][i]
            version = batch['version'][i]
            capabilities = normalize_capabilities(batch['capabilities'][i])
            time_str = batch['time'][i]

            # Skip rows missing any of the identifying fields.
            if not all([name, version, time_str]):
                continue

            # Create composite key for deduplication
            # For capabilities, we need a hashable representation
            if isinstance(capabilities, dict):
                cap_key = json.dumps(capabilities, sort_keys=True)
            else:
                cap_key = str(capabilities) if capabilities is not None else None

            key = (name, version, cap_key)

            # Update if this entry is newer
            # NOTE(review): comparison is lexicographic on the time string —
            # assumes ISO-8601-like timestamps; confirm against source schema.
            if key not in unique_clients or time_str > unique_clients[key]['last_seen']:
                unique_clients[key] = {
                    'name': name,
                    'version': version,
                    'capabilities': capabilities,
                    'last_seen': time_str
                }

    print(f"\nProcessing complete: {total_rows:,} rows processed", file=sys.stderr)
    print(f"Skipped {skipped_deletes:,} session_delete events", file=sys.stderr)
    if skipped_other > 0:
        print(f"Skipped {skipped_other:,} other non-initialize events", file=sys.stderr)
    print(f"Found {len(unique_clients):,} unique client configurations", file=sys.stderr)

    # Sort by last_seen descending (most recent first)
    sorted_clients = sorted(
        unique_clients.values(),
        key=lambda x: x['last_seen'],
        reverse=True
    )

    # Handle push to hub
    if args.push_to_hub:
        push_to_hub(sorted_clients, args.repo_id, split=args.split, token=args.token, private=args.private)
        return

    # Handle local output
    if args.output:
        out_file = open(args.output, 'w')
    else:
        out_file = sys.stdout

    # Output results
    if args.format == 'ndjson':
        for client in sorted_clients:
            # Ensure capabilities is output properly
            # (a literal '{}' string is normalized back to an empty dict)
            if client['capabilities'] == '{}':
                client['capabilities'] = {}

            out_file.write(json.dumps(client) + '\n')

    elif args.format == 'csv':
        # Write CSV header
        import csv
        writer = csv.writer(out_file)
        writer.writerow(['name', 'version', 'capabilities', 'last_seen'])

        for client in sorted_clients:
            # Convert capabilities to string for CSV
            caps = client['capabilities']
            if isinstance(caps, dict):
                caps_str = json.dumps(caps)
            elif caps is None:
                caps_str = ''
            else:
                caps_str = str(caps)

            writer.writerow([client['name'], client['version'], caps_str, client['last_seen']])

    if args.output:
        out_file.close()
        print(f"Output written to: {args.output}", file=sys.stderr)
|
| 290 |
+
|
| 291 |
+
|
| 292 |
+
# Script entry point: run the extraction pipeline when executed directly.
if __name__ == '__main__':
    main()
|
pipeline.py
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env -S uv run
|
| 2 |
+
# /// script
|
| 3 |
+
# requires-python = ">=3.10"
|
| 4 |
+
# dependencies = [
|
| 5 |
+
# "datasets",
|
| 6 |
+
# "huggingface_hub",
|
| 7 |
+
# ]
|
| 8 |
+
# ///
|
| 9 |
+
"""
|
| 10 |
+
MCP Clients Pipeline
|
| 11 |
+
|
| 12 |
+
Runs the full extract → deduplicate pipeline for the MCP clients dataset.
|
| 13 |
+
|
| 14 |
+
Usage:
|
| 15 |
+
uv run https://huggingface.co/datasets/evalstate/mcp-clients/resolve/main/pipeline.py
|
| 16 |
+
|
| 17 |
+
The pipeline:
|
| 18 |
+
1. Extracts unique clients from evalstate/hf-mcp-logs
|
| 19 |
+
2. Pushes to evalstate/mcp-clients (raw split)
|
| 20 |
+
3. Creates deduplicated view
|
| 21 |
+
4. Pushes to evalstate/mcp-clients (deduplicated split)
|
| 22 |
+
"""
|
| 23 |
+
|
| 24 |
+
import subprocess
|
| 25 |
+
import sys
|
| 26 |
+
from datetime import datetime
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def run_script(url: str, args: list[str]) -> bool:
    """Run a script via uv run and return success status.

    Args:
        url: URL (or path) of the script to hand to `uv run`.
        args: Extra command-line arguments passed through to the script.

    Returns:
        True if the subprocess exited with code 0, False otherwise.
    """
    command = ["uv", "run", url, *args]
    print(f"[{datetime.now().isoformat()}] Running: {' '.join(command)}", file=sys.stderr)

    # Inherit stdout/stderr so the child script's own progress is visible.
    completed = subprocess.run(command, capture_output=False)
    succeeded = completed.returncode == 0

    if succeeded:
        print(f"[{datetime.now().isoformat()}] Success", file=sys.stderr)
    else:
        print(f"[{datetime.now().isoformat()}] Failed with exit code {completed.returncode}", file=sys.stderr)
    return succeeded
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def main():
    """Run the extract -> deduplicate pipeline, aborting on the first failure."""
    base_url = "https://huggingface.co/datasets/evalstate/mcp-clients/resolve/main"

    print(f"[{datetime.now().isoformat()}] Starting MCP clients pipeline...", file=sys.stderr)

    # Each step: (progress banner, failure label, script URL, script args).
    steps = [
        ("Step 1: Extracting clients from hf-mcp-logs...",
         "extract step",
         f"{base_url}/extract_mcp_clients.py",
         ["--push-to-hub", "--split", "raw"]),
        ("Step 2: Creating deduplicated view...",
         "dedup step",
         f"{base_url}/deduplicate_clients.py",
         ["--push-to-hub"]),
    ]

    for banner, label, script_url, script_args in steps:
        print(f"[{datetime.now().isoformat()}] {banner}", file=sys.stderr)
        if not run_script(script_url, script_args):
            print(f"[{datetime.now().isoformat()}] Pipeline failed at {label}", file=sys.stderr)
            sys.exit(1)

    print(f"[{datetime.now().isoformat()}] Pipeline complete!", file=sys.stderr)
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
# Script entry point: run the full pipeline when executed directly.
if __name__ == "__main__":
    main()
|
raw/raw-00000-of-00001.parquet
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b7d191f645d38f4a76153b696e0108a192868fe661708d5a05a4bade366cb127
|
| 3 |
+
size 40277
|