Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/_upload_large_folder.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/_webhooks_payload.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/_webhooks_server.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/community.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/constants.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/dataclasses.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/errors.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/fastai_utils.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/file_download.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/hf_file_system.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/hub_mixin.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/lfs.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/repocard.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/repocard_data.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/cli/__init__.py +13 -0
- venv/lib/python3.10/site-packages/huggingface_hub/cli/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/cli/__pycache__/_cli_utils.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/cli/__pycache__/_errors.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/cli/__pycache__/auth.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/cli/__pycache__/cache.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/cli/__pycache__/collections.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/cli/__pycache__/datasets.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/cli/__pycache__/download.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/cli/__pycache__/hf.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/cli/__pycache__/inference_endpoints.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/cli/__pycache__/jobs.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/cli/__pycache__/lfs.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/cli/__pycache__/models.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/cli/__pycache__/papers.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/cli/__pycache__/repo.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/cli/__pycache__/repo_files.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/cli/__pycache__/skills.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/cli/__pycache__/spaces.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/cli/__pycache__/system.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/cli/__pycache__/upload.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/cli/__pycache__/upload_large_folder.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/huggingface_hub/cli/_cli_utils.py +513 -0
- venv/lib/python3.10/site-packages/huggingface_hub/cli/_errors.py +44 -0
- venv/lib/python3.10/site-packages/huggingface_hub/cli/auth.py +157 -0
- venv/lib/python3.10/site-packages/huggingface_hub/cli/cache.py +811 -0
- venv/lib/python3.10/site-packages/huggingface_hub/cli/collections.py +331 -0
- venv/lib/python3.10/site-packages/huggingface_hub/cli/datasets.py +126 -0
- venv/lib/python3.10/site-packages/huggingface_hub/cli/download.py +197 -0
- venv/lib/python3.10/site-packages/huggingface_hub/cli/hf.py +98 -0
- venv/lib/python3.10/site-packages/huggingface_hub/cli/inference_endpoints.py +456 -0
- venv/lib/python3.10/site-packages/huggingface_hub/cli/jobs.py +1078 -0
- venv/lib/python3.10/site-packages/huggingface_hub/cli/lfs.py +175 -0
- venv/lib/python3.10/site-packages/huggingface_hub/cli/models.py +125 -0
- venv/lib/python3.10/site-packages/huggingface_hub/cli/papers.py +98 -0
- venv/lib/python3.10/site-packages/huggingface_hub/cli/repo.py +336 -0
venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/_upload_large_folder.cpython-310.pyc
ADDED
|
Binary file (21.5 kB). View file
|
|
|
venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/_webhooks_payload.cpython-310.pyc
ADDED
|
Binary file (4.03 kB). View file
|
|
|
venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/_webhooks_server.cpython-310.pyc
ADDED
|
Binary file (14.1 kB). View file
|
|
|
venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/community.cpython-310.pyc
ADDED
|
Binary file (13.4 kB). View file
|
|
|
venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/constants.cpython-310.pyc
ADDED
|
Binary file (6.79 kB). View file
|
|
|
venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/dataclasses.cpython-310.pyc
ADDED
|
Binary file (17.8 kB). View file
|
|
|
venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/errors.cpython-310.pyc
ADDED
|
Binary file (15.4 kB). View file
|
|
|
venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/fastai_utils.cpython-310.pyc
ADDED
|
Binary file (14.4 kB). View file
|
|
|
venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/file_download.cpython-310.pyc
ADDED
|
Binary file (52.1 kB). View file
|
|
|
venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/hf_file_system.cpython-310.pyc
ADDED
|
Binary file (38.5 kB). View file
|
|
|
venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/hub_mixin.cpython-310.pyc
ADDED
|
Binary file (28.5 kB). View file
|
|
|
venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/lfs.cpython-310.pyc
ADDED
|
Binary file (12 kB). View file
|
|
|
venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/repocard.cpython-310.pyc
ADDED
|
Binary file (29.7 kB). View file
|
|
|
venv/lib/python3.10/site-packages/huggingface_hub/__pycache__/repocard_data.cpython-310.pyc
ADDED
|
Binary file (29.5 kB). View file
|
|
|
venv/lib/python3.10/site-packages/huggingface_hub/cli/__init__.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2025 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
venv/lib/python3.10/site-packages/huggingface_hub/cli/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (290 Bytes). View file
|
|
|
venv/lib/python3.10/site-packages/huggingface_hub/cli/__pycache__/_cli_utils.cpython-310.pyc
ADDED
|
Binary file (16.7 kB). View file
|
|
|
venv/lib/python3.10/site-packages/huggingface_hub/cli/__pycache__/_errors.cpython-310.pyc
ADDED
|
Binary file (1.79 kB). View file
|
|
|
venv/lib/python3.10/site-packages/huggingface_hub/cli/__pycache__/auth.cpython-310.pyc
ADDED
|
Binary file (4.24 kB). View file
|
|
|
venv/lib/python3.10/site-packages/huggingface_hub/cli/__pycache__/cache.cpython-310.pyc
ADDED
|
Binary file (21.5 kB). View file
|
|
|
venv/lib/python3.10/site-packages/huggingface_hub/cli/__pycache__/collections.cpython-310.pyc
ADDED
|
Binary file (8.99 kB). View file
|
|
|
venv/lib/python3.10/site-packages/huggingface_hub/cli/__pycache__/datasets.cpython-310.pyc
ADDED
|
Binary file (3.49 kB). View file
|
|
|
venv/lib/python3.10/site-packages/huggingface_hub/cli/__pycache__/download.cpython-310.pyc
ADDED
|
Binary file (5.55 kB). View file
|
|
|
venv/lib/python3.10/site-packages/huggingface_hub/cli/__pycache__/hf.cpython-310.pyc
ADDED
|
Binary file (3.04 kB). View file
|
|
|
venv/lib/python3.10/site-packages/huggingface_hub/cli/__pycache__/inference_endpoints.cpython-310.pyc
ADDED
|
Binary file (10.6 kB). View file
|
|
|
venv/lib/python3.10/site-packages/huggingface_hub/cli/__pycache__/jobs.cpython-310.pyc
ADDED
|
Binary file (26.2 kB). View file
|
|
|
venv/lib/python3.10/site-packages/huggingface_hub/cli/__pycache__/lfs.cpython-310.pyc
ADDED
|
Binary file (4.19 kB). View file
|
|
|
venv/lib/python3.10/site-packages/huggingface_hub/cli/__pycache__/models.cpython-310.pyc
ADDED
|
Binary file (3.45 kB). View file
|
|
|
venv/lib/python3.10/site-packages/huggingface_hub/cli/__pycache__/papers.cpython-310.pyc
ADDED
|
Binary file (2.46 kB). View file
|
|
|
venv/lib/python3.10/site-packages/huggingface_hub/cli/__pycache__/repo.cpython-310.pyc
ADDED
|
Binary file (8.28 kB). View file
|
|
|
venv/lib/python3.10/site-packages/huggingface_hub/cli/__pycache__/repo_files.cpython-310.pyc
ADDED
|
Binary file (2.53 kB). View file
|
|
|
venv/lib/python3.10/site-packages/huggingface_hub/cli/__pycache__/skills.cpython-310.pyc
ADDED
|
Binary file (5.41 kB). View file
|
|
|
venv/lib/python3.10/site-packages/huggingface_hub/cli/__pycache__/spaces.cpython-310.pyc
ADDED
|
Binary file (3.42 kB). View file
|
|
|
venv/lib/python3.10/site-packages/huggingface_hub/cli/__pycache__/system.cpython-310.pyc
ADDED
|
Binary file (811 Bytes). View file
|
|
|
venv/lib/python3.10/site-packages/huggingface_hub/cli/__pycache__/upload.cpython-310.pyc
ADDED
|
Binary file (7.61 kB). View file
|
|
|
venv/lib/python3.10/site-packages/huggingface_hub/cli/__pycache__/upload_large_folder.cpython-310.pyc
ADDED
|
Binary file (3.74 kB). View file
|
|
|
venv/lib/python3.10/site-packages/huggingface_hub/cli/_cli_utils.py
ADDED
|
@@ -0,0 +1,513 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2022 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
"""Contains CLI utilities (styling, helpers)."""
|
| 15 |
+
|
| 16 |
+
import dataclasses
|
| 17 |
+
import datetime
|
| 18 |
+
import importlib.metadata
|
| 19 |
+
import json
|
| 20 |
+
import os
|
| 21 |
+
import re
|
| 22 |
+
import time
|
| 23 |
+
from enum import Enum
|
| 24 |
+
from pathlib import Path
|
| 25 |
+
from typing import TYPE_CHECKING, Annotated, Any, Callable, Literal, Optional, Sequence, Union, cast
|
| 26 |
+
|
| 27 |
+
import click
|
| 28 |
+
import typer
|
| 29 |
+
|
| 30 |
+
from huggingface_hub import __version__, constants
|
| 31 |
+
from huggingface_hub.utils import ANSI, get_session, hf_raise_for_status, installation_method, logging, tabulate
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
logger = logging.get_logger()
|
| 35 |
+
|
| 36 |
+
# Arbitrary maximum length of a cell in a table output
|
| 37 |
+
_MAX_CELL_LENGTH = 35
|
| 38 |
+
|
| 39 |
+
if TYPE_CHECKING:
|
| 40 |
+
from huggingface_hub.hf_api import HfApi
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def get_hf_api(token: Optional[str] = None) -> "HfApi":
|
| 44 |
+
# Import here to avoid circular import
|
| 45 |
+
from huggingface_hub.hf_api import HfApi
|
| 46 |
+
|
| 47 |
+
return HfApi(token=token, library_name="huggingface-cli", library_version=__version__)
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
#### TYPER UTILS
|
| 51 |
+
|
| 52 |
+
CLI_REFERENCE_URL = "https://huggingface.co/docs/huggingface_hub/en/guides/cli"
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def generate_epilog(examples: list[str], docs_anchor: Optional[str] = None) -> str:
|
| 56 |
+
"""Generate an epilog with examples and a Learn More section.
|
| 57 |
+
|
| 58 |
+
Args:
|
| 59 |
+
examples: List of example commands (without the `$ ` prefix).
|
| 60 |
+
docs_anchor: Optional anchor for the docs URL (e.g., "#hf-download").
|
| 61 |
+
|
| 62 |
+
Returns:
|
| 63 |
+
Formatted epilog string.
|
| 64 |
+
"""
|
| 65 |
+
docs_url = f"{CLI_REFERENCE_URL}{docs_anchor}" if docs_anchor else CLI_REFERENCE_URL
|
| 66 |
+
examples_str = "\n".join(f" $ {ex}" for ex in examples)
|
| 67 |
+
return f"""\
|
| 68 |
+
Examples
|
| 69 |
+
{examples_str}
|
| 70 |
+
|
| 71 |
+
Learn more
|
| 72 |
+
Use `hf <command> --help` for more information about a command.
|
| 73 |
+
Read the documentation at {docs_url}
|
| 74 |
+
"""
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
TOPIC_T = Union[Literal["main", "help"], str]
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def _format_epilog_no_indent(epilog: Optional[str], ctx: click.Context, formatter: click.HelpFormatter) -> None:
|
| 81 |
+
"""Write the epilog without indentation."""
|
| 82 |
+
if epilog:
|
| 83 |
+
formatter.write_paragraph()
|
| 84 |
+
for line in epilog.split("\n"):
|
| 85 |
+
formatter.write_text(line)
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
class HFCliTyperGroup(typer.core.TyperGroup):
|
| 89 |
+
"""
|
| 90 |
+
Typer Group that:
|
| 91 |
+
- lists commands alphabetically within sections.
|
| 92 |
+
- separates commands by topic (main, help, etc.).
|
| 93 |
+
- formats epilog without extra indentation.
|
| 94 |
+
"""
|
| 95 |
+
|
| 96 |
+
def format_commands(self, ctx: click.Context, formatter: click.HelpFormatter) -> None:
|
| 97 |
+
topics: dict[str, list] = {}
|
| 98 |
+
|
| 99 |
+
for name in self.list_commands(ctx):
|
| 100 |
+
cmd = self.get_command(ctx, name)
|
| 101 |
+
if cmd is None or cmd.hidden:
|
| 102 |
+
continue
|
| 103 |
+
help_text = cmd.get_short_help_str(limit=formatter.width)
|
| 104 |
+
topic = getattr(cmd, "topic", "main")
|
| 105 |
+
topics.setdefault(topic, []).append((name, help_text))
|
| 106 |
+
|
| 107 |
+
with formatter.section("Main commands"):
|
| 108 |
+
formatter.write_dl(topics["main"])
|
| 109 |
+
for topic in sorted(topics.keys()):
|
| 110 |
+
if topic == "main":
|
| 111 |
+
continue
|
| 112 |
+
with formatter.section(f"{topic.capitalize()} commands"):
|
| 113 |
+
formatter.write_dl(topics[topic])
|
| 114 |
+
|
| 115 |
+
def format_epilog(self, ctx: click.Context, formatter: click.HelpFormatter) -> None:
|
| 116 |
+
# Collect examples from all commands
|
| 117 |
+
all_examples: list[str] = []
|
| 118 |
+
for name in self.list_commands(ctx):
|
| 119 |
+
cmd = self.get_command(ctx, name)
|
| 120 |
+
if cmd is None or cmd.hidden:
|
| 121 |
+
continue
|
| 122 |
+
cmd_examples = getattr(cmd, "examples", [])
|
| 123 |
+
all_examples.extend(cmd_examples)
|
| 124 |
+
|
| 125 |
+
if all_examples:
|
| 126 |
+
epilog = generate_epilog(all_examples)
|
| 127 |
+
_format_epilog_no_indent(epilog, ctx, formatter)
|
| 128 |
+
elif self.epilog:
|
| 129 |
+
_format_epilog_no_indent(self.epilog, ctx, formatter)
|
| 130 |
+
|
| 131 |
+
def list_commands(self, ctx: click.Context) -> list[str]: # type: ignore[name-defined]
|
| 132 |
+
# click.Group stores both commands and subgroups in `self.commands`
|
| 133 |
+
return sorted(self.commands.keys())
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
def HFCliCommand(topic: TOPIC_T, examples: Optional[list[str]] = None) -> type[typer.core.TyperCommand]:
|
| 137 |
+
def format_epilog(self: click.Command, ctx: click.Context, formatter: click.HelpFormatter) -> None:
|
| 138 |
+
_format_epilog_no_indent(self.epilog, ctx, formatter)
|
| 139 |
+
|
| 140 |
+
return type(
|
| 141 |
+
f"TyperCommand{topic.capitalize()}",
|
| 142 |
+
(typer.core.TyperCommand,),
|
| 143 |
+
{"topic": topic, "examples": examples or [], "format_epilog": format_epilog},
|
| 144 |
+
)
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
class HFCliApp(typer.Typer):
|
| 148 |
+
"""Custom Typer app for Hugging Face CLI."""
|
| 149 |
+
|
| 150 |
+
def command( # type: ignore[override]
|
| 151 |
+
self,
|
| 152 |
+
name: Optional[str] = None,
|
| 153 |
+
*,
|
| 154 |
+
topic: TOPIC_T = "main",
|
| 155 |
+
examples: Optional[list[str]] = None,
|
| 156 |
+
context_settings: Optional[dict[str, Any]] = None,
|
| 157 |
+
help: Optional[str] = None,
|
| 158 |
+
epilog: Optional[str] = None,
|
| 159 |
+
short_help: Optional[str] = None,
|
| 160 |
+
options_metavar: str = "[OPTIONS]",
|
| 161 |
+
add_help_option: bool = True,
|
| 162 |
+
no_args_is_help: bool = False,
|
| 163 |
+
hidden: bool = False,
|
| 164 |
+
deprecated: bool = False,
|
| 165 |
+
rich_help_panel: Optional[str] = None,
|
| 166 |
+
) -> Callable[[Callable[..., Any]], Callable[..., Any]]:
|
| 167 |
+
# Generate epilog from examples if not explicitly provided
|
| 168 |
+
if epilog is None and examples:
|
| 169 |
+
epilog = generate_epilog(examples)
|
| 170 |
+
|
| 171 |
+
def _inner(func: Callable[..., Any]) -> Callable[..., Any]:
|
| 172 |
+
return super(HFCliApp, self).command(
|
| 173 |
+
name,
|
| 174 |
+
cls=HFCliCommand(topic, examples),
|
| 175 |
+
context_settings=context_settings,
|
| 176 |
+
help=help,
|
| 177 |
+
epilog=epilog,
|
| 178 |
+
short_help=short_help,
|
| 179 |
+
options_metavar=options_metavar,
|
| 180 |
+
add_help_option=add_help_option,
|
| 181 |
+
no_args_is_help=no_args_is_help,
|
| 182 |
+
hidden=hidden,
|
| 183 |
+
deprecated=deprecated,
|
| 184 |
+
rich_help_panel=rich_help_panel,
|
| 185 |
+
)(func)
|
| 186 |
+
|
| 187 |
+
return _inner
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
def typer_factory(help: str, epilog: Optional[str] = None) -> "HFCliApp":
|
| 191 |
+
"""Create a Typer app with consistent settings.
|
| 192 |
+
|
| 193 |
+
Args:
|
| 194 |
+
help: Help text for the app.
|
| 195 |
+
epilog: Optional epilog text (use `generate_epilog` to create one).
|
| 196 |
+
|
| 197 |
+
Returns:
|
| 198 |
+
A configured Typer app.
|
| 199 |
+
"""
|
| 200 |
+
return HFCliApp(
|
| 201 |
+
help=help,
|
| 202 |
+
epilog=epilog,
|
| 203 |
+
add_completion=True,
|
| 204 |
+
no_args_is_help=True,
|
| 205 |
+
cls=HFCliTyperGroup,
|
| 206 |
+
# Disable rich completely for consistent experience
|
| 207 |
+
rich_markup_mode=None,
|
| 208 |
+
rich_help_panel=None,
|
| 209 |
+
pretty_exceptions_enable=False,
|
| 210 |
+
# Increase max content width for better readability
|
| 211 |
+
context_settings={
|
| 212 |
+
"max_content_width": 120,
|
| 213 |
+
},
|
| 214 |
+
)
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
class RepoType(str, Enum):
|
| 218 |
+
model = "model"
|
| 219 |
+
dataset = "dataset"
|
| 220 |
+
space = "space"
|
| 221 |
+
|
| 222 |
+
|
| 223 |
+
RepoIdArg = Annotated[
|
| 224 |
+
str,
|
| 225 |
+
typer.Argument(
|
| 226 |
+
help="The ID of the repo (e.g. `username/repo-name`).",
|
| 227 |
+
),
|
| 228 |
+
]
|
| 229 |
+
|
| 230 |
+
|
| 231 |
+
RepoTypeOpt = Annotated[
|
| 232 |
+
RepoType,
|
| 233 |
+
typer.Option(
|
| 234 |
+
help="The type of repository (model, dataset, or space).",
|
| 235 |
+
),
|
| 236 |
+
]
|
| 237 |
+
|
| 238 |
+
TokenOpt = Annotated[
|
| 239 |
+
Optional[str],
|
| 240 |
+
typer.Option(
|
| 241 |
+
help="A User Access Token generated from https://huggingface.co/settings/tokens.",
|
| 242 |
+
),
|
| 243 |
+
]
|
| 244 |
+
|
| 245 |
+
PrivateOpt = Annotated[
|
| 246 |
+
Optional[bool],
|
| 247 |
+
typer.Option(
|
| 248 |
+
help="Whether to create a private repo if repo doesn't exist on the Hub. Ignored if the repo already exists.",
|
| 249 |
+
),
|
| 250 |
+
]
|
| 251 |
+
|
| 252 |
+
RevisionOpt = Annotated[
|
| 253 |
+
Optional[str],
|
| 254 |
+
typer.Option(
|
| 255 |
+
help="Git revision id which can be a branch name, a tag, or a commit hash.",
|
| 256 |
+
),
|
| 257 |
+
]
|
| 258 |
+
|
| 259 |
+
|
| 260 |
+
LimitOpt = Annotated[
|
| 261 |
+
int,
|
| 262 |
+
typer.Option(help="Limit the number of results."),
|
| 263 |
+
]
|
| 264 |
+
|
| 265 |
+
AuthorOpt = Annotated[
|
| 266 |
+
Optional[str],
|
| 267 |
+
typer.Option(help="Filter by author or organization."),
|
| 268 |
+
]
|
| 269 |
+
|
| 270 |
+
FilterOpt = Annotated[
|
| 271 |
+
Optional[list[str]],
|
| 272 |
+
typer.Option(help="Filter by tags (e.g. 'text-classification'). Can be used multiple times."),
|
| 273 |
+
]
|
| 274 |
+
|
| 275 |
+
SearchOpt = Annotated[
|
| 276 |
+
Optional[str],
|
| 277 |
+
typer.Option(help="Search query."),
|
| 278 |
+
]
|
| 279 |
+
|
| 280 |
+
|
| 281 |
+
class OutputFormat(str, Enum):
|
| 282 |
+
"""Output format for CLI list commands."""
|
| 283 |
+
|
| 284 |
+
table = "table"
|
| 285 |
+
json = "json"
|
| 286 |
+
|
| 287 |
+
|
| 288 |
+
FormatOpt = Annotated[
|
| 289 |
+
OutputFormat,
|
| 290 |
+
typer.Option(
|
| 291 |
+
help="Output format (table or json).",
|
| 292 |
+
),
|
| 293 |
+
]
|
| 294 |
+
|
| 295 |
+
QuietOpt = Annotated[
|
| 296 |
+
bool,
|
| 297 |
+
typer.Option(
|
| 298 |
+
"-q",
|
| 299 |
+
"--quiet",
|
| 300 |
+
help="Print only IDs (one per line).",
|
| 301 |
+
),
|
| 302 |
+
]
|
| 303 |
+
|
| 304 |
+
|
| 305 |
+
def _to_header(name: str) -> str:
|
| 306 |
+
"""Convert a camelCase or PascalCase string to SCREAMING_SNAKE_CASE to be used as table header."""
|
| 307 |
+
s = re.sub(r"([a-z])([A-Z])", r"\1_\2", name)
|
| 308 |
+
return s.upper()
|
| 309 |
+
|
| 310 |
+
|
| 311 |
+
def _format_value(value: Any) -> str:
|
| 312 |
+
"""Convert a value to string for terminal display."""
|
| 313 |
+
if not value:
|
| 314 |
+
return ""
|
| 315 |
+
if isinstance(value, bool):
|
| 316 |
+
return "✔" if value else ""
|
| 317 |
+
if isinstance(value, datetime.datetime):
|
| 318 |
+
return value.strftime("%Y-%m-%d")
|
| 319 |
+
if isinstance(value, str) and re.match(r"^\d{4}-\d{2}-\d{2}T", value):
|
| 320 |
+
return value[:10]
|
| 321 |
+
if isinstance(value, list):
|
| 322 |
+
return ", ".join(_format_value(v) for v in value)
|
| 323 |
+
elif isinstance(value, dict):
|
| 324 |
+
if "name" in value: # Likely to be a user or org => print name
|
| 325 |
+
return str(value["name"])
|
| 326 |
+
# TODO: extend if needed
|
| 327 |
+
return json.dumps(value)
|
| 328 |
+
return str(value)
|
| 329 |
+
|
| 330 |
+
|
| 331 |
+
def _format_cell(value: Any, max_len: int = _MAX_CELL_LENGTH) -> str:
|
| 332 |
+
"""Format a value + truncate it for table display."""
|
| 333 |
+
cell = _format_value(value)
|
| 334 |
+
if len(cell) > max_len:
|
| 335 |
+
cell = cell[: max_len - 3] + "..."
|
| 336 |
+
return cell
|
| 337 |
+
|
| 338 |
+
|
| 339 |
+
def print_as_table(
|
| 340 |
+
items: Sequence[dict[str, Any]],
|
| 341 |
+
headers: list[str],
|
| 342 |
+
row_fn: Callable[[dict[str, Any]], list[str]],
|
| 343 |
+
) -> None:
|
| 344 |
+
"""Print items as a formatted table.
|
| 345 |
+
|
| 346 |
+
Args:
|
| 347 |
+
items: Sequence of dictionaries representing the items to display.
|
| 348 |
+
headers: List of column headers.
|
| 349 |
+
row_fn: Function that takes an item dict and returns a list of string values for each column.
|
| 350 |
+
"""
|
| 351 |
+
if not items:
|
| 352 |
+
print("No results found.")
|
| 353 |
+
return
|
| 354 |
+
rows = cast(list[list[Union[str, int]]], [row_fn(item) for item in items])
|
| 355 |
+
print(tabulate(rows, headers=[_to_header(h) for h in headers]))
|
| 356 |
+
|
| 357 |
+
|
| 358 |
+
def print_list_output(
|
| 359 |
+
items: Sequence[dict[str, Any]],
|
| 360 |
+
format: OutputFormat,
|
| 361 |
+
quiet: bool,
|
| 362 |
+
id_key: str = "id",
|
| 363 |
+
headers: Optional[list[str]] = None,
|
| 364 |
+
row_fn: Optional[Callable[[dict[str, Any]], list[str]]] = None,
|
| 365 |
+
) -> None:
|
| 366 |
+
"""Print list command output in the specified format.
|
| 367 |
+
|
| 368 |
+
Args:
|
| 369 |
+
items: Sequence of dictionaries representing the items to display.
|
| 370 |
+
format: Output format (table or json).
|
| 371 |
+
quiet: If True, print only IDs (one per line).
|
| 372 |
+
id_key: Key to use for extracting IDs in quiet mode.
|
| 373 |
+
headers: Optional list of column names for headers. If not provided, auto-detected from keys.
|
| 374 |
+
row_fn: Optional function to extract row values. If not provided, uses _format_cell on each column.
|
| 375 |
+
"""
|
| 376 |
+
if quiet:
|
| 377 |
+
for item in items:
|
| 378 |
+
print(item[id_key])
|
| 379 |
+
return
|
| 380 |
+
|
| 381 |
+
if format == OutputFormat.json:
|
| 382 |
+
print(json.dumps(list(items), indent=2))
|
| 383 |
+
return
|
| 384 |
+
|
| 385 |
+
if headers is None:
|
| 386 |
+
all_columns = list(items[0].keys()) if items else [id_key]
|
| 387 |
+
headers = [col for col in all_columns if any(_format_cell(item.get(col)) for item in items)]
|
| 388 |
+
|
| 389 |
+
if row_fn is None:
|
| 390 |
+
|
| 391 |
+
def row_fn(item: dict[str, Any]) -> list[str]:
|
| 392 |
+
return [_format_cell(item.get(col)) for col in headers] # type: ignore[union-attr]
|
| 393 |
+
|
| 394 |
+
print_as_table(items, headers=headers, row_fn=row_fn)
|
| 395 |
+
|
| 396 |
+
|
| 397 |
+
def _serialize_value(v: object) -> object:
|
| 398 |
+
"""Recursively serialize a value to be JSON-compatible."""
|
| 399 |
+
if isinstance(v, datetime.datetime):
|
| 400 |
+
return v.isoformat()
|
| 401 |
+
elif isinstance(v, dict):
|
| 402 |
+
return {key: _serialize_value(val) for key, val in v.items() if val is not None}
|
| 403 |
+
elif isinstance(v, list):
|
| 404 |
+
return [_serialize_value(item) for item in v]
|
| 405 |
+
return v
|
| 406 |
+
|
| 407 |
+
|
| 408 |
+
def api_object_to_dict(info: Any) -> dict[str, Any]:
|
| 409 |
+
"""Convert repo info dataclasses to json-serializable dicts."""
|
| 410 |
+
return {k: _serialize_value(v) for k, v in dataclasses.asdict(info).items() if v is not None}
|
| 411 |
+
|
| 412 |
+
|
| 413 |
+
def make_expand_properties_parser(valid_properties: list[str]):
|
| 414 |
+
"""Create a callback to parse and validate comma-separated expand properties."""
|
| 415 |
+
|
| 416 |
+
def _parse_expand_properties(value: Optional[str]) -> Optional[list[str]]:
|
| 417 |
+
if value is None:
|
| 418 |
+
return None
|
| 419 |
+
properties = [p.strip() for p in value.split(",")]
|
| 420 |
+
for prop in properties:
|
| 421 |
+
if prop not in valid_properties:
|
| 422 |
+
raise typer.BadParameter(
|
| 423 |
+
f"Invalid expand property: '{prop}'. Valid values are: {', '.join(valid_properties)}"
|
| 424 |
+
)
|
| 425 |
+
return properties
|
| 426 |
+
|
| 427 |
+
return _parse_expand_properties
|
| 428 |
+
|
| 429 |
+
|
| 430 |
+
### PyPI VERSION CHECKER
|
| 431 |
+
|
| 432 |
+
|
| 433 |
+
def check_cli_update(library: Literal["huggingface_hub", "transformers"]) -> None:
|
| 434 |
+
"""
|
| 435 |
+
Check whether a newer version of a library is available on PyPI.
|
| 436 |
+
|
| 437 |
+
If a newer version is found, notify the user and suggest updating.
|
| 438 |
+
If current version is a pre-release (e.g. `1.0.0.rc1`), or a dev version (e.g. `1.0.0.dev1`), no check is performed.
|
| 439 |
+
|
| 440 |
+
This function is called at the entry point of the CLI. It only performs the check once every 24 hours, and any error
|
| 441 |
+
during the check is caught and logged, to avoid breaking the CLI.
|
| 442 |
+
|
| 443 |
+
Args:
|
| 444 |
+
library: The library to check for updates. Currently supports "huggingface_hub" and "transformers".
|
| 445 |
+
"""
|
| 446 |
+
try:
|
| 447 |
+
_check_cli_update(library)
|
| 448 |
+
except Exception:
|
| 449 |
+
# We don't want the CLI to fail on version checks, no matter the reason.
|
| 450 |
+
logger.debug("Error while checking for CLI update.", exc_info=True)
|
| 451 |
+
|
| 452 |
+
|
| 453 |
+
def _check_cli_update(library: Literal["huggingface_hub", "transformers"]) -> None:
    """Query PyPI for the latest version of `library` and notify the user if outdated.

    Skips the check entirely for pre-release/dev versions and when a check was
    already performed within the last 24 hours (tracked by the mtime of a
    marker file).
    """
    current_version = importlib.metadata.version(library)

    # Pre-release and dev builds are never nudged to update.
    if "rc" in current_version or "dev" in current_version:
        return

    marker = Path(constants.CHECK_FOR_UPDATE_DONE_PATH)
    # Rate limiting: at most one PyPI lookup per 24h, based on the marker file's mtime.
    if marker.exists() and (time.time() - marker.stat().st_mtime) < 24 * 3600:
        return

    # Refresh the marker now so a failed lookup is not retried on every invocation.
    marker.parent.mkdir(parents=True, exist_ok=True)
    marker.touch()

    # Fetch latest released version from the PyPI JSON API (short timeout on purpose).
    response = get_session().get(f"https://pypi.org/pypi/{library}/json", timeout=2)
    hf_raise_for_status(response)
    latest_version = response.json()["info"]["version"]

    if current_version == latest_version:
        return

    # Versions differ: tell the user how to update, depending on how the CLI was installed.
    if library == "huggingface_hub":
        update_command = _get_huggingface_hub_update_command()
    else:
        update_command = _get_transformers_update_command()

    click.echo(
        ANSI.yellow(
            f"A new version of {library} ({latest_version}) is available! "
            f"You are using version {current_version}.\n"
            f"To update, run: {ANSI.bold(update_command)}\n",
        )
    )
|
| 490 |
+
|
| 491 |
+
|
| 492 |
+
def _get_huggingface_hub_update_command() -> str:
    """Return the shell command a user should run to update huggingface_hub.

    The command depends on how the CLI was installed (brew, the hf installer
    script, or — by default — pip).
    """
    method = installation_method()
    if method == "brew":
        return "brew upgrade huggingface-cli"
    if method == "hf_installer":
        # The installer script differs between Windows (PowerShell) and POSIX shells.
        if os.name == "nt":
            return 'powershell -NoProfile -Command "iwr -useb https://hf.co/cli/install.ps1 | iex"'
        return "curl -LsSf https://hf.co/cli/install.sh | bash -"
    # Unknown installation method => most likely a pip install.
    return "pip install -U huggingface_hub"
|
| 503 |
+
|
| 504 |
+
|
| 505 |
+
def _get_transformers_update_command() -> str:
    """Return the shell command a user should run to update transformers.

    Mirrors `_get_huggingface_hub_update_command`, except brew installs also
    fall back to pip (there is no brew formula handling transformers).
    """
    if installation_method() == "hf_installer":
        if os.name == "nt":
            return 'powershell -NoProfile -Command "iwr -useb https://hf.co/cli/install.ps1 | iex" -WithTransformers'
        return "curl -LsSf https://hf.co/cli/install.sh | bash -s -- --with-transformers"
    # brew or unknown installation => most likely a pip install.
    return "pip install -U transformers"
|
venv/lib/python3.10/site-packages/huggingface_hub/cli/_errors.py
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2026 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
"""CLI error handling utilities."""
|
| 15 |
+
|
| 16 |
+
from typing import Callable, Optional
|
| 17 |
+
|
| 18 |
+
from huggingface_hub.errors import (
|
| 19 |
+
GatedRepoError,
|
| 20 |
+
HfHubHTTPError,
|
| 21 |
+
LocalTokenNotFoundError,
|
| 22 |
+
RemoteEntryNotFoundError,
|
| 23 |
+
RepositoryNotFoundError,
|
| 24 |
+
RevisionNotFoundError,
|
| 25 |
+
)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
# Ordered mapping from exception type to a formatter producing a short, user-facing
# CLI message. Order matters: `format_known_exception` returns the first
# `isinstance` match, so specific error types must come before the generic
# `HfHubHTTPError` entry (presumably a base class of several of the others —
# TODO confirm against huggingface_hub.errors).
CLI_ERROR_MAPPINGS: dict[type[Exception], Callable[[Exception], str]] = {
    RepositoryNotFoundError: lambda e: (
        "Repository not found. Check the `repo_id` and `repo_type` parameters. If the repo is private, make sure you are authenticated."
    ),
    RevisionNotFoundError: lambda e: "Revision not found. Check the `revision` parameter.",
    GatedRepoError: lambda e: "Access denied. This repository requires approval.",
    LocalTokenNotFoundError: lambda e: "Not logged in. Run 'hf auth login' first.",
    RemoteEntryNotFoundError: lambda e: "File not found in repository.",
    # Fallback for any other Hub HTTP error: surface the raw error text.
    HfHubHTTPError: lambda e: str(e),
}
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def format_known_exception(e: Exception) -> Optional[str]:
    """Map a known exception to a user-friendly CLI message.

    Returns the message for the first matching entry in `CLI_ERROR_MAPPINGS`
    (insertion order), or `None` when the exception type is not recognized.
    """
    return next(
        (formatter(e) for exc_type, formatter in CLI_ERROR_MAPPINGS.items() if isinstance(e, exc_type)),
        None,
    )
|
venv/lib/python3.10/site-packages/huggingface_hub/cli/auth.py
ADDED
|
@@ -0,0 +1,157 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
"""Contains commands to authenticate to the Hugging Face Hub and interact with your repositories.
|
| 15 |
+
|
| 16 |
+
Usage:
|
| 17 |
+
# login and save token locally.
|
| 18 |
+
hf auth login --token=hf_*** --add-to-git-credential
|
| 19 |
+
|
| 20 |
+
# switch between tokens
|
| 21 |
+
hf auth switch
|
| 22 |
+
|
| 23 |
+
# list all tokens
|
| 24 |
+
hf auth list
|
| 25 |
+
|
| 26 |
+
# logout from all tokens
|
| 27 |
+
hf auth logout
|
| 28 |
+
|
| 29 |
+
# check which account you are logged in as
|
| 30 |
+
hf auth whoami
|
| 31 |
+
"""
|
| 32 |
+
|
| 33 |
+
from typing import Annotated, Optional
|
| 34 |
+
|
| 35 |
+
import typer
|
| 36 |
+
|
| 37 |
+
from huggingface_hub.constants import ENDPOINT
|
| 38 |
+
from huggingface_hub.hf_api import whoami
|
| 39 |
+
|
| 40 |
+
from .._login import auth_list, auth_switch, login, logout
|
| 41 |
+
from ..utils import ANSI, get_stored_tokens, get_token, logging
|
| 42 |
+
from ._cli_utils import TokenOpt, typer_factory
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
logger = logging.get_logger(__name__)


# Typer sub-app: every `hf auth ...` subcommand below is registered on this group.
auth_cli = typer_factory(help="Manage authentication (login, logout, etc.).")
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
@auth_cli.command(
    "login",
    examples=[
        "hf auth login",
        "hf auth login --token $HF_TOKEN",
        "hf auth login --token $HF_TOKEN --add-to-git-credential",
    ],
)
def auth_login(
    token: TokenOpt = None,
    add_to_git_credential: Annotated[
        bool,
        typer.Option(
            help="Save to git credential helper. Useful only if you plan to run git commands directly.",
        ),
    ] = False,
) -> None:
    """Login using a token from huggingface.co/settings/tokens."""
    # Thin CLI wrapper: prompting/validation/storage are delegated to `login()`.
    login(token=token, add_to_git_credential=add_to_git_credential)
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
@auth_cli.command(
    "logout",
    examples=["hf auth logout", "hf auth logout --token-name my-token"],
)
def auth_logout(
    token_name: Annotated[
        Optional[str],
        typer.Option(help="Name of token to logout"),
    ] = None,
) -> None:
    """Logout from a specific token."""
    # Per the module usage docs, omitting --token-name logs out from all tokens.
    logout(token_name=token_name)
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def _select_token_name() -> Optional[str]:
    """Interactively prompt the user to pick one of the stored token names.

    Returns the chosen name, or `None` when no tokens are stored or the user
    quits with 'q'. Loops until a valid selection is made.
    """
    names = list(get_stored_tokens().keys())

    if not names:
        logger.error("No stored tokens found. Please login first.")
        return None

    print("Available stored tokens:")
    for position, name in enumerate(names, start=1):
        print(f"{position}. {name}")

    while True:
        answer = input("Enter the number of the token to switch to (or 'q' to quit): ")
        if answer.lower() == "q":
            return None
        try:
            selected = int(answer) - 1
        except ValueError:
            print("Invalid input. Please enter a number or 'q' to quit.")
            continue
        if 0 <= selected < len(names):
            return names[selected]
        print("Invalid selection. Please try again.")
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
@auth_cli.command(
    "switch",
    examples=["hf auth switch", "hf auth switch --token-name my-token"],
)
def auth_switch_cmd(
    token_name: Annotated[
        Optional[str],
        typer.Option(
            help="Name of the token to switch to",
        ),
    ] = None,
    add_to_git_credential: Annotated[
        bool,
        typer.Option(
            help="Save to git credential helper. Useful only if you plan to run git commands directly.",
        ),
    ] = False,
) -> None:
    """Switch between access tokens."""
    # Without an explicit --token-name, fall back to the interactive picker.
    if token_name is None:
        token_name = _select_token_name()
        if token_name is None:
            # Picker aborted ('q' or no stored tokens): exit cleanly without switching.
            print("No token name provided. Aborting.")
            raise typer.Exit()
    auth_switch(token_name, add_to_git_credential=add_to_git_credential)
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
@auth_cli.command("list", examples=["hf auth list"])
def auth_list_cmd() -> None:
    """List all stored access tokens."""
    # Rendering is delegated entirely to `auth_list()` from huggingface_hub._login.
    auth_list()
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
@auth_cli.command("whoami", examples=["hf auth whoami"])
def auth_whoami() -> None:
    """Find out which huggingface.co account you are logged in as."""
    token = get_token()
    if token is None:
        # Not being logged in is informational here, not an error: exit code 0.
        print("Not logged in")
        raise typer.Exit()
    info = whoami(token)
    print(ANSI.bold("user: "), info["name"])
    # NOTE(review): assumes the whoami payload always contains an "orgs" list — confirm.
    orgs = [org["name"] for org in info["orgs"]]
    if orgs:
        print(ANSI.bold("orgs: "), ",".join(orgs))

    # Make it explicit when authentication went through a non-default Hub endpoint.
    if ENDPOINT != "https://huggingface.co":
        print(f"Authenticated through private endpoint: {ENDPOINT}")
|
venv/lib/python3.10/site-packages/huggingface_hub/cli/cache.py
ADDED
|
@@ -0,0 +1,811 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2025-present, the HuggingFace Inc. team.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
"""Contains the 'hf cache' command group with cache management subcommands."""
|
| 16 |
+
|
| 17 |
+
import json
|
| 18 |
+
import re
|
| 19 |
+
import sys
|
| 20 |
+
import time
|
| 21 |
+
from collections import defaultdict
|
| 22 |
+
from dataclasses import dataclass
|
| 23 |
+
from enum import Enum
|
| 24 |
+
from typing import Annotated, Any, Callable, Dict, List, Mapping, Optional, Tuple
|
| 25 |
+
|
| 26 |
+
import typer
|
| 27 |
+
|
| 28 |
+
from huggingface_hub.errors import CLIError
|
| 29 |
+
|
| 30 |
+
from ..utils import (
|
| 31 |
+
ANSI,
|
| 32 |
+
CachedRepoInfo,
|
| 33 |
+
CachedRevisionInfo,
|
| 34 |
+
CacheNotFound,
|
| 35 |
+
HFCacheInfo,
|
| 36 |
+
_format_size,
|
| 37 |
+
scan_cache_dir,
|
| 38 |
+
tabulate,
|
| 39 |
+
)
|
| 40 |
+
from ..utils._parsing import parse_duration, parse_size
|
| 41 |
+
from ._cli_utils import (
|
| 42 |
+
OutputFormat,
|
| 43 |
+
RepoIdArg,
|
| 44 |
+
RepoTypeOpt,
|
| 45 |
+
RevisionOpt,
|
| 46 |
+
TokenOpt,
|
| 47 |
+
get_hf_api,
|
| 48 |
+
typer_factory,
|
| 49 |
+
)
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
# Typer sub-app grouping all `hf cache ...` subcommands.
cache_cli = typer_factory(help="Manage local cache directory.")
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
#### Cache helper utilities
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
@dataclass(frozen=True)
class _DeletionResolution:
    """Result of resolving user-requested deletion targets against the local cache.

    NOTE(review): field semantics inferred from names/types; the consuming code
    is outside this view — confirm against the `hf cache rm` implementation.
    """

    # Revision identifiers requested by the user.
    revisions: frozenset[str]
    # Per-repo sets of cached revisions that matched the request.
    selected: dict[CachedRepoInfo, frozenset[CachedRevisionInfo]]
    # Requested targets that matched nothing in the cache.
    missing: tuple[str, ...]
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
# Filter expressions look like "<key> <op> <value>", e.g. "size>1GB" or "type=model".
_FILTER_PATTERN = re.compile(r"^(?P<key>[a-zA-Z_]+)\s*(?P<op>==|!=|>=|<=|>|<|=)\s*(?P<value>.+)$")
# NOTE(review): the pattern above also matches "==", but "==" is absent from this
# set, so e.g. "size==5GB" parses and is then rejected by `compile_cache_filter`
# with an "Unsupported operator" error — confirm this is intended.
_ALLOWED_OPERATORS = {"=", "!=", ">", "<", ">=", "<="}
_FILTER_KEYS = {"accessed", "modified", "refs", "size", "type"}
_SORT_KEYS = {"accessed", "modified", "name", "size"}
# Sort expressions are "<key>", "<key>:asc" or "<key>:desc".
_SORT_PATTERN = re.compile(r"^(?P<key>[a-zA-Z_]+)(?::(?P<order>asc|desc))?$")
_SORT_DEFAULT_ORDER = {
    # Default ordering: accessed/modified/size are descending (newest/biggest first), name is ascending
    "accessed": "desc",
    "modified": "desc",
    "size": "desc",
    "name": "asc",
}


# Dynamically generate SortOptions enum from _SORT_KEYS.
# Each key contributes three members: "key" (default order), "key_asc" and
# "key_desc", whose values are the sort expressions parsed by `compile_cache_sort`.
_sort_options_dict = {}
for key in sorted(_SORT_KEYS):
    _sort_options_dict[key] = key
    _sort_options_dict[f"{key}_asc"] = f"{key}:asc"
    _sort_options_dict[f"{key}_desc"] = f"{key}:desc"

SortOptions = Enum("SortOptions", _sort_options_dict, type=str, module=__name__)  # type: ignore
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
@dataclass(frozen=True)
class CacheDeletionCounts:
    """Simple counters summarizing cache deletions for CLI messaging."""

    # Number of repos deleted in full (every cached revision of the repo selected).
    repo_count: int
    # Revisions deleted from repos that are only partially removed.
    partial_revision_count: int
    # All selected revisions, across full and partial repo deletions.
    total_revision_count: int


# A row shown by `hf cache ls`: a cached repo, optionally paired with one revision
# (None in repo-level listings).
CacheEntry = Tuple[CachedRepoInfo, Optional[CachedRevisionInfo]]
# All refs (branches/tags) known for each cached repo, aggregated over its revisions.
RepoRefsMap = Dict[CachedRepoInfo, frozenset[str]]
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
def summarize_deletions(
    selected_by_repo: Mapping[CachedRepoInfo, frozenset[CachedRevisionInfo]],
) -> CacheDeletionCounts:
    """Count full-repo deletions versus partial revision deletions.

    A repo counts as fully deleted when every one of its cached revisions is
    selected; its revisions are then excluded from the "partial" counter.
    """
    full_repos = 0
    all_revisions = 0
    full_repo_revisions = 0

    for repo, selected in selected_by_repo.items():
        selected_count = len(selected)
        all_revisions += selected_count
        if selected_count == len(repo.revisions):
            full_repos += 1
            full_repo_revisions += selected_count

    return CacheDeletionCounts(full_repos, all_revisions - full_repo_revisions, all_revisions)
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
def print_cache_selected_revisions(selected_by_repo: Mapping[CachedRepoInfo, frozenset[CachedRevisionInfo]]) -> None:
    """Pretty-print the revisions slated for deletion, grouped per repo.

    Fully-selected repos are shown as a single "(entire repo)" line; otherwise
    each selected revision is listed with its refs and on-disk size.
    """
    ordered_repos = sorted(selected_by_repo.keys(), key=lambda repo: (repo.repo_type, repo.repo_id.lower()))
    for repo in ordered_repos:
        repo_key = f"{repo.repo_type}/{repo.repo_id}"
        revisions = sorted(selected_by_repo[repo], key=lambda rev: rev.commit_hash)
        if len(revisions) == len(repo.revisions):
            # Every cached revision selected: summarize as a whole-repo deletion.
            print(f" - {repo_key} (entire repo)")
        else:
            print(f" - {repo_key}:")
            for revision in revisions:
                refs = " ".join(sorted(revision.refs)) or "(detached)"
                print(f" {revision.commit_hash} [{refs}] {revision.size_on_disk_str}")
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
def build_cache_index(
    hf_cache_info: HFCacheInfo,
) -> Tuple[
    Dict[str, CachedRepoInfo],
    Dict[str, Tuple[CachedRepoInfo, CachedRevisionInfo]],
]:
    """Build lowercase lookup tables for fast resolution of cache ids and commit hashes.

    Returns a pair of dicts: cache id -> repo, and commit hash -> (repo, revision).
    Keys are lowercased so lookups can be case-insensitive.
    """
    repos_by_id: dict[str, CachedRepoInfo] = {}
    revisions_by_hash: dict[str, tuple[CachedRepoInfo, CachedRevisionInfo]] = {}
    for repo in hf_cache_info.repos:
        repos_by_id[repo.cache_id.lower()] = repo
        for revision in repo.revisions:
            revisions_by_hash[revision.commit_hash.lower()] = (repo, revision)
    return repos_by_id, revisions_by_hash
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
def collect_cache_entries(
    hf_cache_info: HFCacheInfo, *, include_revisions: bool
) -> Tuple[List[CacheEntry], RepoRefsMap]:
    """Flatten cache metadata into the (repo, revision) rows consumed by `hf cache ls`.

    In revision mode every cached revision produces one row; otherwise there is
    a single (repo, None) row per repo. Also returns the per-repo aggregated refs.
    Rows are ordered by cache id, then by commit hash in revision mode.
    """
    rows: List[CacheEntry] = []
    refs_by_repo: RepoRefsMap = {}

    for repo in sorted(hf_cache_info.repos, key=lambda r: (r.repo_type, r.repo_id.lower())):
        refs_by_repo[repo] = frozenset(ref for revision in repo.revisions for ref in revision.refs)
        if include_revisions:
            rows.extend((repo, revision) for revision in sorted(repo.revisions, key=lambda rev: rev.commit_hash))
        else:
            rows.append((repo, None))

    if include_revisions:
        rows.sort(key=lambda row: (row[0].cache_id, row[1].commit_hash if row[1] is not None else ""))
    else:
        rows.sort(key=lambda row: row[0].cache_id)
    return rows, refs_by_repo
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
def compile_cache_filter(
    expr: str, repo_refs_map: RepoRefsMap
) -> Callable[[CachedRepoInfo, Optional[CachedRevisionInfo], float], bool]:
    """Convert a `hf cache ls` filter expression into the yes/no test we apply to each cache entry before displaying it."""
    match = _FILTER_PATTERN.match(expr.strip())
    if not match:
        raise ValueError(f"Invalid filter expression: '{expr}'.")

    key = match.group("key").lower()
    op = match.group("op")
    value_raw = match.group("value").strip()

    # The regex accepts "==" but the allowed set does not, so it is rejected here.
    if op not in _ALLOWED_OPERATORS:
        raise ValueError(f"Unsupported operator '{op}' in filter '{expr}'. Must be one of {list(_ALLOWED_OPERATORS)}.")

    if key not in _FILTER_KEYS:
        raise ValueError(f"Unsupported filter key '{key}' in '{expr}'. Must be one of {list(_FILTER_KEYS)}.")
    # at this point we know that key is in `_FILTER_KEYS`
    if key == "size":
        size_threshold = parse_size(value_raw)
        # Revision rows compare their own size; repo rows compare the whole repo's size.
        return lambda repo, revision, _: _compare_numeric(
            revision.size_on_disk if revision is not None else repo.size_on_disk,
            op,
            size_threshold,
        )

    if key in {"modified", "accessed"}:
        # The filter value is a duration (e.g. "7d"); entries are matched on their age.
        seconds = parse_duration(value_raw.strip())

        def _time_filter(repo: CachedRepoInfo, revision: Optional[CachedRevisionInfo], now: float) -> bool:
            # "accessed" is only tracked per repo; "modified" prefers the revision's
            # timestamp and falls back to the repo's when no revision is given.
            timestamp = (
                repo.last_accessed
                if key == "accessed"
                else revision.last_modified
                if revision is not None
                else repo.last_modified
            )
            if timestamp is None:
                # Entries with no timestamp never match a time filter.
                return False
            return _compare_numeric(now - timestamp, op, seconds)

        return _time_filter

    if key == "type":
        expected = value_raw.lower()

        if op != "=":
            raise ValueError(f"Only '=' is supported for 'type' filters. Got '{op}'.")

        def _type_filter(repo: CachedRepoInfo, revision: Optional[CachedRevisionInfo], _: float) -> bool:
            # Case-insensitive match against the repo type (e.g. "model", "dataset").
            return repo.repo_type.lower() == expected

        return _type_filter

    else:  # key == "refs"
        if op != "=":
            raise ValueError(f"Only '=' is supported for 'refs' filters. Got {op}.")

        def _refs_filter(repo: CachedRepoInfo, revision: Optional[CachedRevisionInfo], _: float) -> bool:
            # Revision rows match their own refs; repo rows match any ref of the repo.
            refs = revision.refs if revision is not None else repo_refs_map.get(repo, frozenset())
            return value_raw.lower() in [ref.lower() for ref in refs]

        return _refs_filter
|
| 241 |
+
|
| 242 |
+
|
| 243 |
+
def _build_cache_export_payload(
    entries: List[CacheEntry], *, include_revisions: bool, repo_refs_map: RepoRefsMap
) -> List[Dict[str, Any]]:
    """Turn cache entries into plain, serializable dicts for JSON/CSV exports.

    In revision mode, rows without a revision are skipped; in repo mode, refs
    are taken from the aggregated per-repo map.
    """
    records: List[Dict[str, Any]] = []
    for repo, revision in entries:
        if include_revisions:
            if revision is None:
                # Repo-only placeholder rows cannot be exported in revision mode.
                continue
            records.append(
                {
                    "repo_id": repo.repo_id,
                    "repo_type": repo.repo_type,
                    "revision": revision.commit_hash,
                    "snapshot_path": str(revision.snapshot_path),
                    "size_on_disk": revision.size_on_disk,
                    "last_accessed": repo.last_accessed,
                    "last_modified": revision.last_modified,
                    "refs": sorted(revision.refs),
                }
            )
        else:
            records.append(
                {
                    "repo_id": repo.repo_id,
                    "repo_type": repo.repo_type,
                    "size_on_disk": repo.size_on_disk,
                    "last_accessed": repo.last_accessed,
                    "last_modified": repo.last_modified,
                    "refs": sorted(repo_refs_map.get(repo, frozenset())),
                }
            )
    return records
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+
def print_cache_entries_table(
    entries: List[CacheEntry], *, include_revisions: bool, repo_refs_map: RepoRefsMap
) -> None:
    """Render cache entries as a table and show a human-readable summary."""
    if not entries:
        message = "No cached revisions found." if include_revisions else "No cached repositories found."
        print(message)
        return
    table_rows: List[List[str]]
    if include_revisions:
        # One row per (repo, revision) pair; repo-only placeholder rows are skipped.
        headers = ["ID", "REVISION", "SIZE", "LAST_MODIFIED", "REFS"]
        table_rows = [
            [
                repo.cache_id,
                revision.commit_hash,
                revision.size_on_disk_str.rjust(8),  # right-align sizes for readability
                revision.last_modified_str,
                " ".join(sorted(revision.refs)),
            ]
            for repo, revision in entries
            if revision is not None
        ]
    else:
        headers = ["ID", "SIZE", "LAST_ACCESSED", "LAST_MODIFIED", "REFS"]
        table_rows = [
            [
                repo.cache_id,
                repo.size_on_disk_str.rjust(8),
                repo.last_accessed_str or "",
                repo.last_modified_str,
                " ".join(sorted(repo_refs_map.get(repo, frozenset()))),
            ]
            for repo, _ in entries
        ]

    print(tabulate(table_rows, headers=headers))  # type: ignore[arg-type]

    # Summary counts deduplicate repos, since revision mode yields one entry per revision.
    unique_repos = {repo for repo, _ in entries}
    repo_count = len(unique_repos)
    if include_revisions:
        # Only count/size the revisions actually listed.
        revision_count = sum(1 for _, revision in entries if revision is not None)
        total_size = sum(revision.size_on_disk for _, revision in entries if revision is not None)
    else:
        # Repo mode: totals cover every cached revision of each listed repo.
        revision_count = sum(len(repo.revisions) for repo in unique_repos)
        total_size = sum(repo.size_on_disk for repo in unique_repos)

    summary = f"\nFound {repo_count} repo(s) for a total of {revision_count} revision(s) and {_format_size(total_size)} on disk."
    print(ANSI.bold(summary))
|
| 323 |
+
|
| 324 |
+
|
| 325 |
+
def print_cache_entries_json(
    entries: List[CacheEntry], *, include_revisions: bool, repo_refs_map: RepoRefsMap
) -> None:
    """Write cache entries to stdout as pretty-printed JSON (trailing newline included)."""
    records = _build_cache_export_payload(entries, include_revisions=include_revisions, repo_refs_map=repo_refs_map)
    sys.stdout.write(json.dumps(records, indent=2))
    sys.stdout.write("\n")
|
| 332 |
+
|
| 333 |
+
|
| 334 |
+
def _compare_numeric(left: Optional[float], op: str, right: float) -> bool:
|
| 335 |
+
"""Evaluate numeric comparisons for filters."""
|
| 336 |
+
if left is None:
|
| 337 |
+
return False
|
| 338 |
+
|
| 339 |
+
comparisons = {
|
| 340 |
+
"=": left == right,
|
| 341 |
+
"!=": left != right,
|
| 342 |
+
">": left > right,
|
| 343 |
+
"<": left < right,
|
| 344 |
+
">=": left >= right,
|
| 345 |
+
"<=": left <= right,
|
| 346 |
+
}
|
| 347 |
+
|
| 348 |
+
if op not in comparisons:
|
| 349 |
+
raise ValueError(f"Unsupported numeric comparison operator: {op}")
|
| 350 |
+
|
| 351 |
+
return comparisons[op]
|
| 352 |
+
|
| 353 |
+
|
| 354 |
+
def compile_cache_sort(sort_expr: str) -> tuple[Callable[[CacheEntry], tuple[Any, ...]], bool]:
    """Turn a `hf cache ls` sort expression into a sort key function.

    Returns:
        A `(key_function, reverse_flag)` pair where `reverse_flag` is True for
        descending order and False for ascending order.
    """
    parsed = _SORT_PATTERN.match(sort_expr.strip().lower())
    if parsed is None:
        raise ValueError(f"Invalid sort expression: '{sort_expr}'. Expected format: 'key' or 'key:asc' or 'key:desc'.")

    sort_key = parsed.group("key").lower()
    requested_order = parsed.group("order")

    if sort_key not in _SORT_KEYS:
        raise ValueError(f"Unsupported sort key '{sort_key}' in '{sort_expr}'. Must be one of {list(_SORT_KEYS)}.")

    # Fall back to the per-key default when no explicit order was given.
    effective_order = requested_order if requested_order else _SORT_DEFAULT_ORDER[sort_key]
    descending = effective_order == "desc"

    def _key(entry: CacheEntry) -> tuple[Any, ...]:
        repo, revision = entry

        if sort_key == "name":
            # Case-insensitive alphabetical order on the cache id (repo type/id).
            return (repo.cache_id.lower(),)

        if sort_key == "size":
            # Per-revision size when listing revisions, aggregated repo size otherwise.
            return (revision.size_on_disk if revision is not None else repo.size_on_disk,)

        if sort_key == "accessed":
            # last_accessed is only tracked per repo, even in revision mode;
            # missing timestamps sort as 0.0 (epoch).
            return (repo.last_accessed if repo.last_accessed is not None else 0.0,)

        if sort_key == "modified":
            # Prefer the revision timestamp when available, else the repo's.
            if revision is not None:
                return (revision.last_modified if revision.last_modified is not None else 0.0,)
            return (repo.last_modified if repo.last_modified is not None else 0.0,)

        # Unreachable: the key was validated against _SORT_KEYS above.
        raise ValueError(f"Unsupported sort key: {sort_key}")

    return _key, descending
|
| 406 |
+
|
| 407 |
+
|
| 408 |
+
def _resolve_deletion_targets(hf_cache_info: HFCacheInfo, targets: list[str]) -> _DeletionResolution:
    """Resolve the deletion targets into a deletion resolution.

    Each target is either a 40-hex-character commit hash (looked up in the
    revision index) or a repo identifier (looked up in the repo index, in which
    case every cached revision of that repo is selected). Targets that match
    nothing are collected into `missing` rather than raising.
    """
    repo_lookup, revision_lookup = build_cache_index(hf_cache_info)

    # repo -> set of its revisions selected for deletion; `revisions` is the
    # flat set of commit hashes across all repos.
    selected: dict[CachedRepoInfo, set[CachedRevisionInfo]] = defaultdict(set)
    revisions: set[str] = set()
    missing: list[str] = []

    for raw_target in targets:
        target = raw_target.strip()
        if not target:
            # Skip blank arguments silently.
            continue
        lowered = target.lower()

        # Exactly 40 hex characters => treated as a commit hash, never a repo id.
        if re.fullmatch(r"[0-9a-fA-F]{40}", lowered):
            match = revision_lookup.get(lowered)
            if match is None:
                # Report the target as typed by the user, not the lowered form.
                missing.append(raw_target)
                continue
            repo, revision = match
            selected[repo].add(revision)
            revisions.add(revision.commit_hash)
            continue

        matched_repo = repo_lookup.get(lowered)
        if matched_repo is None:
            missing.append(raw_target)
            continue

        # A repo target selects all of its cached revisions.
        for revision in matched_repo.revisions:
            selected[matched_repo].add(revision)
            revisions.add(revision.commit_hash)

    # Freeze the per-repo sets so the returned resolution is immutable.
    frozen_selected = {repo: frozenset(revs) for repo, revs in selected.items()}
    return _DeletionResolution(
        revisions=frozenset(revisions),
        selected=frozen_selected,
        missing=tuple(missing),
    )
|
| 447 |
+
|
| 448 |
+
|
| 449 |
+
#### Cache CLI commands
|
| 450 |
+
|
| 451 |
+
|
| 452 |
+
@cache_cli.command(
    examples=[
        "hf cache ls",
        "hf cache ls --revisions",
        'hf cache ls --filter "size>1GB" --limit 20',
        "hf cache ls --format json",
    ],
)
def ls(
    cache_dir: Annotated[
        Optional[str],
        typer.Option(
            help="Cache directory to scan (defaults to Hugging Face cache).",
        ),
    ] = None,
    revisions: Annotated[
        bool,
        typer.Option(
            help="Include revisions in the output instead of aggregated repositories.",
        ),
    ] = False,
    filter: Annotated[
        Optional[list[str]],
        typer.Option(
            "-f",
            "--filter",
            help="Filter entries (e.g. 'size>1GB', 'type=model', 'accessed>7d'). Can be used multiple times.",
        ),
    ] = None,
    format: Annotated[
        OutputFormat,
        typer.Option(
            help="Output format.",
        ),
    ] = OutputFormat.table,
    quiet: Annotated[
        bool,
        typer.Option(
            "-q",
            "--quiet",
            help="Print only IDs (repo IDs or revision hashes).",
        ),
    ] = False,
    sort: Annotated[
        Optional[SortOptions],
        typer.Option(
            help="Sort entries by key. Supported keys: 'accessed', 'modified', 'name', 'size'. "
            "Append ':asc' or ':desc' to explicitly set the order (e.g., 'modified:asc'). "
            "Defaults: 'accessed', 'modified', 'size' default to 'desc' (newest/biggest first); "
            "'name' defaults to 'asc' (alphabetical).",
        ),
    ] = None,
    limit: Annotated[
        Optional[int],
        typer.Option(
            help="Limit the number of results returned. Returns only the top N entries after sorting.",
        ),
    ] = None,
) -> None:
    """List cached repositories or revisions."""
    # Scan the cache up-front; a missing cache directory is a user-facing CLI error.
    try:
        hf_cache_info = scan_cache_dir(cache_dir)
    except CacheNotFound as exc:
        raise CLIError(f"Cache directory not found: {exc.cache_dir}") from exc

    filters = filter or []

    # `entries` holds (repo, revision-or-None) pairs; `repo_refs_map` is used by
    # the compiled filters and by the formatters.
    entries, repo_refs_map = collect_cache_entries(hf_cache_info, include_revisions=revisions)
    try:
        filter_fns = [compile_cache_filter(expr, repo_refs_map) for expr in filters]
    except ValueError as exc:
        # Invalid filter expressions surface as bad CLI parameters.
        raise typer.BadParameter(str(exc)) from exc

    # All filters share one timestamp so relative-time expressions
    # (e.g. 'accessed>7d') are evaluated consistently across entries.
    now = time.time()
    for fn in filter_fns:
        entries = [entry for entry in entries if fn(entry[0], entry[1], now)]

    # Apply sorting if requested
    if sort:
        try:
            sort_key_fn, reverse = compile_cache_sort(sort.value)
            entries.sort(key=sort_key_fn, reverse=reverse)
        except ValueError as exc:
            raise typer.BadParameter(str(exc)) from exc

    # Apply limit if requested
    if limit is not None:
        if limit < 0:
            # NOTE(review): limit == 0 is accepted (yields an empty listing) even
            # though the message says "positive" — confirm whether 0 should be rejected.
            raise typer.BadParameter(f"Limit must be a positive integer, got {limit}.")
        entries = entries[:limit]

    if quiet:
        # Quiet mode: one ID per line (commit hash in revision mode, cache id otherwise).
        for repo, revision in entries:
            print(revision.commit_hash if revision is not None else repo.cache_id)
        return

    # Dispatch to the formatter matching the requested output format.
    formatters = {
        OutputFormat.table: print_cache_entries_table,
        OutputFormat.json: print_cache_entries_json,
    }
    return formatters[format](entries, include_revisions=revisions, repo_refs_map=repo_refs_map)
|
| 553 |
+
|
| 554 |
+
|
| 555 |
+
@cache_cli.command(
    examples=[
        "hf cache rm model/gpt2",
        "hf cache rm <revision_hash>",
        "hf cache rm model/gpt2 --dry-run",
        "hf cache rm model/gpt2 --yes",
    ],
)
def rm(
    targets: Annotated[
        list[str],
        typer.Argument(
            help="One or more repo IDs (e.g. model/bert-base-uncased) or revision hashes to delete.",
        ),
    ],
    cache_dir: Annotated[
        Optional[str],
        typer.Option(
            help="Cache directory to scan (defaults to Hugging Face cache).",
        ),
    ] = None,
    yes: Annotated[
        bool,
        typer.Option(
            "-y",
            "--yes",
            help="Skip confirmation prompt.",
        ),
    ] = False,
    dry_run: Annotated[
        bool,
        typer.Option(
            help="Preview deletions without removing anything.",
        ),
    ] = False,
) -> None:
    """Remove cached repositories or revisions."""
    try:
        hf_cache_info = scan_cache_dir(cache_dir)
    except CacheNotFound as exc:
        raise CLIError(f"Cache directory not found: {exc.cache_dir}") from exc

    # Map each CLI target (repo id or commit hash) onto concrete cached revisions.
    resolution = _resolve_deletion_targets(hf_cache_info, targets)

    # Unknown targets are reported but do not abort the command.
    if resolution.missing:
        print("Could not find the following targets in the cache:")
        for entry in resolution.missing:
            print(f" - {entry}")

    if len(resolution.revisions) == 0:
        print("Nothing to delete.")
        raise typer.Exit(code=0)

    # Build the deletion strategy (sorted for deterministic ordering) and a
    # human-readable summary before asking for confirmation.
    strategy = hf_cache_info.delete_revisions(*sorted(resolution.revisions))
    counts = summarize_deletions(resolution.selected)

    summary_parts: list[str] = []
    if counts.repo_count:
        summary_parts.append(f"{counts.repo_count} repo(s)")
    if counts.partial_revision_count:
        summary_parts.append(f"{counts.partial_revision_count} revision(s)")
    if not summary_parts:
        # Fallback wording when neither whole repos nor partial deletions apply.
        summary_parts.append(f"{counts.total_revision_count} revision(s)")

    summary_text = " and ".join(summary_parts)
    print(f"About to delete {summary_text} totalling {strategy.expected_freed_size_str}.")
    print_cache_selected_revisions(resolution.selected)

    if dry_run:
        print("Dry run: no files were deleted.")
        return

    # Confirmation defaults to "no"; --yes bypasses the prompt entirely.
    if not yes and not typer.confirm("Proceed with deletion?", default=False):
        print("Deletion cancelled.")
        return

    strategy.execute()
    # NOTE(review): `counts` is recomputed from the same unchanged selection —
    # this second call appears redundant (but harmless).
    counts = summarize_deletions(resolution.selected)
    print(
        f"Deleted {counts.repo_count} repo(s) and {counts.total_revision_count} revision(s); freed {strategy.expected_freed_size_str}."
    )
|
| 636 |
+
|
| 637 |
+
|
| 638 |
+
@cache_cli.command(examples=["hf cache prune", "hf cache prune --dry-run"])
def prune(
    cache_dir: Annotated[
        Optional[str],
        typer.Option(
            help="Cache directory to scan (defaults to Hugging Face cache).",
        ),
    ] = None,
    yes: Annotated[
        bool,
        typer.Option(
            "-y",
            "--yes",
            help="Skip confirmation prompt.",
        ),
    ] = False,
    dry_run: Annotated[
        bool,
        typer.Option(
            help="Preview deletions without removing anything.",
        ),
    ] = False,
) -> None:
    """Remove detached revisions from the cache."""
    try:
        hf_cache_info = scan_cache_dir(cache_dir)
    except CacheNotFound as exc:
        raise CLIError(f"Cache directory not found: {exc.cache_dir}") from exc

    # Collect, per repo, the revisions that no ref (branch/tag) points to.
    selected: dict[CachedRepoInfo, frozenset[CachedRevisionInfo]] = {}
    revisions: set[str] = set()
    for repo in hf_cache_info.repos:
        detached = frozenset(revision for revision in repo.revisions if len(revision.refs) == 0)
        if not detached:
            continue
        selected[repo] = detached
        revisions.update(revision.commit_hash for revision in detached)

    if len(revisions) == 0:
        print("No unreferenced revisions found. Nothing to prune.")
        return

    # Wrapped in a _DeletionResolution for consistency with `rm`; there can be
    # no missing targets here since revisions come from the scan itself.
    resolution = _DeletionResolution(
        revisions=frozenset(revisions),
        selected=selected,
        missing=(),
    )
    strategy = hf_cache_info.delete_revisions(*sorted(resolution.revisions))
    counts = summarize_deletions(selected)

    print(
        f"About to delete {counts.total_revision_count} unreferenced revision(s) ({strategy.expected_freed_size_str} total)."
    )
    print_cache_selected_revisions(selected)

    if dry_run:
        print("Dry run: no files were deleted.")
        return

    if not yes and not typer.confirm("Proceed?"):
        print("Pruning cancelled.")
        return

    strategy.execute()
    print(f"Deleted {counts.total_revision_count} unreferenced revision(s); freed {strategy.expected_freed_size_str}.")
|
| 703 |
+
|
| 704 |
+
|
| 705 |
+
@cache_cli.command(
    examples=[
        "hf cache verify gpt2",
        "hf cache verify gpt2 --revision refs/pr/1",
        "hf cache verify my-dataset --repo-type dataset",
    ],
)
def verify(
    repo_id: RepoIdArg,
    repo_type: RepoTypeOpt = RepoTypeOpt.model,
    revision: RevisionOpt = None,
    cache_dir: Annotated[
        Optional[str],
        typer.Option(
            help="Cache directory to use when verifying files from cache (defaults to Hugging Face cache).",
        ),
    ] = None,
    local_dir: Annotated[
        Optional[str],
        typer.Option(
            help="If set, verify files under this directory instead of the cache.",
        ),
    ] = None,
    fail_on_missing_files: Annotated[
        bool,
        typer.Option(
            "--fail-on-missing-files",
            help="Fail if some files exist on the remote but are missing locally.",
        ),
    ] = False,
    fail_on_extra_files: Annotated[
        bool,
        typer.Option(
            "--fail-on-extra-files",
            help="Fail if some files exist locally but are not present on the remote revision.",
        ),
    ] = False,
    token: TokenOpt = None,
) -> None:
    """Verify checksums for a single repo revision from cache or a local directory.

    Examples:
    - Verify main revision in cache: `hf cache verify gpt2`
    - Verify specific revision: `hf cache verify gpt2 --revision refs/pr/1`
    - Verify dataset: `hf cache verify karpathy/fineweb-edu-100b-shuffle --repo-type dataset`
    - Verify local dir: `hf cache verify deepseek-ai/DeepSeek-OCR --local-dir /path/to/repo`
    """

    # The two location options are mutually exclusive; exit code 2 signals usage error.
    if local_dir is not None and cache_dir is not None:
        print("Cannot pass both --local-dir and --cache-dir. Use one or the other.")
        raise typer.Exit(code=2)

    api = get_hf_api(token=token)

    result = api.verify_repo_checksums(
        repo_id=repo_id,
        # Normalize enum -> raw string; plain strings pass through via str().
        repo_type=repo_type.value if hasattr(repo_type, "value") else str(repo_type),
        revision=revision,
        local_dir=local_dir,
        cache_dir=cache_dir,
        token=token,
    )

    # Accumulate failures across all three checks so every problem is reported
    # before exiting with a non-zero status.
    exit_code = 0

    has_mismatches = bool(result.mismatches)
    if has_mismatches:
        print("❌ Checksum verification failed for the following file(s):")
        for m in result.mismatches:
            print(f" - {m['path']}: expected {m['expected']} ({m['algorithm']}), got {m['actual']}")
        exit_code = 1

    if result.missing_paths:
        if fail_on_missing_files:
            print("Missing files (present remotely, absent locally):")
            for p in result.missing_paths:
                print(f" - {p}")
            exit_code = 1
        else:
            # Without the flag, missing files are only a warning (count, no listing).
            warning = (
                f"{len(result.missing_paths)} remote file(s) are missing locally. "
                "Use --fail-on-missing-files for details."
            )
            print(f"⚠️ {warning}")

    if result.extra_paths:
        if fail_on_extra_files:
            print("Extra files (present locally, absent remotely):")
            for p in result.extra_paths:
                print(f" - {p}")
            exit_code = 1
        else:
            warning = (
                f"{len(result.extra_paths)} local file(s) do not exist on the remote repo. "
                "Use --fail-on-extra-files for details."
            )
            print(f"⚠️ {warning}")

    verified_location = result.verified_path

    if exit_code != 0:
        # NOTE(review): `repo_type.value` is used unguarded here and below, unlike
        # the hasattr() check at the call site above — confirm repo_type is always
        # the RepoTypeOpt enum when reaching this point.
        print(f"❌ Verification failed for '{repo_id}' ({repo_type.value}) in {verified_location}.")
        print(f" Revision: {result.revision}")
        raise typer.Exit(code=exit_code)

    print(f"✅ Verified {result.checked_count} file(s) for '{repo_id}' ({repo_type.value}) in {verified_location}")
    print(" All checksums match.")
|
venv/lib/python3.10/site-packages/huggingface_hub/cli/collections.py
ADDED
|
@@ -0,0 +1,331 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2026 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
"""Contains commands to interact with collections on the Hugging Face Hub.
|
| 15 |
+
|
| 16 |
+
Usage:
|
| 17 |
+
# list collections on the Hub
|
| 18 |
+
hf collections ls
|
| 19 |
+
|
| 20 |
+
# list collections for a specific user
|
| 21 |
+
hf collections ls --owner username
|
| 22 |
+
|
| 23 |
+
# get info about a collection
|
| 24 |
+
hf collections info username/collection-slug
|
| 25 |
+
|
| 26 |
+
# create a new collection
|
| 27 |
+
hf collections create "My Collection" --description "A collection of models"
|
| 28 |
+
|
| 29 |
+
# add an item to a collection
|
| 30 |
+
hf collections add-item username/collection-slug username/model-name model
|
| 31 |
+
|
| 32 |
+
# delete a collection
|
| 33 |
+
hf collections delete username/collection-slug
|
| 34 |
+
"""
|
| 35 |
+
|
| 36 |
+
import enum
|
| 37 |
+
import json
|
| 38 |
+
from typing import Annotated, Optional, get_args
|
| 39 |
+
|
| 40 |
+
import typer
|
| 41 |
+
|
| 42 |
+
from huggingface_hub.hf_api import CollectionItemType_T, CollectionSort_T
|
| 43 |
+
|
| 44 |
+
from ._cli_utils import (
|
| 45 |
+
FormatOpt,
|
| 46 |
+
LimitOpt,
|
| 47 |
+
OutputFormat,
|
| 48 |
+
QuietOpt,
|
| 49 |
+
TokenOpt,
|
| 50 |
+
api_object_to_dict,
|
| 51 |
+
get_hf_api,
|
| 52 |
+
print_list_output,
|
| 53 |
+
typer_factory,
|
| 54 |
+
)
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
# Build enums dynamically from Literal types to avoid duplication
# (keeps the CLI choices in sync with the Literal aliases imported from hf_api).
_COLLECTION_ITEM_TYPES = get_args(CollectionItemType_T)
# type=str makes each member a str subclass, so members compare/serialize as
# their plain string value.
CollectionItemType = enum.Enum("CollectionItemType", {t: t for t in _COLLECTION_ITEM_TYPES}, type=str)  # type: ignore[misc]

_COLLECTION_SORT_OPTIONS = get_args(CollectionSort_T)
CollectionSort = enum.Enum("CollectionSort", {s: s for s in _COLLECTION_SORT_OPTIONS}, type=str)  # type: ignore[misc]


# Typer sub-app holding all `hf collections ...` commands.
collections_cli = typer_factory(help="Interact with collections on the Hub.")
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
@collections_cli.command(
    "ls",
    examples=[
        "hf collections ls",
        "hf collections ls --owner nvidia",
        "hf collections ls --item models/teknium/OpenHermes-2.5-Mistral-7B --limit 10",
    ],
)
def collections_ls(
    owner: Annotated[
        Optional[str],
        typer.Option(help="Filter by owner username or organization."),
    ] = None,
    item: Annotated[
        Optional[str],
        typer.Option(
            help='Filter collections containing a specific item (e.g., "models/gpt2", "datasets/squad", "papers/2311.12983").'
        ),
    ] = None,
    sort: Annotated[
        Optional[CollectionSort],
        typer.Option(help="Sort results by last modified, trending, or upvotes."),
    ] = None,
    limit: LimitOpt = 10,
    format: FormatOpt = OutputFormat.table,
    quiet: QuietOpt = False,
    token: TokenOpt = None,
) -> None:
    """List collections on the Hub."""
    client = get_hf_api(token=token)
    # Unwrap the enum to its raw string value; None means "no explicit sort".
    collections_iter = client.list_collections(
        owner=owner,
        item=item,
        sort=sort.value if sort else None,  # type: ignore[arg-type]
        limit=limit,
    )
    rows = [api_object_to_dict(collection) for collection in collections_iter]
    print_list_output(rows, format=format, quiet=quiet)
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
@collections_cli.command(
    "info",
    examples=[
        "hf collections info username/my-collection-slug",
    ],
)
def collections_info(
    collection_slug: Annotated[str, typer.Argument(help="The collection slug (e.g., 'username/collection-slug').")],
    token: TokenOpt = None,
) -> None:
    """Get info about a collection on the Hub."""
    client = get_hf_api(token=token)
    # Fetch the collection and dump its full metadata as indented JSON.
    as_dict = api_object_to_dict(client.get_collection(collection_slug))
    print(json.dumps(as_dict, indent=2))
|
| 125 |
+
|
| 126 |
+
|
| 127 |
+
@collections_cli.command(
    "create",
    examples=[
        'hf collections create "My Models"',
        'hf collections create "My Models" --description "A collection of my favorite models" --private',
        'hf collections create "Org Collection" --namespace my-org',
    ],
)
def collections_create(
    title: Annotated[str, typer.Argument(help="The title of the collection.")],
    namespace: Annotated[
        Optional[str],
        typer.Option(help="The namespace (username or organization). Defaults to the authenticated user."),
    ] = None,
    description: Annotated[
        Optional[str],
        typer.Option(help="A description for the collection."),
    ] = None,
    private: Annotated[
        bool,
        typer.Option(help="Create a private collection."),
    ] = False,
    exists_ok: Annotated[
        bool,
        typer.Option(help="Do not raise an error if the collection already exists."),
    ] = False,
    token: TokenOpt = None,
) -> None:
    """Create a new collection on the Hub."""
    client = get_hf_api(token=token)
    new_collection = client.create_collection(
        title=title,
        namespace=namespace,
        description=description,
        private=private,
        exists_ok=exists_ok,
    )
    # Confirm with the collection URL, then dump its full metadata.
    print(f"Collection created: {new_collection.url}")
    print(json.dumps(api_object_to_dict(new_collection), indent=2))
|
| 166 |
+
|
| 167 |
+
|
| 168 |
+
@collections_cli.command(
    "update",
    examples=[
        'hf collections update username/my-collection --title "New Title"',
        'hf collections update username/my-collection --description "Updated description"',
        "hf collections update username/my-collection --private --theme green",
    ],
)
def collections_update(
    collection_slug: Annotated[str, typer.Argument(help="The collection slug (e.g., 'username/collection-slug').")],
    title: Annotated[
        Optional[str],
        typer.Option(help="The new title for the collection."),
    ] = None,
    description: Annotated[
        Optional[str],
        typer.Option(help="The new description for the collection."),
    ] = None,
    position: Annotated[
        Optional[int],
        typer.Option(help="The new position of the collection in the owner's list."),
    ] = None,
    private: Annotated[
        Optional[bool],
        typer.Option(help="Whether the collection should be private."),
    ] = None,
    theme: Annotated[
        Optional[str],
        typer.Option(help="The theme color for the collection (e.g., 'green', 'blue')."),
    ] = None,
    token: TokenOpt = None,
) -> None:
    """Update a collection's metadata on the Hub."""
    client = get_hf_api(token=token)
    # All fields are optional; None values leave the corresponding field untouched.
    updated = client.update_collection_metadata(
        collection_slug=collection_slug,
        title=title,
        description=description,
        position=position,
        private=private,
        theme=theme,
    )
    print(f"Collection updated: {updated.url}")
    print(json.dumps(api_object_to_dict(updated), indent=2))
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
@collections_cli.command(
    "delete",
    examples=[
        "hf collections delete username/my-collection",
        "hf collections delete username/my-collection --missing-ok",
    ],
)
def collections_delete(
    collection_slug: Annotated[str, typer.Argument(help="The collection slug (e.g., 'username/collection-slug').")],
    missing_ok: Annotated[
        bool,
        typer.Option(help="Do not raise an error if the collection doesn't exist."),
    ] = False,
    token: TokenOpt = None,
) -> None:
    """Delete a collection from the Hub."""
    client = get_hf_api(token=token)
    # With --missing-ok, deleting a non-existent collection is not an error.
    client.delete_collection(collection_slug, missing_ok=missing_ok)
    print(f"Collection deleted: {collection_slug}")
|
| 233 |
+
|
| 234 |
+
|
| 235 |
+
@collections_cli.command(
    "add-item",
    examples=[
        "hf collections add-item username/my-collection moonshotai/kimi-k2 model",
        'hf collections add-item username/my-collection Qwen/DeepPlanning dataset --note "Useful dataset"',
        "hf collections add-item username/my-collection Tongyi-MAI/Z-Image space",
    ],
)
def collections_add_item(
    collection_slug: Annotated[str, typer.Argument(help="The collection slug (e.g., 'username/collection-slug').")],
    item_id: Annotated[
        str, typer.Argument(help="The ID of the item to add (repo_id for repos, paper ID for papers).")
    ],
    item_type: Annotated[
        CollectionItemType,
        typer.Argument(help="The type of item (model, dataset, space, paper, or collection)."),
    ],
    note: Annotated[
        Optional[str],
        typer.Option(help="A note to attach to the item (max 500 characters)."),
    ] = None,
    exists_ok: Annotated[
        bool,
        typer.Option(help="Do not raise an error if the item is already in the collection."),
    ] = False,
    token: TokenOpt = None,
) -> None:
    """Add an item to a collection."""
    client = get_hf_api(token=token)
    # Unwrap the CLI enum to the raw string the API expects.
    updated = client.add_collection_item(
        collection_slug=collection_slug,
        item_id=item_id,
        item_type=item_type.value,  # type: ignore[arg-type]
        note=note,
        exists_ok=exists_ok,
    )
    print(f"Item added to collection: {collection_slug}")
    print(json.dumps(api_object_to_dict(updated), indent=2))
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+
@collections_cli.command(
    "update-item",
    examples=[
        'hf collections update-item username/my-collection ITEM_OBJECT_ID --note "Updated note"',
        "hf collections update-item username/my-collection ITEM_OBJECT_ID --position 0",
    ],
)
def collections_update_item(
    collection_slug: Annotated[str, typer.Argument(help="The collection slug (e.g., 'username/collection-slug').")],
    item_object_id: Annotated[
        str,
        typer.Argument(help="The ID of the item in the collection (from 'item_object_id' field, not the repo_id)."),
    ],
    note: Annotated[
        Optional[str],
        typer.Option(help="A new note for the item (max 500 characters)."),
    ] = None,
    position: Annotated[
        Optional[int],
        typer.Option(help="The new position of the item in the collection."),
    ] = None,
    token: TokenOpt = None,
) -> None:
    """Update an item in a collection."""
    client = get_hf_api(token=token)
    # The API returns nothing useful here; only a confirmation line is printed.
    client.update_collection_item(
        collection_slug=collection_slug,
        item_object_id=item_object_id,
        note=note,
        position=position,
    )
    print(f"Item updated in collection: {collection_slug}")
|
| 307 |
+
|
| 308 |
+
|
| 309 |
+
@collections_cli.command("delete-item")
|
| 310 |
+
def collections_delete_item(
|
| 311 |
+
collection_slug: Annotated[str, typer.Argument(help="The collection slug (e.g., 'username/collection-slug').")],
|
| 312 |
+
item_object_id: Annotated[
|
| 313 |
+
str,
|
| 314 |
+
typer.Argument(
|
| 315 |
+
help="The ID of the item in the collection (retrieved from `item_object_id` field returned by 'hf collections info'."
|
| 316 |
+
),
|
| 317 |
+
],
|
| 318 |
+
missing_ok: Annotated[
|
| 319 |
+
bool,
|
| 320 |
+
typer.Option(help="Do not raise an error if the item doesn't exist."),
|
| 321 |
+
] = False,
|
| 322 |
+
token: TokenOpt = None,
|
| 323 |
+
) -> None:
|
| 324 |
+
"""Delete an item from a collection."""
|
| 325 |
+
api = get_hf_api(token=token)
|
| 326 |
+
api.delete_collection_item(
|
| 327 |
+
collection_slug=collection_slug,
|
| 328 |
+
item_object_id=item_object_id,
|
| 329 |
+
missing_ok=missing_ok,
|
| 330 |
+
)
|
| 331 |
+
print(f"Item deleted from collection: {collection_slug}")
|
venv/lib/python3.10/site-packages/huggingface_hub/cli/datasets.py
ADDED
|
@@ -0,0 +1,126 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2026 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
"""Contains commands to interact with datasets on the Hugging Face Hub.
|
| 15 |
+
|
| 16 |
+
Usage:
|
| 17 |
+
# list datasets on the Hub
|
| 18 |
+
hf datasets ls
|
| 19 |
+
|
| 20 |
+
# list datasets with a search query
|
| 21 |
+
hf datasets ls --search "code"
|
| 22 |
+
|
| 23 |
+
# get info about a dataset
|
| 24 |
+
hf datasets info HuggingFaceFW/fineweb
|
| 25 |
+
"""
|
| 26 |
+
|
| 27 |
+
import enum
|
| 28 |
+
import json
|
| 29 |
+
from typing import Annotated, Optional, get_args
|
| 30 |
+
|
| 31 |
+
import typer
|
| 32 |
+
|
| 33 |
+
from huggingface_hub.errors import CLIError, RepositoryNotFoundError, RevisionNotFoundError
|
| 34 |
+
from huggingface_hub.hf_api import DatasetSort_T, ExpandDatasetProperty_T
|
| 35 |
+
|
| 36 |
+
from ._cli_utils import (
|
| 37 |
+
AuthorOpt,
|
| 38 |
+
FilterOpt,
|
| 39 |
+
FormatOpt,
|
| 40 |
+
LimitOpt,
|
| 41 |
+
OutputFormat,
|
| 42 |
+
QuietOpt,
|
| 43 |
+
RevisionOpt,
|
| 44 |
+
SearchOpt,
|
| 45 |
+
TokenOpt,
|
| 46 |
+
api_object_to_dict,
|
| 47 |
+
get_hf_api,
|
| 48 |
+
make_expand_properties_parser,
|
| 49 |
+
print_list_output,
|
| 50 |
+
typer_factory,
|
| 51 |
+
)
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
_EXPAND_PROPERTIES = sorted(get_args(ExpandDatasetProperty_T))
|
| 55 |
+
_SORT_OPTIONS = get_args(DatasetSort_T)
|
| 56 |
+
DatasetSortEnum = enum.Enum("DatasetSortEnum", {s: s for s in _SORT_OPTIONS}, type=str) # type: ignore[misc]
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
ExpandOpt = Annotated[
|
| 60 |
+
Optional[str],
|
| 61 |
+
typer.Option(
|
| 62 |
+
help=f"Comma-separated properties to expand. Example: '--expand=downloads,likes,tags'. Valid: {', '.join(_EXPAND_PROPERTIES)}.",
|
| 63 |
+
callback=make_expand_properties_parser(_EXPAND_PROPERTIES),
|
| 64 |
+
),
|
| 65 |
+
]
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
datasets_cli = typer_factory(help="Interact with datasets on the Hub.")
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
@datasets_cli.command(
|
| 72 |
+
"ls",
|
| 73 |
+
examples=[
|
| 74 |
+
"hf datasets ls",
|
| 75 |
+
"hf datasets ls --sort downloads --limit 10",
|
| 76 |
+
'hf datasets ls --search "code"',
|
| 77 |
+
],
|
| 78 |
+
)
|
| 79 |
+
def datasets_ls(
|
| 80 |
+
search: SearchOpt = None,
|
| 81 |
+
author: AuthorOpt = None,
|
| 82 |
+
filter: FilterOpt = None,
|
| 83 |
+
sort: Annotated[
|
| 84 |
+
Optional[DatasetSortEnum],
|
| 85 |
+
typer.Option(help="Sort results."),
|
| 86 |
+
] = None,
|
| 87 |
+
limit: LimitOpt = 10,
|
| 88 |
+
expand: ExpandOpt = None,
|
| 89 |
+
format: FormatOpt = OutputFormat.table,
|
| 90 |
+
quiet: QuietOpt = False,
|
| 91 |
+
token: TokenOpt = None,
|
| 92 |
+
) -> None:
|
| 93 |
+
"""List datasets on the Hub."""
|
| 94 |
+
api = get_hf_api(token=token)
|
| 95 |
+
sort_key = sort.value if sort else None
|
| 96 |
+
results = [
|
| 97 |
+
api_object_to_dict(dataset_info)
|
| 98 |
+
for dataset_info in api.list_datasets(
|
| 99 |
+
filter=filter, author=author, search=search, sort=sort_key, limit=limit, expand=expand
|
| 100 |
+
)
|
| 101 |
+
]
|
| 102 |
+
print_list_output(results, format=format, quiet=quiet)
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
@datasets_cli.command(
|
| 106 |
+
"info",
|
| 107 |
+
examples=[
|
| 108 |
+
"hf datasets info HuggingFaceFW/fineweb",
|
| 109 |
+
"hf datasets info my-dataset --expand downloads,likes,tags",
|
| 110 |
+
],
|
| 111 |
+
)
|
| 112 |
+
def datasets_info(
|
| 113 |
+
dataset_id: Annotated[str, typer.Argument(help="The dataset ID (e.g. `username/repo-name`).")],
|
| 114 |
+
revision: RevisionOpt = None,
|
| 115 |
+
expand: ExpandOpt = None,
|
| 116 |
+
token: TokenOpt = None,
|
| 117 |
+
) -> None:
|
| 118 |
+
"""Get info about a dataset on the Hub."""
|
| 119 |
+
api = get_hf_api(token=token)
|
| 120 |
+
try:
|
| 121 |
+
info = api.dataset_info(repo_id=dataset_id, revision=revision, expand=expand) # type: ignore[arg-type]
|
| 122 |
+
except RepositoryNotFoundError as e:
|
| 123 |
+
raise CLIError(f"Dataset '{dataset_id}' not found.") from e
|
| 124 |
+
except RevisionNotFoundError as e:
|
| 125 |
+
raise CLIError(f"Revision '{revision}' not found on '{dataset_id}'.") from e
|
| 126 |
+
print(json.dumps(api_object_to_dict(info), indent=2))
|
venv/lib/python3.10/site-packages/huggingface_hub/cli/download.py
ADDED
|
@@ -0,0 +1,197 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 202-present, the HuggingFace Inc. team.
|
| 3 |
+
#
|
| 4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 5 |
+
# you may not use this file except in compliance with the License.
|
| 6 |
+
# You may obtain a copy of the License at
|
| 7 |
+
#
|
| 8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 9 |
+
#
|
| 10 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 13 |
+
# See the License for the specific language governing permissions and
|
| 14 |
+
# limitations under the License.
|
| 15 |
+
"""Contains command to download files from the Hub with the CLI.
|
| 16 |
+
|
| 17 |
+
Usage:
|
| 18 |
+
hf download --help
|
| 19 |
+
|
| 20 |
+
# Download file
|
| 21 |
+
hf download gpt2 config.json
|
| 22 |
+
|
| 23 |
+
# Download entire repo
|
| 24 |
+
hf download fffiloni/zeroscope --repo-type=space --revision=refs/pr/78
|
| 25 |
+
|
| 26 |
+
# Download repo with filters
|
| 27 |
+
hf download gpt2 --include="*.safetensors"
|
| 28 |
+
|
| 29 |
+
# Download with token
|
| 30 |
+
hf download Wauplin/private-model --token=hf_***
|
| 31 |
+
|
| 32 |
+
# Download quietly (no progress bar, no warnings, only the returned path)
|
| 33 |
+
hf download gpt2 config.json --quiet
|
| 34 |
+
|
| 35 |
+
# Download to local dir
|
| 36 |
+
hf download gpt2 --local-dir=./models/gpt2
|
| 37 |
+
"""
|
| 38 |
+
|
| 39 |
+
import warnings
|
| 40 |
+
from typing import Annotated, Optional, Union
|
| 41 |
+
|
| 42 |
+
import typer
|
| 43 |
+
|
| 44 |
+
from huggingface_hub import logging
|
| 45 |
+
from huggingface_hub._snapshot_download import snapshot_download
|
| 46 |
+
from huggingface_hub.file_download import DryRunFileInfo, hf_hub_download
|
| 47 |
+
from huggingface_hub.utils import _format_size, disable_progress_bars, enable_progress_bars, tabulate
|
| 48 |
+
|
| 49 |
+
from ._cli_utils import RepoIdArg, RepoTypeOpt, RevisionOpt, TokenOpt
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
DOWNLOAD_EXAMPLES = [
|
| 53 |
+
"hf download meta-llama/Llama-3.2-1B-Instruct",
|
| 54 |
+
"hf download meta-llama/Llama-3.2-1B-Instruct config.json tokenizer.json",
|
| 55 |
+
'hf download meta-llama/Llama-3.2-1B-Instruct --include "*.safetensors" --exclude "*.bin"',
|
| 56 |
+
"hf download meta-llama/Llama-3.2-1B-Instruct --local-dir ./models/llama",
|
| 57 |
+
]
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
logger = logging.get_logger(__name__)
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def download(
|
| 64 |
+
repo_id: RepoIdArg,
|
| 65 |
+
filenames: Annotated[
|
| 66 |
+
Optional[list[str]],
|
| 67 |
+
typer.Argument(
|
| 68 |
+
help="Files to download (e.g. `config.json`, `data/metadata.jsonl`).",
|
| 69 |
+
),
|
| 70 |
+
] = None,
|
| 71 |
+
repo_type: RepoTypeOpt = RepoTypeOpt.model,
|
| 72 |
+
revision: RevisionOpt = None,
|
| 73 |
+
include: Annotated[
|
| 74 |
+
Optional[list[str]],
|
| 75 |
+
typer.Option(
|
| 76 |
+
help="Glob patterns to include from files to download. eg: *.json",
|
| 77 |
+
),
|
| 78 |
+
] = None,
|
| 79 |
+
exclude: Annotated[
|
| 80 |
+
Optional[list[str]],
|
| 81 |
+
typer.Option(
|
| 82 |
+
help="Glob patterns to exclude from files to download.",
|
| 83 |
+
),
|
| 84 |
+
] = None,
|
| 85 |
+
cache_dir: Annotated[
|
| 86 |
+
Optional[str],
|
| 87 |
+
typer.Option(
|
| 88 |
+
help="Directory where to save files.",
|
| 89 |
+
),
|
| 90 |
+
] = None,
|
| 91 |
+
local_dir: Annotated[
|
| 92 |
+
Optional[str],
|
| 93 |
+
typer.Option(
|
| 94 |
+
help="If set, the downloaded file will be placed under this directory. Check out https://huggingface.co/docs/huggingface_hub/guides/download#download-files-to-a-local-folder for more details.",
|
| 95 |
+
),
|
| 96 |
+
] = None,
|
| 97 |
+
force_download: Annotated[
|
| 98 |
+
bool,
|
| 99 |
+
typer.Option(
|
| 100 |
+
help="If True, the files will be downloaded even if they are already cached.",
|
| 101 |
+
),
|
| 102 |
+
] = False,
|
| 103 |
+
dry_run: Annotated[
|
| 104 |
+
bool,
|
| 105 |
+
typer.Option(
|
| 106 |
+
help="If True, perform a dry run without actually downloading the file.",
|
| 107 |
+
),
|
| 108 |
+
] = False,
|
| 109 |
+
token: TokenOpt = None,
|
| 110 |
+
quiet: Annotated[
|
| 111 |
+
bool,
|
| 112 |
+
typer.Option(
|
| 113 |
+
help="If True, progress bars are disabled and only the path to the download files is printed.",
|
| 114 |
+
),
|
| 115 |
+
] = False,
|
| 116 |
+
max_workers: Annotated[
|
| 117 |
+
int,
|
| 118 |
+
typer.Option(
|
| 119 |
+
help="Maximum number of workers to use for downloading files. Default is 8.",
|
| 120 |
+
),
|
| 121 |
+
] = 8,
|
| 122 |
+
) -> None:
|
| 123 |
+
"""Download files from the Hub."""
|
| 124 |
+
|
| 125 |
+
def run_download() -> Union[str, DryRunFileInfo, list[DryRunFileInfo]]:
|
| 126 |
+
filenames_list = filenames if filenames is not None else []
|
| 127 |
+
# Warn user if patterns are ignored
|
| 128 |
+
if len(filenames_list) > 0:
|
| 129 |
+
if include is not None and len(include) > 0:
|
| 130 |
+
warnings.warn("Ignoring `--include` since filenames have being explicitly set.")
|
| 131 |
+
if exclude is not None and len(exclude) > 0:
|
| 132 |
+
warnings.warn("Ignoring `--exclude` since filenames have being explicitly set.")
|
| 133 |
+
|
| 134 |
+
# Single file to download: use `hf_hub_download`
|
| 135 |
+
if len(filenames_list) == 1:
|
| 136 |
+
return hf_hub_download(
|
| 137 |
+
repo_id=repo_id,
|
| 138 |
+
repo_type=repo_type.value,
|
| 139 |
+
revision=revision,
|
| 140 |
+
filename=filenames_list[0],
|
| 141 |
+
cache_dir=cache_dir,
|
| 142 |
+
force_download=force_download,
|
| 143 |
+
token=token,
|
| 144 |
+
local_dir=local_dir,
|
| 145 |
+
library_name="huggingface-cli",
|
| 146 |
+
dry_run=dry_run,
|
| 147 |
+
)
|
| 148 |
+
|
| 149 |
+
# Otherwise: use `snapshot_download` to ensure all files comes from same revision
|
| 150 |
+
if len(filenames_list) == 0:
|
| 151 |
+
allow_patterns = include
|
| 152 |
+
ignore_patterns = exclude
|
| 153 |
+
else:
|
| 154 |
+
allow_patterns = filenames_list
|
| 155 |
+
ignore_patterns = None
|
| 156 |
+
|
| 157 |
+
return snapshot_download(
|
| 158 |
+
repo_id=repo_id,
|
| 159 |
+
repo_type=repo_type.value,
|
| 160 |
+
revision=revision,
|
| 161 |
+
allow_patterns=allow_patterns,
|
| 162 |
+
ignore_patterns=ignore_patterns,
|
| 163 |
+
force_download=force_download,
|
| 164 |
+
cache_dir=cache_dir,
|
| 165 |
+
token=token,
|
| 166 |
+
local_dir=local_dir,
|
| 167 |
+
library_name="huggingface-cli",
|
| 168 |
+
max_workers=max_workers,
|
| 169 |
+
dry_run=dry_run,
|
| 170 |
+
)
|
| 171 |
+
|
| 172 |
+
def _print_result(result: Union[str, DryRunFileInfo, list[DryRunFileInfo]]) -> None:
|
| 173 |
+
if isinstance(result, str):
|
| 174 |
+
print(result)
|
| 175 |
+
return
|
| 176 |
+
|
| 177 |
+
# Print dry run info
|
| 178 |
+
if isinstance(result, DryRunFileInfo):
|
| 179 |
+
result = [result]
|
| 180 |
+
print(
|
| 181 |
+
f"[dry-run] Will download {len([r for r in result if r.will_download])} files (out of {len(result)}) totalling {_format_size(sum(r.file_size for r in result if r.will_download))}."
|
| 182 |
+
)
|
| 183 |
+
columns = ["File", "Bytes to download"]
|
| 184 |
+
items: list[list[Union[str, int]]] = []
|
| 185 |
+
for info in sorted(result, key=lambda x: x.filename):
|
| 186 |
+
items.append([info.filename, _format_size(info.file_size) if info.will_download else "-"])
|
| 187 |
+
print(tabulate(items, headers=columns))
|
| 188 |
+
|
| 189 |
+
if quiet:
|
| 190 |
+
disable_progress_bars()
|
| 191 |
+
with warnings.catch_warnings():
|
| 192 |
+
warnings.simplefilter("ignore")
|
| 193 |
+
_print_result(run_download())
|
| 194 |
+
enable_progress_bars()
|
| 195 |
+
else:
|
| 196 |
+
_print_result(run_download())
|
| 197 |
+
logging.set_verbosity_warning()
|
venv/lib/python3.10/site-packages/huggingface_hub/cli/hf.py
ADDED
|
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2020 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import sys
|
| 16 |
+
import traceback
|
| 17 |
+
|
| 18 |
+
from huggingface_hub import constants
|
| 19 |
+
from huggingface_hub.cli._cli_utils import check_cli_update, typer_factory
|
| 20 |
+
from huggingface_hub.cli._errors import format_known_exception
|
| 21 |
+
from huggingface_hub.cli.auth import auth_cli
|
| 22 |
+
from huggingface_hub.cli.cache import cache_cli
|
| 23 |
+
from huggingface_hub.cli.collections import collections_cli
|
| 24 |
+
from huggingface_hub.cli.datasets import datasets_cli
|
| 25 |
+
from huggingface_hub.cli.download import DOWNLOAD_EXAMPLES, download
|
| 26 |
+
from huggingface_hub.cli.inference_endpoints import ie_cli
|
| 27 |
+
from huggingface_hub.cli.jobs import jobs_cli
|
| 28 |
+
from huggingface_hub.cli.lfs import lfs_enable_largefiles, lfs_multipart_upload
|
| 29 |
+
from huggingface_hub.cli.models import models_cli
|
| 30 |
+
from huggingface_hub.cli.papers import papers_cli
|
| 31 |
+
from huggingface_hub.cli.repo import repo_cli
|
| 32 |
+
from huggingface_hub.cli.repo_files import repo_files_cli
|
| 33 |
+
from huggingface_hub.cli.skills import skills_cli
|
| 34 |
+
from huggingface_hub.cli.spaces import spaces_cli
|
| 35 |
+
from huggingface_hub.cli.system import env, version
|
| 36 |
+
from huggingface_hub.cli.upload import UPLOAD_EXAMPLES, upload
|
| 37 |
+
from huggingface_hub.cli.upload_large_folder import UPLOAD_LARGE_FOLDER_EXAMPLES, upload_large_folder
|
| 38 |
+
from huggingface_hub.errors import CLIError
|
| 39 |
+
from huggingface_hub.utils import ANSI, logging
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
app = typer_factory(help="Hugging Face Hub CLI")
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
# top level single commands (defined in their respective files)
|
| 46 |
+
app.command(examples=DOWNLOAD_EXAMPLES)(download)
|
| 47 |
+
app.command(examples=UPLOAD_EXAMPLES)(upload)
|
| 48 |
+
app.command(examples=UPLOAD_LARGE_FOLDER_EXAMPLES)(upload_large_folder)
|
| 49 |
+
|
| 50 |
+
app.command(topic="help")(env)
|
| 51 |
+
app.command(topic="help")(version)
|
| 52 |
+
|
| 53 |
+
app.command(hidden=True)(lfs_enable_largefiles)
|
| 54 |
+
app.command(hidden=True)(lfs_multipart_upload)
|
| 55 |
+
|
| 56 |
+
# command groups
|
| 57 |
+
app.add_typer(auth_cli, name="auth")
|
| 58 |
+
app.add_typer(cache_cli, name="cache")
|
| 59 |
+
app.add_typer(collections_cli, name="collections")
|
| 60 |
+
app.add_typer(datasets_cli, name="datasets")
|
| 61 |
+
app.add_typer(jobs_cli, name="jobs")
|
| 62 |
+
app.add_typer(models_cli, name="models")
|
| 63 |
+
app.add_typer(papers_cli, name="papers")
|
| 64 |
+
app.add_typer(repo_cli, name="repo")
|
| 65 |
+
app.add_typer(repo_files_cli, name="repo-files")
|
| 66 |
+
app.add_typer(skills_cli, name="skills")
|
| 67 |
+
app.add_typer(spaces_cli, name="spaces")
|
| 68 |
+
app.add_typer(ie_cli, name="endpoints")
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def main():
|
| 72 |
+
if not constants.HF_DEBUG:
|
| 73 |
+
logging.set_verbosity_info()
|
| 74 |
+
check_cli_update("huggingface_hub")
|
| 75 |
+
|
| 76 |
+
try:
|
| 77 |
+
app()
|
| 78 |
+
except CLIError as e:
|
| 79 |
+
print(f"Error: {e}", file=sys.stderr)
|
| 80 |
+
if constants.HF_DEBUG:
|
| 81 |
+
traceback.print_exc()
|
| 82 |
+
else:
|
| 83 |
+
print(ANSI.gray("Set HF_DEBUG=1 as environment variable for full traceback."))
|
| 84 |
+
sys.exit(1)
|
| 85 |
+
except Exception as e:
|
| 86 |
+
message = format_known_exception(e)
|
| 87 |
+
if message:
|
| 88 |
+
print(f"Error: {message}", file=sys.stderr)
|
| 89 |
+
if constants.HF_DEBUG:
|
| 90 |
+
traceback.print_exc()
|
| 91 |
+
else:
|
| 92 |
+
print(ANSI.gray("Set HF_DEBUG=1 as environment variable for full traceback."))
|
| 93 |
+
sys.exit(1)
|
| 94 |
+
raise
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
if __name__ == "__main__":
|
| 98 |
+
main()
|
venv/lib/python3.10/site-packages/huggingface_hub/cli/inference_endpoints.py
ADDED
|
@@ -0,0 +1,456 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""CLI commands for Hugging Face Inference Endpoints."""
|
| 2 |
+
|
| 3 |
+
import json
|
| 4 |
+
from typing import Annotated, Any, Optional
|
| 5 |
+
|
| 6 |
+
import typer
|
| 7 |
+
|
| 8 |
+
from huggingface_hub._inference_endpoints import InferenceEndpoint, InferenceEndpointScalingMetric
|
| 9 |
+
from huggingface_hub.errors import HfHubHTTPError
|
| 10 |
+
|
| 11 |
+
from ._cli_utils import (
|
| 12 |
+
FormatOpt,
|
| 13 |
+
OutputFormat,
|
| 14 |
+
QuietOpt,
|
| 15 |
+
TokenOpt,
|
| 16 |
+
get_hf_api,
|
| 17 |
+
print_list_output,
|
| 18 |
+
typer_factory,
|
| 19 |
+
)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
ie_cli = typer_factory(help="Manage Hugging Face Inference Endpoints.")
|
| 23 |
+
|
| 24 |
+
catalog_app = typer_factory(help="Interact with the Inference Endpoints catalog.")
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
NameArg = Annotated[
|
| 28 |
+
str,
|
| 29 |
+
typer.Argument(help="Endpoint name."),
|
| 30 |
+
]
|
| 31 |
+
NameOpt = Annotated[
|
| 32 |
+
Optional[str],
|
| 33 |
+
typer.Option(help="Endpoint name."),
|
| 34 |
+
]
|
| 35 |
+
|
| 36 |
+
NamespaceOpt = Annotated[
|
| 37 |
+
Optional[str],
|
| 38 |
+
typer.Option(
|
| 39 |
+
help="The namespace associated with the Inference Endpoint. Defaults to the current user's namespace.",
|
| 40 |
+
),
|
| 41 |
+
]
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def _print_endpoint(endpoint: InferenceEndpoint) -> None:
|
| 45 |
+
typer.echo(json.dumps(endpoint.raw, indent=2, sort_keys=True))
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
@ie_cli.command(examples=["hf endpoints ls", "hf endpoints ls --namespace my-org"])
|
| 49 |
+
def ls(
|
| 50 |
+
namespace: NamespaceOpt = None,
|
| 51 |
+
format: FormatOpt = OutputFormat.table,
|
| 52 |
+
quiet: QuietOpt = False,
|
| 53 |
+
token: TokenOpt = None,
|
| 54 |
+
) -> None:
|
| 55 |
+
"""Lists all Inference Endpoints for the given namespace."""
|
| 56 |
+
api = get_hf_api(token=token)
|
| 57 |
+
try:
|
| 58 |
+
endpoints = api.list_inference_endpoints(namespace=namespace, token=token)
|
| 59 |
+
except HfHubHTTPError as error:
|
| 60 |
+
typer.echo(f"Listing failed: {error}")
|
| 61 |
+
raise typer.Exit(code=error.response.status_code) from error
|
| 62 |
+
|
| 63 |
+
results = [endpoint.raw for endpoint in endpoints]
|
| 64 |
+
|
| 65 |
+
def row_fn(item: dict[str, Any]) -> list[str]:
|
| 66 |
+
status = item.get("status", {})
|
| 67 |
+
model = item.get("model", {})
|
| 68 |
+
compute = item.get("compute", {})
|
| 69 |
+
provider = item.get("provider", {})
|
| 70 |
+
return [
|
| 71 |
+
str(item.get("name", "")),
|
| 72 |
+
str(model.get("repository", "") if isinstance(model, dict) else ""),
|
| 73 |
+
str(status.get("state", "") if isinstance(status, dict) else ""),
|
| 74 |
+
str(model.get("task", "") if isinstance(model, dict) else ""),
|
| 75 |
+
str(model.get("framework", "") if isinstance(model, dict) else ""),
|
| 76 |
+
str(compute.get("instanceType", "") if isinstance(compute, dict) else ""),
|
| 77 |
+
str(provider.get("vendor", "") if isinstance(provider, dict) else ""),
|
| 78 |
+
str(provider.get("region", "") if isinstance(provider, dict) else ""),
|
| 79 |
+
]
|
| 80 |
+
|
| 81 |
+
print_list_output(
|
| 82 |
+
items=results,
|
| 83 |
+
format=format,
|
| 84 |
+
quiet=quiet,
|
| 85 |
+
id_key="name",
|
| 86 |
+
headers=["NAME", "MODEL", "STATUS", "TASK", "FRAMEWORK", "INSTANCE", "VENDOR", "REGION"],
|
| 87 |
+
row_fn=row_fn,
|
| 88 |
+
)
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
@ie_cli.command(name="deploy", examples=["hf endpoints deploy my-endpoint --repo gpt2 --framework pytorch ..."])
|
| 92 |
+
def deploy(
|
| 93 |
+
name: NameArg,
|
| 94 |
+
repo: Annotated[
|
| 95 |
+
str,
|
| 96 |
+
typer.Option(
|
| 97 |
+
help="The name of the model repository associated with the Inference Endpoint (e.g. 'openai/gpt-oss-120b').",
|
| 98 |
+
),
|
| 99 |
+
],
|
| 100 |
+
framework: Annotated[
|
| 101 |
+
str,
|
| 102 |
+
typer.Option(
|
| 103 |
+
help="The machine learning framework used for the model (e.g. 'vllm').",
|
| 104 |
+
),
|
| 105 |
+
],
|
| 106 |
+
accelerator: Annotated[
|
| 107 |
+
str,
|
| 108 |
+
typer.Option(
|
| 109 |
+
help="The hardware accelerator to be used for inference (e.g. 'cpu').",
|
| 110 |
+
),
|
| 111 |
+
],
|
| 112 |
+
instance_size: Annotated[
|
| 113 |
+
str,
|
| 114 |
+
typer.Option(
|
| 115 |
+
help="The size or type of the instance to be used for hosting the model (e.g. 'x4').",
|
| 116 |
+
),
|
| 117 |
+
],
|
| 118 |
+
instance_type: Annotated[
|
| 119 |
+
str,
|
| 120 |
+
typer.Option(
|
| 121 |
+
help="The cloud instance type where the Inference Endpoint will be deployed (e.g. 'intel-icl').",
|
| 122 |
+
),
|
| 123 |
+
],
|
| 124 |
+
region: Annotated[
|
| 125 |
+
str,
|
| 126 |
+
typer.Option(
|
| 127 |
+
help="The cloud region in which the Inference Endpoint will be created (e.g. 'us-east-1').",
|
| 128 |
+
),
|
| 129 |
+
],
|
| 130 |
+
vendor: Annotated[
|
| 131 |
+
str,
|
| 132 |
+
typer.Option(
|
| 133 |
+
help="The cloud provider or vendor where the Inference Endpoint will be hosted (e.g. 'aws').",
|
| 134 |
+
),
|
| 135 |
+
],
|
| 136 |
+
*,
|
| 137 |
+
namespace: NamespaceOpt = None,
|
| 138 |
+
task: Annotated[
|
| 139 |
+
Optional[str],
|
| 140 |
+
typer.Option(
|
| 141 |
+
help="The task on which to deploy the model (e.g. 'text-classification').",
|
| 142 |
+
),
|
| 143 |
+
] = None,
|
| 144 |
+
token: TokenOpt = None,
|
| 145 |
+
min_replica: Annotated[
|
| 146 |
+
int,
|
| 147 |
+
typer.Option(
|
| 148 |
+
help="The minimum number of replicas (instances) to keep running for the Inference Endpoint.",
|
| 149 |
+
),
|
| 150 |
+
] = 1,
|
| 151 |
+
max_replica: Annotated[
|
| 152 |
+
int,
|
| 153 |
+
typer.Option(
|
| 154 |
+
help="The maximum number of replicas (instances) to scale to for the Inference Endpoint.",
|
| 155 |
+
),
|
| 156 |
+
] = 1,
|
| 157 |
+
scale_to_zero_timeout: Annotated[
|
| 158 |
+
Optional[int],
|
| 159 |
+
typer.Option(
|
| 160 |
+
help="The duration in minutes before an inactive endpoint is scaled to zero.",
|
| 161 |
+
),
|
| 162 |
+
] = None,
|
| 163 |
+
scaling_metric: Annotated[
|
| 164 |
+
Optional[InferenceEndpointScalingMetric],
|
| 165 |
+
typer.Option(
|
| 166 |
+
help="The metric reference for scaling.",
|
| 167 |
+
),
|
| 168 |
+
] = None,
|
| 169 |
+
scaling_threshold: Annotated[
|
| 170 |
+
Optional[float],
|
| 171 |
+
typer.Option(
|
| 172 |
+
help="The scaling metric threshold used to trigger a scale up. Ignored when scaling metric is not provided.",
|
| 173 |
+
),
|
| 174 |
+
] = None,
|
| 175 |
+
) -> None:
|
| 176 |
+
"""Deploy an Inference Endpoint from a Hub repository."""
|
| 177 |
+
api = get_hf_api(token=token)
|
| 178 |
+
endpoint = api.create_inference_endpoint(
|
| 179 |
+
name=name,
|
| 180 |
+
repository=repo,
|
| 181 |
+
framework=framework,
|
| 182 |
+
accelerator=accelerator,
|
| 183 |
+
instance_size=instance_size,
|
| 184 |
+
instance_type=instance_type,
|
| 185 |
+
region=region,
|
| 186 |
+
vendor=vendor,
|
| 187 |
+
namespace=namespace,
|
| 188 |
+
task=task,
|
| 189 |
+
token=token,
|
| 190 |
+
min_replica=min_replica,
|
| 191 |
+
max_replica=max_replica,
|
| 192 |
+
scaling_metric=scaling_metric,
|
| 193 |
+
scaling_threshold=scaling_threshold,
|
| 194 |
+
scale_to_zero_timeout=scale_to_zero_timeout,
|
| 195 |
+
)
|
| 196 |
+
|
| 197 |
+
_print_endpoint(endpoint)
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
@catalog_app.command(name="deploy", examples=["hf endpoints catalog deploy --repo meta-llama/Llama-3.2-1B-Instruct"])
def deploy_from_catalog(
    repo: Annotated[
        str,
        typer.Option(
            help="The name of the model repository associated with the Inference Endpoint (e.g. 'openai/gpt-oss-120b').",
        ),
    ],
    name: NameOpt = None,
    namespace: NamespaceOpt = None,
    token: TokenOpt = None,
) -> None:
    """Deploy an Inference Endpoint from the Model Catalog."""
    api = get_hf_api(token=token)
    try:
        endpoint = api.create_inference_endpoint_from_catalog(
            repo_id=repo, name=name, namespace=namespace, token=token
        )
    except HfHubHTTPError as err:
        # Report the failure and reuse the HTTP status code as the exit code.
        typer.echo(f"Deployment failed: {err}")
        raise typer.Exit(code=err.response.status_code) from err

    _print_endpoint(endpoint)
|
| 226 |
+
|
| 227 |
+
|
| 228 |
+
def list_catalog(
    token: TokenOpt = None,
) -> None:
    """List available Catalog models."""
    api = get_hf_api(token=token)
    try:
        models = api.list_inference_catalog(token=token)
    except HfHubHTTPError as err:
        typer.echo(f"Catalog fetch failed: {err}")
        raise typer.Exit(code=err.response.status_code) from err

    # Emit the catalog as stable, pretty-printed JSON.
    payload = json.dumps({"models": models}, indent=2, sort_keys=True)
    typer.echo(payload)
|
| 240 |
+
|
| 241 |
+
|
| 242 |
+
# Register `list_catalog` under two entry points: the canonical
# `hf endpoints catalog ls` and a hidden `hf endpoints list-catalog`
# (hidden=True — presumably kept as a legacy alias; confirm against changelog).
catalog_app.command(name="ls", examples=["hf endpoints catalog ls"])(list_catalog)
ie_cli.command(name="list-catalog", hidden=True)(list_catalog)


# Mount the catalog sub-commands under `hf endpoints catalog ...`.
ie_cli.add_typer(catalog_app, name="catalog")
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
@ie_cli.command(examples=["hf endpoints describe my-endpoint"])
def describe(
    name: NameArg,
    namespace: NamespaceOpt = None,
    token: TokenOpt = None,
) -> None:
    """Get information about an existing endpoint."""
    api = get_hf_api(token=token)
    try:
        endpoint = api.get_inference_endpoint(name=name, namespace=namespace, token=token)
    except HfHubHTTPError as err:
        # Report the failure and reuse the HTTP status code as the exit code.
        typer.echo(f"Fetch failed: {err}")
        raise typer.Exit(code=err.response.status_code) from err

    _print_endpoint(endpoint)
|
| 264 |
+
|
| 265 |
+
|
| 266 |
+
@ie_cli.command(examples=["hf endpoints update my-endpoint --min-replica 2"])
def update(
    name: NameArg,
    namespace: NamespaceOpt = None,
    repo: Annotated[
        Optional[str],
        typer.Option(
            help="The name of the model repository associated with the Inference Endpoint (e.g. 'openai/gpt-oss-120b').",
        ),
    ] = None,
    accelerator: Annotated[
        Optional[str],
        typer.Option(
            help="The hardware accelerator to be used for inference (e.g. 'cpu').",
        ),
    ] = None,
    instance_size: Annotated[
        Optional[str],
        typer.Option(
            help="The size or type of the instance to be used for hosting the model (e.g. 'x4').",
        ),
    ] = None,
    instance_type: Annotated[
        Optional[str],
        typer.Option(
            help="The cloud instance type where the Inference Endpoint will be deployed (e.g. 'intel-icl').",
        ),
    ] = None,
    framework: Annotated[
        Optional[str],
        typer.Option(
            help="The machine learning framework used for the model (e.g. 'custom').",
        ),
    ] = None,
    revision: Annotated[
        Optional[str],
        typer.Option(
            help="The specific model revision to deploy on the Inference Endpoint (e.g. '6c0e6080953db56375760c0471a8c5f2929baf11').",
        ),
    ] = None,
    task: Annotated[
        Optional[str],
        typer.Option(
            help="The task on which to deploy the model (e.g. 'text-classification').",
        ),
    ] = None,
    min_replica: Annotated[
        Optional[int],
        typer.Option(
            help="The minimum number of replicas (instances) to keep running for the Inference Endpoint.",
        ),
    ] = None,
    max_replica: Annotated[
        Optional[int],
        typer.Option(
            help="The maximum number of replicas (instances) to scale to for the Inference Endpoint.",
        ),
    ] = None,
    scale_to_zero_timeout: Annotated[
        Optional[int],
        typer.Option(
            help="The duration in minutes before an inactive endpoint is scaled to zero.",
        ),
    ] = None,
    scaling_metric: Annotated[
        Optional[InferenceEndpointScalingMetric],
        typer.Option(
            help="The metric reference for scaling.",
        ),
    ] = None,
    scaling_threshold: Annotated[
        Optional[float],
        typer.Option(
            help="The scaling metric threshold used to trigger a scale up. Ignored when scaling metric is not provided.",
        ),
    ] = None,
    token: TokenOpt = None,
) -> None:
    """Update an existing endpoint."""
    api = get_hf_api(token=token)
    try:
        # Every CLI option is forwarded as-is; unspecified options are None.
        # NOTE(review): presumably None means "leave this field unchanged" —
        # confirm against HfApi.update_inference_endpoint's documentation.
        endpoint = api.update_inference_endpoint(
            name=name,
            namespace=namespace,
            repository=repo,
            framework=framework,
            revision=revision,
            task=task,
            accelerator=accelerator,
            instance_size=instance_size,
            instance_type=instance_type,
            min_replica=min_replica,
            max_replica=max_replica,
            scale_to_zero_timeout=scale_to_zero_timeout,
            scaling_metric=scaling_metric,
            scaling_threshold=scaling_threshold,
            token=token,
        )
    except HfHubHTTPError as error:
        # Report the failure and reuse the HTTP status code as the exit code.
        typer.echo(f"Update failed: {error}")
        raise typer.Exit(code=error.response.status_code) from error
    _print_endpoint(endpoint)
|
| 368 |
+
|
| 369 |
+
|
| 370 |
+
@ie_cli.command(examples=["hf endpoints delete my-endpoint"])
def delete(
    name: NameArg,
    namespace: NamespaceOpt = None,
    yes: Annotated[
        bool,
        typer.Option("--yes", help="Skip confirmation prompts."),
    ] = False,
    token: TokenOpt = None,
) -> None:
    """Delete an Inference Endpoint permanently."""
    if not yes:
        # Destructive operation: require the user to re-type the endpoint name.
        typed = typer.prompt(f"Delete endpoint '{name}'? Type the name to confirm.")
        if typed != name:
            typer.echo("Aborted.")
            raise typer.Exit(code=2)

    api = get_hf_api(token=token)
    try:
        api.delete_inference_endpoint(name=name, namespace=namespace, token=token)
    except HfHubHTTPError as err:
        typer.echo(f"Delete failed: {err}")
        raise typer.Exit(code=err.response.status_code) from err

    typer.echo(f"Deleted '{name}'.")
|
| 395 |
+
|
| 396 |
+
|
| 397 |
+
@ie_cli.command(examples=["hf endpoints pause my-endpoint"])
def pause(
    name: NameArg,
    namespace: NamespaceOpt = None,
    token: TokenOpt = None,
) -> None:
    """Pause an Inference Endpoint."""
    api = get_hf_api(token=token)
    try:
        endpoint = api.pause_inference_endpoint(name=name, namespace=namespace, token=token)
    except HfHubHTTPError as err:
        # Report the failure and reuse the HTTP status code as the exit code.
        typer.echo(f"Pause failed: {err}")
        raise typer.Exit(code=err.response.status_code) from err

    _print_endpoint(endpoint)
|
| 412 |
+
|
| 413 |
+
|
| 414 |
+
@ie_cli.command(examples=["hf endpoints resume my-endpoint"])
def resume(
    name: NameArg,
    namespace: NamespaceOpt = None,
    fail_if_already_running: Annotated[
        bool,
        typer.Option(
            "--fail-if-already-running",
            help="If `True`, the method will raise an error if the Inference Endpoint is already running.",
        ),
    ] = False,
    token: TokenOpt = None,
) -> None:
    """Resume an Inference Endpoint."""
    api = get_hf_api(token=token)
    # The API flag is inverted relative to the CLI one: running_ok=True means
    # "do not error if already running".
    try:
        endpoint = api.resume_inference_endpoint(
            name=name, namespace=namespace, token=token, running_ok=not fail_if_already_running
        )
    except HfHubHTTPError as err:
        typer.echo(f"Resume failed: {err}")
        raise typer.Exit(code=err.response.status_code) from err
    _print_endpoint(endpoint)
|
| 440 |
+
|
| 441 |
+
|
| 442 |
+
@ie_cli.command(examples=["hf endpoints scale-to-zero my-endpoint"])
def scale_to_zero(
    name: NameArg,
    namespace: NamespaceOpt = None,
    token: TokenOpt = None,
) -> None:
    """Scale an Inference Endpoint to zero."""
    api = get_hf_api(token=token)
    try:
        endpoint = api.scale_to_zero_inference_endpoint(name=name, namespace=namespace, token=token)
    except HfHubHTTPError as err:
        # Report the failure and reuse the HTTP status code as the exit code.
        typer.echo(f"Scale To Zero failed: {err}")
        raise typer.Exit(code=err.response.status_code) from err

    _print_endpoint(endpoint)
|
venv/lib/python3.10/site-packages/huggingface_hub/cli/jobs.py
ADDED
|
@@ -0,0 +1,1078 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2025 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
"""Contains commands to interact with jobs on the Hugging Face Hub.
|
| 15 |
+
|
| 16 |
+
Usage:
|
| 17 |
+
# run a job
|
| 18 |
+
hf jobs run <image> <command>
|
| 19 |
+
|
| 20 |
+
# List running or completed jobs
|
| 21 |
+
hf jobs ps [-a] [-f key=value] [--format TEMPLATE]
|
| 22 |
+
|
| 23 |
+
# Stream logs from a job
|
| 24 |
+
hf jobs logs <job-id>
|
| 25 |
+
|
| 26 |
+
# Stream resources usage stats and metrics from a job
|
| 27 |
+
hf jobs stats <job-id>
|
| 28 |
+
|
| 29 |
+
# Inspect detailed information about a job
|
| 30 |
+
hf jobs inspect <job-id>
|
| 31 |
+
|
| 32 |
+
# Cancel a running job
|
| 33 |
+
hf jobs cancel <job-id>
|
| 34 |
+
|
| 35 |
+
# List available hardware options
|
| 36 |
+
hf jobs hardware
|
| 37 |
+
|
| 38 |
+
# Run a UV script
|
| 39 |
+
hf jobs uv run <script>
|
| 40 |
+
|
| 41 |
+
# Schedule a job
|
| 42 |
+
hf jobs scheduled run <schedule> <image> <command>
|
| 43 |
+
|
| 44 |
+
# List scheduled jobs
|
| 45 |
+
hf jobs scheduled ps [-a] [-f key=value] [--format TEMPLATE]
|
| 46 |
+
|
| 47 |
+
# Inspect a scheduled job
|
| 48 |
+
hf jobs scheduled inspect <scheduled_job_id>
|
| 49 |
+
|
| 50 |
+
# Suspend a scheduled job
|
| 51 |
+
hf jobs scheduled suspend <scheduled_job_id>
|
| 52 |
+
|
| 53 |
+
# Resume a scheduled job
|
| 54 |
+
hf jobs scheduled resume <scheduled_job_id>
|
| 55 |
+
|
| 56 |
+
# Delete a scheduled job
|
| 57 |
+
hf jobs scheduled delete <scheduled_job_id>
|
| 58 |
+
|
| 59 |
+
"""
|
| 60 |
+
|
| 61 |
+
import json
|
| 62 |
+
import multiprocessing
|
| 63 |
+
import multiprocessing.pool
|
| 64 |
+
import os
|
| 65 |
+
import time
|
| 66 |
+
from dataclasses import asdict
|
| 67 |
+
from fnmatch import fnmatch
|
| 68 |
+
from pathlib import Path
|
| 69 |
+
from queue import Empty, Queue
|
| 70 |
+
from typing import Annotated, Any, Callable, Dict, Iterable, Optional, TypeVar, Union
|
| 71 |
+
|
| 72 |
+
import typer
|
| 73 |
+
|
| 74 |
+
from huggingface_hub import SpaceHardware, get_token
|
| 75 |
+
from huggingface_hub.errors import CLIError, HfHubHTTPError
|
| 76 |
+
from huggingface_hub.utils import logging
|
| 77 |
+
from huggingface_hub.utils._cache_manager import _format_size
|
| 78 |
+
from huggingface_hub.utils._dotenv import load_dotenv
|
| 79 |
+
|
| 80 |
+
from ._cli_utils import TokenOpt, get_hf_api, typer_factory
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
logger = logging.get_logger(__name__)
|
| 84 |
+
|
| 85 |
+
# Hardware flavors offered in help/suggestions. "zero-a10g" is filtered out —
# presumably because ZeroGPU hardware does not apply to Jobs; confirm.
SUGGESTED_FLAVORS = [item.value for item in SpaceHardware if item.value != "zero-a10g"]
STATS_UPDATE_MIN_INTERVAL = 0.1  # we set a limit here since there is one update per second per job

# Common job-related options
# Reusable `Annotated` aliases for typer Arguments/Options shared across the
# `hf jobs` sub-commands, so each command declares them consistently.
ImageArg = Annotated[
    str,
    typer.Argument(
        help="The Docker image to use.",
    ),
]

ImageOpt = Annotated[
    Optional[str],
    typer.Option(
        help="Use a custom Docker image with `uv` installed.",
    ),
]

FlavorOpt = Annotated[
    Optional[SpaceHardware],
    typer.Option(
        help="Flavor for the hardware, as in HF Spaces. Run 'hf jobs hardware' to list available flavors. Defaults to `cpu-basic`.",
    ),
]

EnvOpt = Annotated[
    Optional[list[str]],
    typer.Option(
        "-e",
        "--env",
        help="Set environment variables. E.g. --env ENV=value",
    ),
]

SecretsOpt = Annotated[
    Optional[list[str]],
    typer.Option(
        "-s",
        "--secrets",
        help="Set secret environment variables. E.g. --secrets SECRET=value or `--secrets HF_TOKEN` to pass your Hugging Face token.",
    ),
]

LabelsOpt = Annotated[
    Optional[list[str]],
    typer.Option(
        "-l",
        "--label",
        help="Set labels. E.g. --label KEY=VALUE or --label LABEL",
    ),
]

EnvFileOpt = Annotated[
    Optional[str],
    typer.Option(
        "--env-file",
        help="Read in a file of environment variables.",
    ),
]

SecretsFileOpt = Annotated[
    Optional[str],
    typer.Option(
        help="Read in a file of secret environment variables.",
    ),
]

TimeoutOpt = Annotated[
    Optional[str],
    typer.Option(
        help="Max duration: int/float with s (seconds, default), m (minutes), h (hours) or d (days).",
    ),
]

DetachOpt = Annotated[
    bool,
    typer.Option(
        "-d",
        "--detach",
        help="Run the Job in the background and print the Job ID.",
    ),
]

NamespaceOpt = Annotated[
    Optional[str],
    typer.Option(
        help="The namespace where the job will be running. Defaults to the current user's namespace.",
    ),
]

WithOpt = Annotated[
    Optional[list[str]],
    typer.Option(
        "--with",
        help="Run with the given packages installed",
    ),
]

PythonOpt = Annotated[
    Optional[str],
    typer.Option(
        "-p",
        "--python",
        help="The Python interpreter to use for the run environment",
    ),
]

SuspendOpt = Annotated[
    Optional[bool],
    typer.Option(
        help="Suspend (pause) the scheduled Job",
    ),
]

ConcurrencyOpt = Annotated[
    Optional[bool],
    typer.Option(
        help="Allow multiple instances of this Job to run concurrently",
    ),
]

ScheduleArg = Annotated[
    str,
    typer.Argument(
        help="One of annually, yearly, monthly, weekly, daily, hourly, or a CRON schedule expression.",
    ),
]

ScriptArg = Annotated[
    str,
    typer.Argument(
        help="UV script to run (local file or URL)",
    ),
]

ScriptArgsArg = Annotated[
    Optional[list[str]],
    typer.Argument(
        help="Arguments for the script",
    ),
]

CommandArg = Annotated[
    list[str],
    typer.Argument(
        help="The command to run.",
    ),
]

JobIdArg = Annotated[
    str,
    typer.Argument(
        help="Job ID",
    ),
]

JobIdsArg = Annotated[
    Optional[list[str]],
    typer.Argument(
        help="Job IDs",
    ),
]

ScheduledJobIdArg = Annotated[
    str,
    typer.Argument(
        help="Scheduled Job ID",
    ),
]


# Root typer app for the `hf jobs` command group.
jobs_cli = typer_factory(help="Run and manage Jobs on the Hub.")
|
| 257 |
+
|
| 258 |
+
|
| 259 |
+
@jobs_cli.command(
    "run",
    context_settings={"ignore_unknown_options": True},
    examples=[
        "hf jobs run python:3.12 python -c 'print(\"Hello!\")'",
        "hf jobs run -e FOO=foo python:3.12 python script.py",
        "hf jobs run --secrets HF_TOKEN python:3.12 python script.py",
    ],
)
def jobs_run(
    image: ImageArg,
    command: CommandArg,
    env: EnvOpt = None,
    secrets: SecretsOpt = None,
    label: LabelsOpt = None,
    env_file: EnvFileOpt = None,
    secrets_file: SecretsFileOpt = None,
    flavor: FlavorOpt = None,
    timeout: TimeoutOpt = None,
    detach: DetachOpt = False,
    namespace: NamespaceOpt = None,
    token: TokenOpt = None,
) -> None:
    """Run a Job."""
    # Merge environment variables: --env-file first, then each -e/--env flag,
    # so flags passed on the command line win on key conflicts.
    env_map: dict[str, Optional[str]] = {}
    if env_file:
        env_map.update(load_dotenv(Path(env_file).read_text(), environ=os.environ.copy()))
    for env_value in env or []:
        env_map.update(load_dotenv(env_value, environ=os.environ.copy()))

    # Secrets are resolved against an extended environ (via _get_extended_environ;
    # presumably so bare names like `--secrets HF_TOKEN` resolve — confirm there).
    secrets_map: dict[str, Optional[str]] = {}
    extended_environ = _get_extended_environ()
    if secrets_file:
        secrets_map.update(load_dotenv(Path(secrets_file).read_text(), environ=extended_environ))
    for secret in secrets or []:
        secrets_map.update(load_dotenv(secret, environ=extended_environ))

    api = get_hf_api(token=token)
    job = api.run_job(
        image=image,
        command=command,
        env=env_map,
        secrets=secrets_map,
        labels=_parse_labels_map(label),
        flavor=flavor,
        timeout=timeout,
        namespace=namespace,
    )
    # Always print the job ID to the user
    print(f"Job started with ID: {job.id}")
    print(f"View at: {job.url}")

    if detach:
        return
    # Now let's stream the logs
    for log in api.fetch_job_logs(job_id=job.id, namespace=job.owner.name):
        print(log)
|
| 316 |
+
|
| 317 |
+
|
| 318 |
+
@jobs_cli.command("logs", examples=["hf jobs logs <job_id>"])
def jobs_logs(
    job_id: JobIdArg,
    namespace: NamespaceOpt = None,
    token: TokenOpt = None,
) -> None:
    """Fetch the logs of a Job"""
    api = get_hf_api(token=token)
    try:
        for log in api.fetch_job_logs(job_id=job_id, namespace=namespace):
            print(log)
    except HfHubHTTPError as e:
        status = e.response.status_code if e.response is not None else None
        # Translate well-known HTTP errors into friendlier CLI messages.
        if status == 404:
            message = "Job not found. Please check the job ID."
        elif status == 403:
            message = "Access denied. You may not have permission to view this job."
        else:
            message = f"Failed to fetch job logs: {e}"
        raise CLIError(message) from e
|
| 337 |
+
|
| 338 |
+
|
| 339 |
+
def _matches_filters(job_properties: dict[str, str], filters: list[tuple[str, str, str]]) -> bool:
|
| 340 |
+
"""Check if scheduled job matches all specified filters."""
|
| 341 |
+
for key, op_str, pattern in filters:
|
| 342 |
+
value = job_properties.get(key)
|
| 343 |
+
if value is None:
|
| 344 |
+
if op_str == "!=":
|
| 345 |
+
continue
|
| 346 |
+
return False
|
| 347 |
+
match = fnmatch(value.lower(), pattern.lower())
|
| 348 |
+
if (op_str == "=" and not match) or (op_str == "!=" and match):
|
| 349 |
+
return False
|
| 350 |
+
return True
|
| 351 |
+
|
| 352 |
+
|
| 353 |
+
def _print_output(
|
| 354 |
+
rows: list[list[Union[str, int]]], headers: list[str], aliases: list[str], fmt: Optional[str]
|
| 355 |
+
) -> None:
|
| 356 |
+
"""Print output according to the chosen format."""
|
| 357 |
+
if fmt:
|
| 358 |
+
# Use custom template if provided
|
| 359 |
+
template = fmt
|
| 360 |
+
for row in rows:
|
| 361 |
+
line = template
|
| 362 |
+
for i, field in enumerate(aliases):
|
| 363 |
+
placeholder = f"{{{{.{field}}}}}"
|
| 364 |
+
if placeholder in line:
|
| 365 |
+
line = line.replace(placeholder, str(row[i]))
|
| 366 |
+
print(line)
|
| 367 |
+
else:
|
| 368 |
+
# Default tabular format
|
| 369 |
+
print(_tabulate(rows, headers=headers))
|
| 370 |
+
|
| 371 |
+
|
| 372 |
+
def _clear_line(n: int) -> None:
|
| 373 |
+
LINE_UP = "\033[1A"
|
| 374 |
+
LINE_CLEAR = "\x1b[2K"
|
| 375 |
+
for i in range(n):
|
| 376 |
+
print(LINE_UP, end=LINE_CLEAR)
|
| 377 |
+
|
| 378 |
+
|
| 379 |
+
def _get_jobs_stats_rows(
    job_id: str, metrics_stream: Iterable[dict[str, Any]], table_headers: list[str]
) -> Iterable[tuple[bool, str, list[list[Union[str, int]]]]]:
    """Yield stats-table rows for one job's metrics stream.

    For each metrics sample, yields ``(False, job_id, rows)`` where ``rows``
    holds one row per GPU (or a single row when no GPU data is present).
    After the stream ends, yields a final ``(True, job_id, [])`` sentinel.
    """
    for metrics in metrics_stream:
        base_row: list[Union[str, int]] = [
            job_id,
            f"{metrics['cpu_usage_pct']}%",
            round(metrics["cpu_millicores"] / 1000.0, 1),
            f"{round(100 * metrics['memory_used_bytes'] / metrics['memory_total_bytes'], 2)}%",
            f"{_format_size(metrics['memory_used_bytes'])}B / {_format_size(metrics['memory_total_bytes'])}B",
            f"{_format_size(metrics['rx_bps'])}bps / {_format_size(metrics['tx_bps'])}bps",
        ]
        if metrics["gpus"] and isinstance(metrics["gpus"], dict):
            # One row per GPU: the first row carries the CPU/memory columns,
            # subsequent rows are blank-padded. Build each padding row as a
            # DISTINCT list — the previous `[[""] * len(row)] * (n - 1)` form
            # repeated one shared list object, so with 3+ GPUs every padding
            # row accumulated all GPUs' columns.
            rows = [base_row] + [[""] * len(base_row) for _ in range(len(metrics["gpus"]) - 1)]
            for gpu_row, gpu_id in zip(rows, sorted(metrics["gpus"])):
                gpu = metrics["gpus"][gpu_id]
                gpu_row += [
                    f"{gpu['utilization']}%",
                    f"{round(100 * gpu['memory_used_bytes'] / gpu['memory_total_bytes'], 2)}%",
                    f"{_format_size(gpu['memory_used_bytes'])}B / {_format_size(gpu['memory_total_bytes'])}B",
                ]
        else:
            # No GPU data: pad the single row with N/A up to the table width.
            base_row += ["N/A"] * (len(table_headers) - len(base_row))
            rows = [base_row]
        yield False, job_id, rows
    yield True, job_id, []
|
| 405 |
+
|
| 406 |
+
|
| 407 |
+
@jobs_cli.command("stats", examples=["hf jobs stats <job_id>"])
def jobs_stats(
    job_ids: JobIdsArg = None,
    namespace: NamespaceOpt = None,
    token: TokenOpt = None,
) -> None:
    """Fetch the resource usage statistics and metrics of Jobs"""
    api = get_hf_api(token=token)
    if namespace is None:
        # Default to the authenticated user's own namespace.
        namespace = api.whoami()["name"]
    if job_ids is None:
        # No explicit job IDs: monitor every currently active job.
        job_ids = [
            job.id
            for job in api.list_jobs(namespace=namespace)
            if (job.status.stage if job.status else "UNKNOWN") in ("RUNNING", "UPDATING")
        ]
    if len(job_ids) == 0:
        print("No running jobs found")
        return
    table_headers = [
        "JOB ID",
        "CPU %",
        "NUM CPU",
        "MEM %",
        "MEM USAGE",
        "NET I/O",
        "GPU UTIL %",
        "GPU MEM %",
        "GPU MEM USAGE",
    ]
    headers_aliases = [
        "id",
        "cpu_usage_pct",
        "cpu_millicores",
        "memory_used_bytes_pct",
        "memory_used_bytes_and_total_bytes",
        "rx_bps_and_tx_bps",
        "gpu_utilization",
        "gpu_memory_used_bytes_pct",
        "gpu_memory_used_bytes_and_total_bytes",
    ]
    try:
        # One thread per job: each streams that job's metrics concurrently.
        with multiprocessing.pool.ThreadPool(len(job_ids)) as pool:
            # Seed the table with "--" placeholder rows so something is shown
            # immediately, before the first metrics sample arrives.
            rows_per_job_id: dict[str, list[list[Union[str, int]]]] = {}
            for job_id in job_ids:
                row: list[Union[str, int]] = [job_id]
                row += ["-- / --" if ("/" in header or "USAGE" in header) else "--" for header in table_headers[1:]]
                rows_per_job_id[job_id] = [row]
            last_update_time = time.time()
            total_rows = [row for job_id in rows_per_job_id for row in rows_per_job_id[job_id]]
            _print_output(total_rows, table_headers, headers_aliases, None)

            kwargs_list = [
                {
                    "job_id": job_id,
                    "metrics_stream": api.fetch_job_metrics(job_id=job_id, namespace=namespace),
                    "table_headers": table_headers,
                }
                for job_id in job_ids
            ]
            # Consume row updates as they arrive from any job, in arrival order.
            for done, job_id, rows in iflatmap_unordered(pool, _get_jobs_stats_rows, kwargs_list=kwargs_list):
                if done:
                    # This job's stream ended: drop its rows from the table.
                    rows_per_job_id.pop(job_id, None)
                else:
                    rows_per_job_id[job_id] = rows
                now = time.time()
                # Throttle terminal redraws to avoid flicker.
                if now - last_update_time >= STATS_UPDATE_MIN_INTERVAL:
                    # Erase the previous table (2 header lines + data rows as
                    # last printed), then reprint the refreshed one in place.
                    _clear_line(2 + len(total_rows))
                    total_rows = [row for job_id in rows_per_job_id for row in rows_per_job_id[job_id]]
                    _print_output(total_rows, table_headers, headers_aliases, None)
                    last_update_time = now
    except HfHubHTTPError as e:
        # Map common HTTP errors to actionable CLI messages.
        status = e.response.status_code if e.response is not None else None
        if status == 404:
            raise CLIError("Job not found. Please check the job ID.") from e
        elif status == 403:
            raise CLIError("Access denied. You may not have permission to view this job.") from e
        else:
            raise CLIError(f"Failed to fetch job stats: {e}") from e
@jobs_cli.command("ps", examples=["hf jobs ps", "hf jobs ps -a"])
def jobs_ps(
    all: Annotated[
        bool,
        typer.Option(
            "-a",
            "--all",
            help="Show all Jobs (default shows just running)",
        ),
    ] = False,
    namespace: NamespaceOpt = None,
    token: TokenOpt = None,
    filter: Annotated[
        Optional[list[str]],
        typer.Option(
            "-f",
            "--filter",
            help="Filter output based on conditions provided (format: key=value)",
        ),
    ] = None,
    format: Annotated[
        Optional[str],
        typer.Option(
            help="Format output using a custom template",
        ),
    ] = None,
) -> None:
    """List Jobs."""
    api = get_hf_api(token=token)
    # Fetch jobs data
    jobs = api.list_jobs(namespace=namespace)
    # Define table headers
    table_headers = ["JOB ID", "IMAGE/SPACE", "COMMAND", "CREATED", "STATUS"]
    headers_aliases = ["id", "image", "command", "created", "status"]
    rows: list[list[Union[str, int]]] = []

    # Parse `--filter` expressions into (key, op, value) triples. Label filters
    # are collected separately from regular property filters because they are
    # matched against different dictionaries below.
    filters: list[tuple[str, str, str]] = []
    labels_filters: list[tuple[str, str, str]] = []
    for f in filter or []:
        if f.startswith("label!=") or f.startswith("label="):
            if f.startswith("label!="):
                label_part = f[len("label!=") :]
                # `label!=key=value` is ambiguous: only key exclusion is supported.
                if "=" in label_part:
                    print(
                        f"Warning: Ignoring invalid label filter format 'label!={label_part}'. Use label!=key format."
                    )
                    continue
                label_key, op, label_value = label_part, "!=", "*"
            else:
                label_part = f[len("label=") :]
                if "=" in label_part:
                    label_key, label_value = label_part.split("=", 1)
                else:
                    # `label=key` alone matches any value for that key.
                    label_key, label_value = label_part, "*"
                # Negate predicate in case of key!=value
                if label_key.endswith("!"):
                    op = "!="
                    label_key = label_key[:-1]
                else:
                    op = "="
            labels_filters.append((label_key.lower(), op, label_value.lower()))
        elif "=" in f:
            key, value = f.split("=", 1)
            # Negate predicate in case of key!=value
            if key.endswith("!"):
                op = "!="
                key = key[:-1]
            else:
                op = "="
            filters.append((key.lower(), op, value.lower()))
        else:
            print(f"Warning: Ignoring invalid filter format '{f}'. Use key=value format.")
    # Process jobs data
    for job in jobs:
        # Extract job data for filtering
        status = job.status.stage if job.status else "UNKNOWN"
        if not all and status not in ("RUNNING", "UPDATING"):
            # Skip job if not all jobs should be shown and status doesn't match criteria
            continue
        # Extract job data for output
        job_id = job.id

        # Extract image or space information
        image_or_space = job.docker_image or "N/A"

        # Extract and format command
        cmd = job.command or []
        command_str = " ".join(cmd) if cmd else "N/A"

        # Extract creation time
        created_at = job.created_at.strftime("%Y-%m-%d %H:%M:%S") if job.created_at else "N/A"

        # Create a dict with all job properties for filtering
        props = {"id": job_id, "image": image_or_space, "status": status.lower(), "command": command_str}
        if not _matches_filters(props, filters):
            continue
        if not _matches_filters(job.labels or {}, labels_filters):
            continue

        # Create row
        rows.append([job_id, image_or_space, command_str, created_at, status])

    # Handle empty results
    if not rows:
        filters_msg = f" matching filters: {', '.join([f'{k}{o}{v}' for k, o, v in filters])}" if filters else ""
        print(f"No jobs found{filters_msg}")
        return
    # Apply custom format if provided or use default tabular format
    _print_output(rows, table_headers, headers_aliases, format)
@jobs_cli.command("hardware", examples=["hf jobs hardware"])
def jobs_hardware() -> None:
    """List available hardware options for Jobs"""
    api = get_hf_api()
    table_headers = ["NAME", "PRETTY NAME", "CPU", "RAM", "ACCELERATOR", "COST/MIN", "COST/HOUR"]
    headers_aliases = ["name", "prettyName", "cpu", "ram", "accelerator", "costMin", "costHour"]

    rows: list[list[Union[str, int]]] = []
    for hardware in api.list_jobs_hardware():
        # Accelerator column: "<qty>x <model> (<vram>)" or "N/A" when absent.
        if hardware.accelerator:
            acc = hardware.accelerator
            accelerator_info = f"{acc.quantity}x {acc.model} ({acc.vram})"
        else:
            accelerator_info = "N/A"
        # Per-minute unit cost, with the hourly figure derived from it.
        if hardware.unit_cost_usd is not None:
            cost_min = f"${hardware.unit_cost_usd:.4f}"
            cost_hour = f"${hardware.unit_cost_usd * 60:.2f}"
        else:
            cost_min = "N/A"
            cost_hour = "N/A"
        rows.append(
            [hardware.name, hardware.pretty_name or "N/A", hardware.cpu, hardware.ram, accelerator_info, cost_min, cost_hour]
        )

    if not rows:
        print("No hardware options found")
        return
    _print_output(rows, table_headers, headers_aliases, None)
@jobs_cli.command("inspect", examples=["hf jobs inspect <job_id>"])
def jobs_inspect(
    job_ids: Annotated[
        list[str],
        typer.Argument(
            help="The jobs to inspect",
        ),
    ],
    namespace: NamespaceOpt = None,
    token: TokenOpt = None,
) -> None:
    """Display detailed information on one or more Jobs"""
    api = get_hf_api(token=token)
    try:
        inspected = [api.inspect_job(job_id=current_id, namespace=namespace) for current_id in job_ids]
        # Dump all jobs as one pretty-printed JSON array.
        print(json.dumps([asdict(job) for job in inspected], indent=4, default=str))
    except HfHubHTTPError as e:
        # Translate common HTTP failures into actionable CLI errors.
        status = e.response.status_code if e.response is not None else None
        if status == 404:
            raise CLIError("Job not found. Please check the job ID.") from e
        if status == 403:
            raise CLIError("Access denied. You may not have permission to view this job.") from e
        raise CLIError(f"Failed to inspect job: {e}") from e
@jobs_cli.command("cancel", examples=["hf jobs cancel <job_id>"])
def jobs_cancel(
    job_id: JobIdArg,
    namespace: NamespaceOpt = None,
    token: TokenOpt = None,
) -> None:
    """Cancel a Job"""
    try:
        get_hf_api(token=token).cancel_job(job_id=job_id, namespace=namespace)
    except HfHubHTTPError as e:
        # Translate common HTTP failures into actionable CLI errors.
        status = e.response.status_code if e.response is not None else None
        if status == 404:
            raise CLIError("Job not found. Please check the job ID.") from e
        if status == 403:
            raise CLIError("Access denied. You may not have permission to cancel this job.") from e
        raise CLIError(f"Failed to cancel job: {e}") from e
# Sub-app mounted as `hf jobs uv ...`: runs UV scripts as Jobs.
uv_app = typer_factory(help="Run UV scripts (Python with inline dependencies) on HF infrastructure.")
jobs_cli.add_typer(uv_app, name="uv")
|
| 671 |
+
|
| 672 |
+
@uv_app.command(
    "run",
    context_settings={"ignore_unknown_options": True},
    examples=[
        "hf jobs uv run my_script.py",
        "hf jobs uv run ml_training.py --flavor a10g-small",
        "hf jobs uv run --with transformers train.py",
    ],
)
def jobs_uv_run(
    script: ScriptArg,
    script_args: ScriptArgsArg = None,
    image: ImageOpt = None,
    flavor: FlavorOpt = None,
    env: EnvOpt = None,
    secrets: SecretsOpt = None,
    label: LabelsOpt = None,
    env_file: EnvFileOpt = None,
    secrets_file: SecretsFileOpt = None,
    timeout: TimeoutOpt = None,
    detach: DetachOpt = False,
    namespace: NamespaceOpt = None,
    token: TokenOpt = None,
    with_: WithOpt = None,
    python: PythonOpt = None,
) -> None:
    """Run a UV script (local file or URL) on HF infrastructure"""
    # Collect environment variables: --env-file values first, then --env
    # overrides (later updates win).
    env_map: dict[str, Optional[str]] = {}
    if env_file:
        env_map.update(load_dotenv(Path(env_file).read_text(), environ=os.environ.copy()))
    for env_value in env or []:
        env_map.update(load_dotenv(env_value, environ=os.environ.copy()))
    # Secrets are resolved against the local environment extended with HF_TOKEN.
    secrets_map: dict[str, Optional[str]] = {}
    extended_environ = _get_extended_environ()
    if secrets_file:
        secrets_map.update(load_dotenv(Path(secrets_file).read_text(), environ=extended_environ))
    for secret in secrets or []:
        secrets_map.update(load_dotenv(secret, environ=extended_environ))

    api = get_hf_api(token=token)
    job = api.run_uv_job(
        script=script,
        script_args=script_args or [],
        dependencies=with_,
        python=python,
        image=image,
        env=env_map,
        secrets=secrets_map,
        labels=_parse_labels_map(label),
        flavor=flavor,  # type: ignore[arg-type]
        timeout=timeout,
        namespace=namespace,
    )
    # Always print the job ID to the user
    print(f"Job started with ID: {job.id}")
    print(f"View at: {job.url}")
    if detach:
        return
    # Now let's stream the logs
    for log in api.fetch_job_logs(job_id=job.id, namespace=job.owner.name):
        print(log)
+
# Sub-app mounted as `hf jobs scheduled ...`: cron-style scheduled Jobs.
scheduled_app = typer_factory(help="Create and manage scheduled Jobs on the Hub.")
jobs_cli.add_typer(scheduled_app, name="scheduled")
+
|
| 738 |
+
|
| 739 |
+
@scheduled_app.command(
    "run",
    context_settings={"ignore_unknown_options": True},
    examples=['hf jobs scheduled run "0 0 * * *" python:3.12 python script.py'],
)
def scheduled_run(
    schedule: ScheduleArg,
    image: ImageArg,
    command: CommandArg,
    suspend: SuspendOpt = None,
    concurrency: ConcurrencyOpt = None,
    env: EnvOpt = None,
    secrets: SecretsOpt = None,
    label: LabelsOpt = None,
    env_file: EnvFileOpt = None,
    secrets_file: SecretsFileOpt = None,
    flavor: FlavorOpt = None,
    timeout: TimeoutOpt = None,
    namespace: NamespaceOpt = None,
    token: TokenOpt = None,
) -> None:
    """Schedule a Job."""
    # Collect environment variables: --env-file values first, then --env
    # overrides (later updates win).
    env_map: dict[str, Optional[str]] = {}
    if env_file:
        env_map.update(load_dotenv(Path(env_file).read_text(), environ=os.environ.copy()))
    for env_value in env or []:
        env_map.update(load_dotenv(env_value, environ=os.environ.copy()))
    # Secrets are resolved against the local environment extended with HF_TOKEN.
    secrets_map: dict[str, Optional[str]] = {}
    extended_environ = _get_extended_environ()
    if secrets_file:
        secrets_map.update(load_dotenv(Path(secrets_file).read_text(), environ=extended_environ))
    for secret in secrets or []:
        secrets_map.update(load_dotenv(secret, environ=extended_environ))

    api = get_hf_api(token=token)
    scheduled_job = api.create_scheduled_job(
        image=image,
        command=command,
        schedule=schedule,
        suspend=suspend,
        concurrency=concurrency,
        env=env_map,
        secrets=secrets_map,
        labels=_parse_labels_map(label),
        flavor=flavor,
        timeout=timeout,
        namespace=namespace,
    )
    print(f"Scheduled Job created with ID: {scheduled_job.id}")
+
@scheduled_app.command("ps", examples=["hf jobs scheduled ps"])
def scheduled_ps(
    all: Annotated[
        bool,
        typer.Option(
            "-a",
            "--all",
            help="Show all scheduled Jobs (default hides suspended)",
        ),
    ] = False,
    namespace: NamespaceOpt = None,
    token: TokenOpt = None,
    filter: Annotated[
        Optional[list[str]],
        typer.Option(
            "-f",
            "--filter",
            help="Filter output based on conditions provided (format: key=value)",
        ),
    ] = None,
    format: Annotated[
        Optional[str],
        typer.Option(
            "--format",
            help="Format output using a custom template",
        ),
    ] = None,
) -> None:
    """List scheduled Jobs"""
    api = get_hf_api(token=token)
    scheduled_jobs = api.list_scheduled_jobs(namespace=namespace)
    table_headers = ["ID", "SCHEDULE", "IMAGE/SPACE", "COMMAND", "LAST RUN", "NEXT RUN", "SUSPEND"]
    headers_aliases = ["id", "schedule", "image", "command", "last", "next", "suspend"]
    rows: list[list[Union[str, int]]] = []
    # Parse `--filter` expressions into (key, op, value) triples.
    filters: list[tuple[str, str, str]] = []
    for f in filter or []:
        if "=" in f:
            key, value = f.split("=", 1)
            # Negate predicate in case of key!=value
            if key.endswith("!"):
                op = "!="
                key = key[:-1]
            else:
                op = "="
            filters.append((key.lower(), op, value.lower()))
        else:
            print(f"Warning: Ignoring invalid filter format '{f}'. Use key=value format.")

    for scheduled_job in scheduled_jobs:
        suspend = scheduled_job.suspend or False
        # Hide suspended entries unless --all was passed.
        if not all and suspend:
            continue
        sj_id = scheduled_job.id
        schedule = scheduled_job.schedule or "N/A"
        image_or_space = scheduled_job.job_spec.docker_image or "N/A"
        cmd = scheduled_job.job_spec.command or []
        command_str = " ".join(cmd) if cmd else "N/A"
        last_job_at = (
            scheduled_job.status.last_job.at.strftime("%Y-%m-%d %H:%M:%S") if scheduled_job.status.last_job else "N/A"
        )
        next_job_run_at = (
            scheduled_job.status.next_job_run_at.strftime("%Y-%m-%d %H:%M:%S")
            if scheduled_job.status.next_job_run_at
            else "N/A"
        )
        # Properties exposed to `--filter` matching.
        props = {"id": sj_id, "image": image_or_space, "suspend": str(suspend), "command": command_str}
        if not _matches_filters(props, filters):
            continue
        rows.append([sj_id, schedule, image_or_space, command_str, last_job_at, next_job_run_at, suspend])

    if not rows:
        filters_msg = f" matching filters: {', '.join([f'{k}{o}{v}' for k, o, v in filters])}" if filters else ""
        print(f"No scheduled jobs found{filters_msg}")
        return
    _print_output(rows, table_headers, headers_aliases, format)
|
| 865 |
+
|
| 866 |
+
|
| 867 |
+
@scheduled_app.command("inspect", examples=["hf jobs scheduled inspect <id>"])
def scheduled_inspect(
    scheduled_job_ids: Annotated[
        list[str],
        typer.Argument(
            help="The scheduled jobs to inspect",
        ),
    ],
    namespace: NamespaceOpt = None,
    token: TokenOpt = None,
) -> None:
    """Display detailed information on one or more scheduled Jobs"""
    api = get_hf_api(token=token)
    # Fetch every requested scheduled job and dump them as one JSON array.
    payload = [
        asdict(api.inspect_scheduled_job(scheduled_job_id=sj_id, namespace=namespace))
        for sj_id in scheduled_job_ids
    ]
    print(json.dumps(payload, indent=4, default=str))
|
| 885 |
+
|
| 886 |
+
|
| 887 |
+
@scheduled_app.command("delete", examples=["hf jobs scheduled delete <id>"])
def scheduled_delete(
    scheduled_job_id: ScheduledJobIdArg,
    namespace: NamespaceOpt = None,
    token: TokenOpt = None,
) -> None:
    """Delete a scheduled Job."""
    # Thin pass-through to the HfApi client.
    get_hf_api(token=token).delete_scheduled_job(scheduled_job_id=scheduled_job_id, namespace=namespace)
|
| 896 |
+
|
| 897 |
+
|
| 898 |
+
@scheduled_app.command("suspend", examples=["hf jobs scheduled suspend <id>"])
def scheduled_suspend(
    scheduled_job_id: ScheduledJobIdArg,
    namespace: NamespaceOpt = None,
    token: TokenOpt = None,
) -> None:
    """Suspend (pause) a scheduled Job."""
    # Thin pass-through to the HfApi client.
    get_hf_api(token=token).suspend_scheduled_job(scheduled_job_id=scheduled_job_id, namespace=namespace)
|
| 907 |
+
|
| 908 |
+
|
| 909 |
+
@scheduled_app.command("resume", examples=["hf jobs scheduled resume <id>"])
def scheduled_resume(
    scheduled_job_id: ScheduledJobIdArg,
    namespace: NamespaceOpt = None,
    token: TokenOpt = None,
) -> None:
    """Resume (unpause) a scheduled Job."""
    # Thin pass-through to the HfApi client.
    get_hf_api(token=token).resume_scheduled_job(scheduled_job_id=scheduled_job_id, namespace=namespace)
|
| 918 |
+
|
| 919 |
+
|
| 920 |
+
# Sub-app mounted as `hf jobs scheduled uv ...`: scheduled UV scripts.
scheduled_uv_app = typer_factory(help="Schedule UV scripts on HF infrastructure.")
scheduled_app.add_typer(scheduled_uv_app, name="uv")
|
| 922 |
+
|
| 923 |
+
|
| 924 |
+
@scheduled_uv_app.command(
    "run",
    context_settings={"ignore_unknown_options": True},
    examples=[
        'hf jobs scheduled uv run "0 0 * * *" script.py',
        'hf jobs scheduled uv run "0 0 * * *" script.py --with pandas',
    ],
)
def scheduled_uv_run(
    schedule: ScheduleArg,
    script: ScriptArg,
    script_args: ScriptArgsArg = None,
    suspend: SuspendOpt = None,
    concurrency: ConcurrencyOpt = None,
    image: ImageOpt = None,
    flavor: FlavorOpt = None,
    env: EnvOpt = None,
    secrets: SecretsOpt = None,
    label: LabelsOpt = None,
    env_file: EnvFileOpt = None,
    secrets_file: SecretsFileOpt = None,
    timeout: TimeoutOpt = None,
    namespace: NamespaceOpt = None,
    token: TokenOpt = None,
    with_: WithOpt = None,
    python: PythonOpt = None,
) -> None:
    """Run a UV script (local file or URL) on HF infrastructure"""
    # Collect environment variables: --env-file values first, then --env
    # overrides (later updates win).
    env_map: dict[str, Optional[str]] = {}
    if env_file:
        env_map.update(load_dotenv(Path(env_file).read_text(), environ=os.environ.copy()))
    for env_value in env or []:
        env_map.update(load_dotenv(env_value, environ=os.environ.copy()))
    # Secrets are resolved against the local environment extended with HF_TOKEN.
    secrets_map: dict[str, Optional[str]] = {}
    extended_environ = _get_extended_environ()
    if secrets_file:
        secrets_map.update(load_dotenv(Path(secrets_file).read_text(), environ=extended_environ))
    for secret in secrets or []:
        secrets_map.update(load_dotenv(secret, environ=extended_environ))

    api = get_hf_api(token=token)
    job = api.create_scheduled_uv_job(
        script=script,
        script_args=script_args or [],
        schedule=schedule,
        suspend=suspend,
        concurrency=concurrency,
        dependencies=with_,
        python=python,
        image=image,
        env=env_map,
        secrets=secrets_map,
        labels=_parse_labels_map(label),
        flavor=flavor,  # type: ignore[arg-type]
        timeout=timeout,
        namespace=namespace,
    )
    print(f"Scheduled Job created with ID: {job.id}")
|
| 982 |
+
|
| 983 |
+
|
| 984 |
+
### UTILS
|
| 985 |
+
|
| 986 |
+
|
| 987 |
+
def _parse_labels_map(labels: Optional[list[str]]) -> Optional[dict[str, str]]:
|
| 988 |
+
"""Parse label key-value pairs from CLI arguments.
|
| 989 |
+
|
| 990 |
+
Args:
|
| 991 |
+
labels: List of label strings in KEY=VALUE format. If KEY only, then VALUE is set to empty string.
|
| 992 |
+
|
| 993 |
+
Returns:
|
| 994 |
+
Dictionary mapping label keys to values, or None if no labels provided.
|
| 995 |
+
"""
|
| 996 |
+
if not labels:
|
| 997 |
+
return None
|
| 998 |
+
labels_map: dict[str, str] = {}
|
| 999 |
+
for label_var in labels:
|
| 1000 |
+
key, value = label_var.split("=", 1) if "=" in label_var else (label_var, "")
|
| 1001 |
+
labels_map[key] = value
|
| 1002 |
+
return labels_map
|
| 1003 |
+
|
| 1004 |
+
|
| 1005 |
+
def _tabulate(rows: list[list[Union[str, int]]], headers: list[str]) -> str:
|
| 1006 |
+
"""
|
| 1007 |
+
Inspired by:
|
| 1008 |
+
|
| 1009 |
+
- stackoverflow.com/a/8356620/593036
|
| 1010 |
+
- stackoverflow.com/questions/9535954/printing-lists-as-tabular-data
|
| 1011 |
+
"""
|
| 1012 |
+
col_widths = [max(len(str(x)) for x in col) for col in zip(*rows, headers)]
|
| 1013 |
+
terminal_width = max(os.get_terminal_size().columns, len(headers) * 12)
|
| 1014 |
+
while len(headers) + sum(col_widths) > terminal_width:
|
| 1015 |
+
col_to_minimize = col_widths.index(max(col_widths))
|
| 1016 |
+
col_widths[col_to_minimize] //= 2
|
| 1017 |
+
if len(headers) + sum(col_widths) <= terminal_width:
|
| 1018 |
+
col_widths[col_to_minimize] = terminal_width - sum(col_widths) - len(headers) + col_widths[col_to_minimize]
|
| 1019 |
+
row_format = ("{{:{}}} " * len(headers)).format(*col_widths)
|
| 1020 |
+
lines = []
|
| 1021 |
+
lines.append(row_format.format(*headers))
|
| 1022 |
+
lines.append(row_format.format(*["-" * w for w in col_widths]))
|
| 1023 |
+
for row in rows:
|
| 1024 |
+
row_format_args = [
|
| 1025 |
+
str(x)[: col_width - 3] + "..." if len(str(x)) > col_width else str(x)
|
| 1026 |
+
for x, col_width in zip(row, col_widths)
|
| 1027 |
+
]
|
| 1028 |
+
lines.append(row_format.format(*row_format_args))
|
| 1029 |
+
return "\n".join(lines)
|
| 1030 |
+
|
| 1031 |
+
|
| 1032 |
+
def _get_extended_environ() -> Dict[str, str]:
    """Return a copy of the process environment, with HF_TOKEN injected when a token is available."""
    environ_copy = dict(os.environ)
    token = get_token()
    if token is not None:
        environ_copy["HF_TOKEN"] = token
    return environ_copy
|
| 1037 |
+
|
| 1038 |
+
|
| 1039 |
+
# Generic item type shared by the queue/flatmap helpers below.
T = TypeVar("T")
|
| 1040 |
+
|
| 1041 |
+
|
| 1042 |
+
def _write_generator_to_queue(queue: Queue[T], func: Callable[..., Iterable[T]], kwargs: dict) -> None:
|
| 1043 |
+
for result in func(**kwargs):
|
| 1044 |
+
queue.put(result)
|
| 1045 |
+
|
| 1046 |
+
|
| 1047 |
+
def iflatmap_unordered(
    pool: multiprocessing.pool.ThreadPool,
    func: Callable[..., Iterable[T]],
    *,
    kwargs_list: list[dict],
) -> Iterable[T]:
    """
    Takes a function that returns an iterable of items, and run it in parallel using threads to return the flattened iterable of items as they arrive.

    This is inspired by those three `map()` variants, and is the mix of all three:

    * `imap()`: like `map()` but returns an iterable instead of a list of results
    * `imap_unordered()`: like `imap()` but the output is sorted by time of arrival
    * `flatmap()`: like `map()` but given a function which returns a list, `flatmap()` returns the flattened list that is the concatenation of all the output lists
    """
    queue: Queue[T] = Queue()
    # Each worker drains its own generator into the shared queue.
    async_results = [pool.apply_async(_write_generator_to_queue, (queue, func, kwargs)) for kwargs in kwargs_list]
    try:
        while True:
            try:
                yield queue.get(timeout=0.05)
            except Empty:
                # Stop only once every worker has finished AND the queue has
                # been fully drained (checking one without the other could
                # drop late items or hang).
                if all(async_result.ready() for async_result in async_results) and queue.empty():
                    break
    except KeyboardInterrupt:
        # Ctrl+C stops consumption gracefully; worker errors still surface below.
        pass
    finally:
        # we get the result in case there's an error to raise
        try:
            [async_result.get(timeout=0.05) for async_result in async_results]
        except multiprocessing.TimeoutError:
            pass
|
venv/lib/python3.10/site-packages/huggingface_hub/cli/lfs.py
ADDED
|
@@ -0,0 +1,175 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Implementation of a custom transfer agent for the transfer type "multipart" for
|
| 3 |
+
git-lfs.
|
| 4 |
+
|
| 5 |
+
Inspired by:
|
| 6 |
+
github.com/cbartz/git-lfs-swift-transfer-agent/blob/master/git_lfs_swift_transfer.py
|
| 7 |
+
|
| 8 |
+
Spec is: github.com/git-lfs/git-lfs/blob/master/docs/custom-transfers.md
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
To launch debugger while developing:
|
| 12 |
+
|
| 13 |
+
``` [lfs "customtransfer.multipart"]
|
| 14 |
+
path = /path/to/huggingface_hub/.env/bin/python args = -m debugpy --listen 5678
|
| 15 |
+
--wait-for-client
|
| 16 |
+
/path/to/huggingface_hub/src/huggingface_hub/commands/huggingface_cli.py
|
| 17 |
+
lfs-multipart-upload ```"""
|
| 18 |
+
|
| 19 |
+
import json
|
| 20 |
+
import os
|
| 21 |
+
import subprocess
|
| 22 |
+
import sys
|
| 23 |
+
from typing import Annotated, Optional
|
| 24 |
+
|
| 25 |
+
import typer
|
| 26 |
+
|
| 27 |
+
from huggingface_hub.errors import CLIError
|
| 28 |
+
from huggingface_hub.lfs import LFS_MULTIPART_UPLOAD_COMMAND
|
| 29 |
+
|
| 30 |
+
from ..utils import get_session, hf_raise_for_status, logging
|
| 31 |
+
from ..utils._lfs import SliceFileObj
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
logger = logging.get_logger(__name__)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def lfs_enable_largefiles(
    path: Annotated[
        str,
        typer.Argument(
            help="Local path to repository you want to configure.",
        ),
    ],
) -> None:
    """
    Configure your repository to enable upload of files > 5GB.

    This command sets up git-lfs to use the custom multipart transfer agent
    which enables efficient uploading of large files in chunks.
    """
    repo_dir = os.path.abspath(path)
    if not os.path.isdir(repo_dir):
        raise CLIError("This does not look like a valid git repo.")
    # Register the `hf` binary as git-lfs's custom "multipart" transfer agent
    # and tell it which subcommand to invoke.
    for config_command in (
        "git config lfs.customtransfer.multipart.path hf",
        f"git config lfs.customtransfer.multipart.args {LFS_MULTIPART_UPLOAD_COMMAND}",
    ):
        subprocess.run(config_command.split(), check=True, cwd=repo_dir)
    print("Local repo set up for largefiles")
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
def write_msg(msg: dict):
    """Write out the message in Line delimited JSON."""
    serialized = json.dumps(msg) + "\n"
    sys.stdout.write(serialized)
    sys.stdout.flush()
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def read_msg() -> Optional[dict]:
    """Read one line-delimited JSON message from stdin.

    Returns `None` when git-lfs sends a terminate message, the parsed message
    for download/upload transfer requests, and exits the process otherwise.
    """
    message = json.loads(sys.stdin.readline().strip())

    # terminate message received
    if message.get("type") == "terminate" or message.get("event") == "terminate":
        return None

    if message.get("event") in ("download", "upload"):
        return message

    logger.critical("Received unexpected message")
    sys.exit(1)
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
def lfs_multipart_upload() -> None:
    """Internal git-lfs custom transfer agent for multipart uploads.

    This function implements the custom transfer protocol for git-lfs multipart uploads.
    Handles chunked uploads of large files to Hugging Face Hub.

    Communicates with git-lfs over stdin/stdout using line-delimited JSON and
    never returns normally: it exits via `sys.exit` when the transfer session
    ends (terminate message) or on protocol errors.
    """
    # Immediately after invoking a custom transfer process, git-lfs
    # sends initiation data to the process over stdin.
    # This tells the process useful information about the configuration.
    init_msg = json.loads(sys.stdin.readline().strip())
    if not (init_msg.get("event") == "init" and init_msg.get("operation") == "upload"):
        write_msg({"error": {"code": 32, "message": "Wrong lfs init operation"}})
        sys.exit(1)

    # The transfer process should use the information it needs from the
    # initiation structure, and also perform any one-off setup tasks it
    # needs to do. It should then respond on stdout with a simple empty
    # confirmation structure, as follows:
    write_msg({})

    # After the initiation exchange, git-lfs will send any number of
    # transfer requests to the stdin of the transfer process, in a serial sequence.
    while True:
        msg = read_msg()
        if msg is None:
            # When all transfers have been processed, git-lfs will send
            # a terminate event to the stdin of the transfer process.
            # On receiving this message the transfer process should
            # clean up and terminate. No response is expected.
            sys.exit(0)

        oid = msg["oid"]
        filepath = msg["path"]
        completion_url = msg["action"]["href"]
        header = msg["action"]["header"]
        chunk_size = int(header.pop("chunk_size"))
        # After popping chunk_size, the remaining header values are taken to be
        # the presigned part URLs, one per chunk, in insertion order.
        presigned_urls: list[str] = list(header.values())

        # Send a "started" progress event to allow other workers to start.
        # Otherwise they're delayed until first "progress" event is reported,
        # i.e. after the first 5GB by default (!)
        write_msg(
            {
                "event": "progress",
                "oid": oid,
                "bytesSoFar": 1,
                "bytesSinceLast": 0,
            }
        )

        parts = []
        with open(filepath, "rb") as file:
            for i, presigned_url in enumerate(presigned_urls):
                # SliceFileObj exposes a chunk-sized window of the file so each
                # part is PUT without reading the whole file into memory.
                with SliceFileObj(
                    file,
                    seek_from=i * chunk_size,
                    read_limit=chunk_size,
                ) as data:
                    r = get_session().put(presigned_url, data=data)
                    hf_raise_for_status(r)
                    parts.append(
                        {
                            "etag": r.headers.get("etag"),
                            "partNumber": i + 1,
                        }
                    )
                    # In order to support progress reporting while data is uploading / downloading,
                    # the transfer process should post messages to stdout
                    write_msg(
                        {
                            "event": "progress",
                            "oid": oid,
                            "bytesSoFar": (i + 1) * chunk_size,
                            "bytesSinceLast": chunk_size,
                        }
                    )

        # Finalize the multipart upload by posting the collected part etags.
        r = get_session().post(
            completion_url,
            json={
                "oid": oid,
                "parts": parts,
            },
        )
        hf_raise_for_status(r)

        write_msg({"event": "complete", "oid": oid})
|
venv/lib/python3.10/site-packages/huggingface_hub/cli/models.py
ADDED
|
@@ -0,0 +1,125 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2026 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
"""Contains commands to interact with models on the Hugging Face Hub.
|
| 15 |
+
|
| 16 |
+
Usage:
|
| 17 |
+
# list models on the Hub
|
| 18 |
+
hf models ls
|
| 19 |
+
|
| 20 |
+
# list models with a search query
|
| 21 |
+
hf models ls --search "llama"
|
| 22 |
+
|
| 23 |
+
# get info about a model
|
| 24 |
+
hf models info Lightricks/LTX-2
|
| 25 |
+
"""
|
| 26 |
+
|
| 27 |
+
import enum
|
| 28 |
+
import json
|
| 29 |
+
from typing import Annotated, Optional, get_args
|
| 30 |
+
|
| 31 |
+
import typer
|
| 32 |
+
|
| 33 |
+
from huggingface_hub.errors import CLIError, RepositoryNotFoundError, RevisionNotFoundError
|
| 34 |
+
from huggingface_hub.hf_api import ExpandModelProperty_T, ModelSort_T
|
| 35 |
+
|
| 36 |
+
from ._cli_utils import (
|
| 37 |
+
AuthorOpt,
|
| 38 |
+
FilterOpt,
|
| 39 |
+
FormatOpt,
|
| 40 |
+
LimitOpt,
|
| 41 |
+
OutputFormat,
|
| 42 |
+
QuietOpt,
|
| 43 |
+
RevisionOpt,
|
| 44 |
+
SearchOpt,
|
| 45 |
+
TokenOpt,
|
| 46 |
+
api_object_to_dict,
|
| 47 |
+
get_hf_api,
|
| 48 |
+
make_expand_properties_parser,
|
| 49 |
+
print_list_output,
|
| 50 |
+
typer_factory,
|
| 51 |
+
)
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
# All legal values for `--expand`, derived from the library's typed literal.
_EXPAND_PROPERTIES = sorted(get_args(ExpandModelProperty_T))
_SORT_OPTIONS = get_args(ModelSort_T)
# Enum built dynamically from ModelSort_T so typer can render and validate the choices.
ModelSortEnum = enum.Enum("ModelSortEnum", {s: s for s in _SORT_OPTIONS}, type=str)  # type: ignore[misc]


# Reusable `--expand` option annotation shared by the `ls` and `info` commands.
ExpandOpt = Annotated[
    Optional[str],
    typer.Option(
        help=f"Comma-separated properties to expand. Example: '--expand=downloads,likes,tags'. Valid: {', '.join(_EXPAND_PROPERTIES)}.",
        callback=make_expand_properties_parser(_EXPAND_PROPERTIES),
    ),
]


# Typer sub-app grouping the `hf models ...` commands.
models_cli = typer_factory(help="Interact with models on the Hub.")
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
@models_cli.command(
    "ls",
    examples=[
        "hf models ls --sort downloads --limit 10",
        'hf models ls --search "llama" --author meta-llama',
    ],
)
def models_ls(
    search: SearchOpt = None,
    author: AuthorOpt = None,
    filter: FilterOpt = None,
    sort: Annotated[
        Optional[ModelSortEnum],
        typer.Option(help="Sort results."),
    ] = None,
    limit: LimitOpt = 10,
    expand: ExpandOpt = None,
    format: FormatOpt = OutputFormat.table,
    quiet: QuietOpt = False,
    token: TokenOpt = None,
) -> None:
    """List models on the Hub."""
    client = get_hf_api(token=token)
    # Unwrap the enum member into the plain string the API expects.
    sort_value = sort.value if sort is not None else None
    models = client.list_models(
        filter=filter,
        author=author,
        search=search,
        sort=sort_value,
        limit=limit,
        expand=expand,
    )
    rows = [api_object_to_dict(model) for model in models]
    print_list_output(rows, format=format, quiet=quiet)
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
@models_cli.command(
    "info",
    examples=[
        "hf models info meta-llama/Llama-3.2-1B-Instruct",
        "hf models info gpt2 --expand downloads,likes,tags",
    ],
)
def models_info(
    model_id: Annotated[str, typer.Argument(help="The model ID (e.g. `username/repo-name`).")],
    revision: RevisionOpt = None,
    expand: ExpandOpt = None,
    token: TokenOpt = None,
) -> None:
    """Get info about a model on the Hub."""
    client = get_hf_api(token=token)
    # Translate hub-level lookup failures into user-facing CLI errors.
    try:
        model = client.model_info(repo_id=model_id, revision=revision, expand=expand)  # type: ignore[arg-type]
    except RepositoryNotFoundError as e:
        raise CLIError(f"Model '{model_id}' not found.") from e
    except RevisionNotFoundError as e:
        raise CLIError(f"Revision '{revision}' not found on '{model_id}'.") from e
    print(json.dumps(api_object_to_dict(model), indent=2))
|
venv/lib/python3.10/site-packages/huggingface_hub/cli/papers.py
ADDED
|
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2025 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
"""Contains commands to interact with papers on the Hugging Face Hub.
|
| 15 |
+
|
| 16 |
+
Usage:
|
| 17 |
+
# list daily papers (most recently submitted)
|
| 18 |
+
hf papers ls
|
| 19 |
+
|
| 20 |
+
# list trending papers
|
| 21 |
+
hf papers ls --sort=trending
|
| 22 |
+
|
| 23 |
+
# list papers from a specific date, ordered by upvotes
|
| 24 |
+
hf papers ls --date=2025-01-23
|
| 25 |
+
|
| 26 |
+
# list today's papers, ordered by upvotes
|
| 27 |
+
hf papers ls --date=today
|
| 28 |
+
"""
|
| 29 |
+
|
| 30 |
+
import datetime
|
| 31 |
+
import enum
|
| 32 |
+
import json
|
| 33 |
+
from typing import Annotated, Optional, get_args
|
| 34 |
+
|
| 35 |
+
import typer
|
| 36 |
+
|
| 37 |
+
from huggingface_hub.hf_api import DailyPapersSort_T
|
| 38 |
+
|
| 39 |
+
from ._cli_utils import (
|
| 40 |
+
LimitOpt,
|
| 41 |
+
TokenOpt,
|
| 42 |
+
api_object_to_dict,
|
| 43 |
+
get_hf_api,
|
| 44 |
+
typer_factory,
|
| 45 |
+
)
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
_SORT_OPTIONS = get_args(DailyPapersSort_T)
# Enum built dynamically from DailyPapersSort_T so typer can render and validate the choices.
PaperSortEnum = enum.Enum("PaperSortEnum", {s: s for s in _SORT_OPTIONS}, type=str)  # type: ignore[misc]
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def _parse_date(value: Optional[str]) -> Optional[str]:
|
| 53 |
+
"""Parse date option, converting 'today' to current date."""
|
| 54 |
+
if value is None:
|
| 55 |
+
return None
|
| 56 |
+
if value.lower() == "today":
|
| 57 |
+
return datetime.date.today().isoformat()
|
| 58 |
+
return value
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
# Typer sub-app grouping the `hf papers ...` commands.
papers_cli = typer_factory(help="Interact with papers on the Hub.")
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
@papers_cli.command(
    "ls",
    examples=[
        "hf papers ls",
        "hf papers ls --sort trending",
        "hf papers ls --date 2025-01-23",
    ],
)
def papers_ls(
    date: Annotated[
        Optional[str],
        typer.Option(
            help="Date in ISO format (YYYY-MM-DD) or 'today'.",
            callback=_parse_date,
        ),
    ] = None,
    sort: Annotated[
        Optional[PaperSortEnum],
        typer.Option(help="Sort results."),
    ] = None,
    limit: LimitOpt = 50,
    token: TokenOpt = None,
) -> None:
    """List daily papers on the Hub."""
    client = get_hf_api(token=token)
    # Unwrap the enum member into the plain string the API expects.
    sort_value = sort.value if sort is not None else None
    papers = client.list_daily_papers(date=date, sort=sort_value, limit=limit)
    rows = [api_object_to_dict(paper) for paper in papers]
    print(json.dumps(rows, indent=2))
|
venv/lib/python3.10/site-packages/huggingface_hub/cli/repo.py
ADDED
|
@@ -0,0 +1,336 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2025 The HuggingFace Team. All rights reserved.
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
"""Contains commands to interact with repositories on the Hugging Face Hub.
|
| 15 |
+
|
| 16 |
+
Usage:
|
| 17 |
+
# create a new dataset repo on the Hub
|
| 18 |
+
hf repo create my-cool-dataset --repo-type=dataset
|
| 19 |
+
|
| 20 |
+
# create a private model repo on the Hub
|
| 21 |
+
hf repo create my-cool-model --private
|
| 22 |
+
"""
|
| 23 |
+
|
| 24 |
+
import enum
|
| 25 |
+
from typing import Annotated, Optional
|
| 26 |
+
|
| 27 |
+
import typer
|
| 28 |
+
|
| 29 |
+
from huggingface_hub.errors import CLIError, HfHubHTTPError, RepositoryNotFoundError, RevisionNotFoundError
|
| 30 |
+
from huggingface_hub.utils import ANSI
|
| 31 |
+
|
| 32 |
+
from ._cli_utils import (
|
| 33 |
+
PrivateOpt,
|
| 34 |
+
RepoIdArg,
|
| 35 |
+
RepoType,
|
| 36 |
+
RepoTypeOpt,
|
| 37 |
+
RevisionOpt,
|
| 38 |
+
TokenOpt,
|
| 39 |
+
get_hf_api,
|
| 40 |
+
typer_factory,
|
| 41 |
+
)
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
# Typer sub-apps: `hf repo ...` with nested `hf repo tag ...` and `hf repo branch ...` groups.
repo_cli = typer_factory(help="Manage repos on the Hub.")
tag_cli = typer_factory(help="Manage tags for a repo on the Hub.")
branch_cli = typer_factory(help="Manage branches for a repo on the Hub.")
repo_cli.add_typer(tag_cli, name="tag")
repo_cli.add_typer(branch_cli, name="branch")
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
class GatedChoices(str, enum.Enum):
    """Accepted values for the `--gated` option of `hf repo settings`."""

    auto = "auto"
    manual = "manual"
    false = "false"
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
@repo_cli.command(
    "create",
    examples=[
        "hf repo create my-model",
        "hf repo create my-dataset --repo-type dataset --private",
    ],
)
def repo_create(
    repo_id: RepoIdArg,
    repo_type: RepoTypeOpt = RepoType.model,
    space_sdk: Annotated[
        Optional[str],
        typer.Option(
            help="Hugging Face Spaces SDK type. Required when --type is set to 'space'.",
        ),
    ] = None,
    private: PrivateOpt = None,
    token: TokenOpt = None,
    exist_ok: Annotated[
        bool,
        typer.Option(
            help="Do not raise an error if repo already exists.",
        ),
    ] = False,
    resource_group_id: Annotated[
        Optional[str],
        typer.Option(
            help="Resource group in which to create the repo. Resource groups is only available for Enterprise Hub organizations.",
        ),
    ] = None,
) -> None:
    """Create a new repo on the Hub."""
    # Delegate everything to HfApi; it returns a RepoUrl we echo back to the user.
    url = get_hf_api(token=token).create_repo(
        repo_id=repo_id,
        repo_type=repo_type.value,
        space_sdk=space_sdk,
        private=private,
        token=token,
        exist_ok=exist_ok,
        resource_group_id=resource_group_id,
    )
    print(f"Successfully created {ANSI.bold(url.repo_id)} on the Hub.")
    print(f"Your repo is now available at {ANSI.bold(url)}")
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
@repo_cli.command("delete", examples=["hf repo delete my-model"])
def repo_delete(
    repo_id: RepoIdArg,
    repo_type: RepoTypeOpt = RepoType.model,
    token: TokenOpt = None,
    missing_ok: Annotated[
        bool,
        typer.Option(
            help="If set to True, do not raise an error if repo does not exist.",
        ),
    ] = False,
) -> None:
    """Delete a repo from the Hub. This is an irreversible operation."""
    client = get_hf_api(token=token)
    client.delete_repo(repo_id=repo_id, repo_type=repo_type.value, missing_ok=missing_ok)
    print(f"Successfully deleted {ANSI.bold(repo_id)} on the Hub.")
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
@repo_cli.command("move", examples=["hf repo move old-namespace/my-model new-namespace/my-model"])
def repo_move(
    from_id: RepoIdArg,
    to_id: RepoIdArg,
    token: TokenOpt = None,
    repo_type: RepoTypeOpt = RepoType.model,
) -> None:
    """Move a repository from a namespace to another namespace."""
    client = get_hf_api(token=token)
    client.move_repo(from_id=from_id, to_id=to_id, repo_type=repo_type.value)
    print(f"Successfully moved {ANSI.bold(from_id)} to {ANSI.bold(to_id)} on the Hub.")
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
@repo_cli.command(
    "settings",
    examples=[
        "hf repo settings my-model --private",
        "hf repo settings my-model --gated auto",
    ],
)
def repo_settings(
    repo_id: RepoIdArg,
    gated: Annotated[
        Optional[GatedChoices],
        typer.Option(
            help="The gated status for the repository.",
        ),
    ] = None,
    private: Annotated[
        Optional[bool],
        typer.Option(
            help="Whether the repository should be private.",
        ),
    ] = None,
    token: TokenOpt = None,
    repo_type: RepoTypeOpt = RepoType.model,
) -> None:
    """Update the settings of a repository."""
    client = get_hf_api(token=token)
    # Unwrap the enum member; None means "leave the gated setting untouched".
    gated_value = None if gated is None else gated.value
    client.update_repo_settings(
        repo_id=repo_id,
        gated=gated_value,  # type: ignore [arg-type]
        private=private,
        repo_type=repo_type.value,
    )
    print(f"Successfully updated the settings of {ANSI.bold(repo_id)} on the Hub.")
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
@branch_cli.command(
    "create",
    examples=[
        "hf repo branch create my-model dev",
        "hf repo branch create my-model dev --revision abc123",
    ],
)
def branch_create(
    repo_id: RepoIdArg,
    branch: Annotated[
        str,
        typer.Argument(
            help="The name of the branch to create.",
        ),
    ],
    revision: RevisionOpt = None,
    token: TokenOpt = None,
    repo_type: RepoTypeOpt = RepoType.model,
    exist_ok: Annotated[
        bool,
        typer.Option(
            help="If set to True, do not raise an error if branch already exists.",
        ),
    ] = False,
) -> None:
    """Create a new branch for a repo on the Hub."""
    get_hf_api(token=token).create_branch(
        repo_id=repo_id,
        branch=branch,
        revision=revision,
        repo_type=repo_type.value,
        exist_ok=exist_ok,
    )
    print(f"Successfully created {ANSI.bold(branch)} branch on {repo_type.value} {ANSI.bold(repo_id)}")
|
| 212 |
+
|
| 213 |
+
|
| 214 |
+
@branch_cli.command("delete", examples=["hf repo branch delete my-model dev"])
def branch_delete(
    repo_id: RepoIdArg,
    branch: Annotated[
        str,
        typer.Argument(
            help="The name of the branch to delete.",
        ),
    ],
    token: TokenOpt = None,
    repo_type: RepoTypeOpt = RepoType.model,
) -> None:
    """Delete a branch from a repo on the Hub."""
    get_hf_api(token=token).delete_branch(
        repo_id=repo_id,
        branch=branch,
        repo_type=repo_type.value,
    )
    print(f"Successfully deleted {ANSI.bold(branch)} branch on {repo_type.value} {ANSI.bold(repo_id)}")
|
| 234 |
+
|
| 235 |
+
|
| 236 |
+
@tag_cli.command(
    "create",
    examples=[
        "hf repo tag create my-model v1.0",
        'hf repo tag create my-model v1.0 -m "First release"',
    ],
)
def tag_create(
    repo_id: RepoIdArg,
    tag: Annotated[
        str,
        typer.Argument(
            help="The name of the tag to create.",
        ),
    ],
    message: Annotated[
        Optional[str],
        typer.Option(
            "-m",
            "--message",
            help="The description of the tag to create.",
        ),
    ] = None,
    revision: RevisionOpt = None,
    token: TokenOpt = None,
    repo_type: RepoTypeOpt = RepoType.model,
) -> None:
    """Create a tag for a repo."""
    kind = repo_type.value
    client = get_hf_api(token=token)
    print(f"You are about to create tag {ANSI.bold(tag)} on {kind} {ANSI.bold(repo_id)}")
    # Translate hub-level failures into user-facing CLI errors; a 409 means
    # the tag already exists, any other HTTP error is re-raised untouched.
    try:
        client.create_tag(repo_id=repo_id, tag=tag, tag_message=message, revision=revision, repo_type=kind)
    except RepositoryNotFoundError as e:
        raise CLIError(f"{kind.capitalize()} '{repo_id}' not found.") from e
    except RevisionNotFoundError as e:
        raise CLIError(f"Revision '{revision}' not found.") from e
    except HfHubHTTPError as e:
        if e.response.status_code == 409:
            raise CLIError(f"Tag '{tag}' already exists on '{repo_id}'.") from e
        raise
    print(f"Tag {ANSI.bold(tag)} created on {ANSI.bold(repo_id)}")
|
| 278 |
+
|
| 279 |
+
|
| 280 |
+
@tag_cli.command("list", examples=["hf repo tag list my-model"])
def tag_list(
    repo_id: RepoIdArg,
    token: TokenOpt = None,
    repo_type: RepoTypeOpt = RepoType.model,
) -> None:
    """List tags for a repo."""
    kind = repo_type.value
    try:
        refs = get_hf_api(token=token).list_repo_refs(repo_id=repo_id, repo_type=kind)
    except RepositoryNotFoundError as e:
        raise CLIError(f"{kind.capitalize()} '{repo_id}' not found.") from e
    if not refs.tags:
        print("No tags found")
        raise typer.Exit(code=0)
    print(f"Tags for {kind} {ANSI.bold(repo_id)}:")
    for ref in refs.tags:
        print(ref.name)
|
| 299 |
+
|
| 300 |
+
|
| 301 |
+
@tag_cli.command("delete", examples=["hf repo tag delete my-model v1.0"])
def tag_delete(
    repo_id: RepoIdArg,
    tag: Annotated[
        str,
        typer.Argument(
            help="The name of the tag to delete.",
        ),
    ],
    yes: Annotated[
        bool,
        typer.Option(
            "-y",
            "--yes",
            help="Answer Yes to prompt automatically",
        ),
    ] = False,
    token: TokenOpt = None,
    repo_type: RepoTypeOpt = RepoType.model,
) -> None:
    """Delete a tag for a repo."""
    kind = repo_type.value
    print(f"You are about to delete tag {ANSI.bold(tag)} on {kind} {ANSI.bold(repo_id)}")
    # Interactive confirmation unless -y/--yes was passed; empty answer defaults to yes.
    if not yes:
        answer = input("Proceed? [Y/n] ").lower()
        if answer not in ("", "y", "yes"):
            print("Abort")
            raise typer.Exit()
    client = get_hf_api(token=token)
    try:
        client.delete_tag(repo_id=repo_id, tag=tag, repo_type=kind)
    except RepositoryNotFoundError as e:
        raise CLIError(f"{kind.capitalize()} '{repo_id}' not found.") from e
    except RevisionNotFoundError as e:
        raise CLIError(f"Tag '{tag}' not found on '{repo_id}'.") from e
    print(f"Tag {ANSI.bold(tag)} deleted on {ANSI.bold(repo_id)}")
|