content
stringlengths
1
103k
path
stringlengths
8
216
filename
stringlengths
2
179
language
stringclasses
15 values
size_bytes
int64
2
189k
quality_score
float64
0.5
0.95
complexity
float64
0
1
documentation_ratio
float64
0
1
repository
stringclasses
5 values
stars
int64
0
1k
created_date
stringdate
2023-07-10 19:21:08
2025-07-09 19:11:45
license
stringclasses
4 values
is_test
bool
2 classes
file_hash
stringlengths
32
32
\n\n
.venv\Lib\site-packages\huggingface_hub\commands\__pycache__\huggingface_cli.cpython-313.pyc
huggingface_cli.cpython-313.pyc
Other
2,648
0.8
0
0
awesome-app
580
2023-09-07T17:48:26.935817
BSD-3-Clause
false
012fc0c8eb43a6966cd5814cbcb34cae
\n\n
.venv\Lib\site-packages\huggingface_hub\commands\__pycache__\lfs.cpython-313.pyc
lfs.cpython-313.pyc
Other
8,357
0.8
0.104762
0
awesome-app
593
2023-11-21T20:31:35.527456
BSD-3-Clause
false
620700b165c63dbb4b70b0ca2cec1b0b
\n\n
.venv\Lib\site-packages\huggingface_hub\commands\__pycache__\repo.cpython-313.pyc
repo.cpython-313.pyc
Other
6,764
0.8
0.089286
0.043478
react-lib
78
2025-03-19T06:43:56.162606
MIT
false
3279ed90b2d80413b000c4968d867ac8
\n\n
.venv\Lib\site-packages\huggingface_hub\commands\__pycache__\repo_files.cpython-313.pyc
repo_files.cpython-313.pyc
Other
5,572
0.95
0.015152
0.101695
awesome-app
688
2024-11-07T08:30:34.990002
BSD-3-Clause
false
5bb5b52f33663f9352b374db1cb4b0f8
\n\n
.venv\Lib\site-packages\huggingface_hub\commands\__pycache__\scan_cache.cpython-313.pyc
scan_cache.cpython-313.pyc
Other
9,632
0.95
0.027778
0
react-lib
721
2024-09-10T20:09:25.751555
GPL-3.0
false
a3dcee86d4b0bfaa3c0ba9e384169b7d
\n\n
.venv\Lib\site-packages\huggingface_hub\commands\__pycache__\tag.cpython-313.pyc
tag.cpython-313.pyc
Other
10,403
0.8
0.073529
0
react-lib
759
2024-04-28T13:37:51.655811
Apache-2.0
false
b282644631e5f6bbb9e0e013cfa8e894
\n\n
.venv\Lib\site-packages\huggingface_hub\commands\__pycache__\upload.cpython-313.pyc
upload.cpython-313.pyc
Other
14,933
0.8
0.05303
0.090909
vue-tools
56
2024-01-28T07:35:29.425399
Apache-2.0
false
bd9c083af92155fcb60589264126b755
\n\n
.venv\Lib\site-packages\huggingface_hub\commands\__pycache__\upload_large_folder.cpython-313.pyc
upload_large_folder.cpython-313.pyc
Other
6,611
0.8
0.096774
0
python-kit
235
2024-06-28T19:52:23.619732
GPL-3.0
false
3e19eaddb3aede4eaa88b71311161d49
\n\n
.venv\Lib\site-packages\huggingface_hub\commands\__pycache__\user.cpython-313.pyc
user.cpython-313.pyc
Other
10,304
0.8
0.078652
0.064935
vue-tools
930
2024-09-27T15:15:16.826572
BSD-3-Clause
false
f50c376f9139b7aa4f39182d0b3f7210
\n\n
.venv\Lib\site-packages\huggingface_hub\commands\__pycache__\version.cpython-313.pyc
version.cpython-313.pyc
Other
1,525
0.8
0
0
node-utils
160
2024-02-06T15:29:22.488272
BSD-3-Clause
false
66718cf4049d58c6da5ac99f69f5331c
\n\n
.venv\Lib\site-packages\huggingface_hub\commands\__pycache__\_cli_utils.cpython-313.pyc
_cli_utils.cpython-313.pyc
Other
3,436
0.8
0.068966
0
python-kit
574
2024-04-21T12:26:14.567672
MIT
false
412dd812db103663b7fbadeb549f3c15
\n\n
.venv\Lib\site-packages\huggingface_hub\commands\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
1,031
0.8
0
0
react-lib
162
2024-08-27T04:57:20.828537
BSD-3-Clause
false
c035f3c148051f7e89f0471eca1dbbde
# coding=utf-8\n# Copyright 2023-present, the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"""Contains utilities used by both the sync and async inference clients."""\n\nimport base64\nimport io\nimport json\nimport logging\nfrom contextlib import contextmanager\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import (\n TYPE_CHECKING,\n Any,\n AsyncIterable,\n BinaryIO,\n ContextManager,\n Dict,\n Generator,\n Iterable,\n List,\n Literal,\n NoReturn,\n Optional,\n Union,\n overload,\n)\n\nfrom requests import HTTPError\n\nfrom huggingface_hub.errors import (\n GenerationError,\n IncompleteGenerationError,\n OverloadedError,\n TextGenerationError,\n UnknownError,\n ValidationError,\n)\n\nfrom ..utils import get_session, is_aiohttp_available, is_numpy_available, is_pillow_available\nfrom ._generated.types import ChatCompletionStreamOutput, TextGenerationStreamOutput\n\n\nif TYPE_CHECKING:\n from aiohttp import ClientResponse, ClientSession\n from PIL.Image import Image\n\n# TYPES\nUrlT = str\nPathT = Union[str, Path]\nBinaryT = Union[bytes, BinaryIO]\nContentT = Union[BinaryT, PathT, UrlT, "Image"]\n\n# Use to set a Accept: image/png header\nTASKS_EXPECTING_IMAGES = {"text-to-image", "image-to-image"}\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass\nclass RequestParameters:\n url: str\n task: str\n model: Optional[str]\n json: Optional[Union[str, Dict, List]]\n data: Optional[ContentT]\n headers: Dict[str, 
Any]\n\n\n# Add dataclass for ModelStatus. We use this dataclass in get_model_status function.\n@dataclass\nclass ModelStatus:\n """\n This Dataclass represents the model status in the HF Inference API.\n\n Args:\n loaded (`bool`):\n If the model is currently loaded into HF's Inference API. Models\n are loaded on-demand, leading to the user's first request taking longer.\n If a model is loaded, you can be assured that it is in a healthy state.\n state (`str`):\n The current state of the model. This can be 'Loaded', 'Loadable', 'TooBig'.\n If a model's state is 'Loadable', it's not too big and has a supported\n backend. Loadable models are automatically loaded when the user first\n requests inference on the endpoint. This means it is transparent for the\n user to load a model, except that the first call takes longer to complete.\n compute_type (`Dict`):\n Information about the compute resource the model is using or will use, such as 'gpu' type and number of\n replicas.\n framework (`str`):\n The name of the framework that the model was built with, such as 'transformers'\n or 'text-generation-inference'.\n """\n\n loaded: bool\n state: str\n compute_type: Dict\n framework: str\n\n\n## IMPORT UTILS\n\n\ndef _import_aiohttp():\n # Make sure `aiohttp` is installed on the machine.\n if not is_aiohttp_available():\n raise ImportError("Please install aiohttp to use `AsyncInferenceClient` (`pip install aiohttp`).")\n import aiohttp\n\n return aiohttp\n\n\ndef _import_numpy():\n """Make sure `numpy` is installed on the machine."""\n if not is_numpy_available():\n raise ImportError("Please install numpy to use deal with embeddings (`pip install numpy`).")\n import numpy\n\n return numpy\n\n\ndef _import_pil_image():\n """Make sure `PIL` is installed on the machine."""\n if not is_pillow_available():\n raise ImportError(\n "Please install Pillow to use deal with images (`pip install Pillow`). 
If you don't want the image to be"\n " post-processed, use `client.post(...)` and get the raw response from the server."\n )\n from PIL import Image\n\n return Image\n\n\n## ENCODING / DECODING UTILS\n\n\n@overload\ndef _open_as_binary(\n content: ContentT,\n) -> ContextManager[BinaryT]: ... # means "if input is not None, output is not None"\n\n\n@overload\ndef _open_as_binary(\n content: Literal[None],\n) -> ContextManager[Literal[None]]: ... # means "if input is None, output is None"\n\n\n@contextmanager # type: ignore\ndef _open_as_binary(content: Optional[ContentT]) -> Generator[Optional[BinaryT], None, None]:\n """Open `content` as a binary file, either from a URL, a local path, or raw bytes.\n\n Do nothing if `content` is None,\n\n TODO: handle base64 as input\n """\n # If content is a string => must be either a URL or a path\n if isinstance(content, str):\n if content.startswith("https://") or content.startswith("http://"):\n logger.debug(f"Downloading content from {content}")\n yield get_session().get(content).content # TODO: retrieve as stream and pipe to post request ?\n return\n content = Path(content)\n if not content.exists():\n raise FileNotFoundError(\n f"File not found at {content}. If `data` is a string, it must either be a URL or a path to a local"\n " file. To pass raw content, please encode it as bytes first."\n )\n\n # If content is a Path => open it\n if isinstance(content, Path):\n logger.debug(f"Opening content from {content}")\n with content.open("rb") as f:\n yield f\n elif hasattr(content, "save"): # PIL Image\n logger.debug("Converting PIL Image to bytes")\n buffer = io.BytesIO()\n content.save(buffer, format="PNG")\n buffer.seek(0)\n yield buffer\n else:\n # Otherwise: already a file-like object or None\n yield content\n\n\ndef _b64_encode(content: ContentT) -> str:\n """Encode a raw file (image, audio) into base64. 
Can be bytes, an opened file, a path or a URL."""\n with _open_as_binary(content) as data:\n data_as_bytes = data if isinstance(data, bytes) else data.read()\n return base64.b64encode(data_as_bytes).decode()\n\n\ndef _b64_to_image(encoded_image: str) -> "Image":\n """Parse a base64-encoded string into a PIL Image."""\n Image = _import_pil_image()\n return Image.open(io.BytesIO(base64.b64decode(encoded_image)))\n\n\ndef _bytes_to_list(content: bytes) -> List:\n """Parse bytes from a Response object into a Python list.\n\n Expects the response body to be JSON-encoded data.\n\n NOTE: This is exactly the same implementation as `_bytes_to_dict` and will not complain if the returned data is a\n dictionary. The only advantage of having both is to help the user (and mypy) understand what kind of data to expect.\n """\n return json.loads(content.decode())\n\n\ndef _bytes_to_dict(content: bytes) -> Dict:\n """Parse bytes from a Response object into a Python dictionary.\n\n Expects the response body to be JSON-encoded data.\n\n NOTE: This is exactly the same implementation as `_bytes_to_list` and will not complain if the returned data is a\n list. The only advantage of having both is to help the user (and mypy) understand what kind of data to expect.\n """\n return json.loads(content.decode())\n\n\ndef _bytes_to_image(content: bytes) -> "Image":\n """Parse bytes from a Response object into a PIL Image.\n\n Expects the response body to be raw bytes. 
To deal with b64 encoded images, use `_b64_to_image` instead.\n """\n Image = _import_pil_image()\n return Image.open(io.BytesIO(content))\n\n\ndef _as_dict(response: Union[bytes, Dict]) -> Dict:\n return json.loads(response) if isinstance(response, bytes) else response\n\n\n## PAYLOAD UTILS\n\n\n## STREAMING UTILS\n\n\ndef _stream_text_generation_response(\n bytes_output_as_lines: Iterable[bytes], details: bool\n) -> Union[Iterable[str], Iterable[TextGenerationStreamOutput]]:\n """Used in `InferenceClient.text_generation`."""\n # Parse ServerSentEvents\n for byte_payload in bytes_output_as_lines:\n try:\n output = _format_text_generation_stream_output(byte_payload, details)\n except StopIteration:\n break\n if output is not None:\n yield output\n\n\nasync def _async_stream_text_generation_response(\n bytes_output_as_lines: AsyncIterable[bytes], details: bool\n) -> Union[AsyncIterable[str], AsyncIterable[TextGenerationStreamOutput]]:\n """Used in `AsyncInferenceClient.text_generation`."""\n # Parse ServerSentEvents\n async for byte_payload in bytes_output_as_lines:\n try:\n output = _format_text_generation_stream_output(byte_payload, details)\n except StopIteration:\n break\n if output is not None:\n yield output\n\n\ndef _format_text_generation_stream_output(\n byte_payload: bytes, details: bool\n) -> Optional[Union[str, TextGenerationStreamOutput]]:\n if not byte_payload.startswith(b"data:"):\n return None # empty line\n\n if byte_payload.strip() == b"data: [DONE]":\n raise StopIteration("[DONE] signal received.")\n\n # Decode payload\n payload = byte_payload.decode("utf-8")\n json_payload = json.loads(payload.lstrip("data:").rstrip("/n"))\n\n # Either an error as being returned\n if json_payload.get("error") is not None:\n raise _parse_text_generation_error(json_payload["error"], json_payload.get("error_type"))\n\n # Or parse token payload\n output = TextGenerationStreamOutput.parse_obj_as_instance(json_payload)\n return output.token.text if not details else 
output\n\n\ndef _stream_chat_completion_response(\n bytes_lines: Iterable[bytes],\n) -> Iterable[ChatCompletionStreamOutput]:\n """Used in `InferenceClient.chat_completion` if model is served with TGI."""\n for item in bytes_lines:\n try:\n output = _format_chat_completion_stream_output(item)\n except StopIteration:\n break\n if output is not None:\n yield output\n\n\nasync def _async_stream_chat_completion_response(\n bytes_lines: AsyncIterable[bytes],\n) -> AsyncIterable[ChatCompletionStreamOutput]:\n """Used in `AsyncInferenceClient.chat_completion`."""\n async for item in bytes_lines:\n try:\n output = _format_chat_completion_stream_output(item)\n except StopIteration:\n break\n if output is not None:\n yield output\n\n\ndef _format_chat_completion_stream_output(\n byte_payload: bytes,\n) -> Optional[ChatCompletionStreamOutput]:\n if not byte_payload.startswith(b"data:"):\n return None # empty line\n\n if byte_payload.strip() == b"data: [DONE]":\n raise StopIteration("[DONE] signal received.")\n\n # Decode payload\n payload = byte_payload.decode("utf-8")\n json_payload = json.loads(payload.lstrip("data:").rstrip("/n"))\n\n # Either an error as being returned\n if json_payload.get("error") is not None:\n raise _parse_text_generation_error(json_payload["error"], json_payload.get("error_type"))\n\n # Or parse token payload\n return ChatCompletionStreamOutput.parse_obj_as_instance(json_payload)\n\n\nasync def _async_yield_from(client: "ClientSession", response: "ClientResponse") -> AsyncIterable[bytes]:\n async for byte_payload in response.content:\n yield byte_payload.strip()\n await client.close()\n\n\n# "TGI servers" are servers running with the `text-generation-inference` backend.\n# This backend is the go-to solution to run large language models at scale. However,\n# for some smaller models (e.g. "gpt2") the default `transformers` + `api-inference`\n# solution is still in use.\n#\n# Both approaches have very similar APIs, but not exactly the same. 
What we do first in\n# the `text_generation` method is to assume the model is served via TGI. If we realize\n# it's not the case (i.e. we receive an HTTP 400 Bad Request), we fallback to the\n# default API with a warning message. When that's the case, We remember the unsupported\n# attributes for this model in the `_UNSUPPORTED_TEXT_GENERATION_KWARGS` global variable.\n#\n# In addition, TGI servers have a built-in API route for chat-completion, which is not\n# available on the default API. We use this route to provide a more consistent behavior\n# when available.\n#\n# For more details, see https://github.com/huggingface/text-generation-inference and\n# https://huggingface.co/docs/api-inference/detailed_parameters#text-generation-task.\n\n_UNSUPPORTED_TEXT_GENERATION_KWARGS: Dict[Optional[str], List[str]] = {}\n\n\ndef _set_unsupported_text_generation_kwargs(model: Optional[str], unsupported_kwargs: List[str]) -> None:\n _UNSUPPORTED_TEXT_GENERATION_KWARGS.setdefault(model, []).extend(unsupported_kwargs)\n\n\ndef _get_unsupported_text_generation_kwargs(model: Optional[str]) -> List[str]:\n return _UNSUPPORTED_TEXT_GENERATION_KWARGS.get(model, [])\n\n\n# TEXT GENERATION ERRORS\n# ----------------------\n# Text-generation errors are parsed separately to handle as much as possible the errors returned by the text generation\n# inference project (https://github.com/huggingface/text-generation-inference).\n# ----------------------\n\n\ndef raise_text_generation_error(http_error: HTTPError) -> NoReturn:\n """\n Try to parse text-generation-inference error message and raise HTTPError in any case.\n\n Args:\n error (`HTTPError`):\n The HTTPError that have been raised.\n """\n # Try to parse a Text Generation Inference error\n\n try:\n # Hacky way to retrieve payload in case of aiohttp error\n payload = getattr(http_error, "response_error_payload", None) or http_error.response.json()\n error = payload.get("error")\n error_type = payload.get("error_type")\n except Exception: 
# no payload\n raise http_error\n\n # If error_type => more information than `hf_raise_for_status`\n if error_type is not None:\n exception = _parse_text_generation_error(error, error_type)\n raise exception from http_error\n\n # Otherwise, fallback to default error\n raise http_error\n\n\ndef _parse_text_generation_error(error: Optional[str], error_type: Optional[str]) -> TextGenerationError:\n if error_type == "generation":\n return GenerationError(error) # type: ignore\n if error_type == "incomplete_generation":\n return IncompleteGenerationError(error) # type: ignore\n if error_type == "overloaded":\n return OverloadedError(error) # type: ignore\n if error_type == "validation":\n return ValidationError(error) # type: ignore\n return UnknownError(error) # type: ignore\n
.venv\Lib\site-packages\huggingface_hub\inference\_common.py
_common.py
Python
14,781
0.95
0.173302
0.178788
react-lib
301
2025-02-08T10:01:26.748005
BSD-3-Clause
false
69a7c78f4d18bcd6025117a5a91e880c
# Inference code generated from the JSON schema spec in @huggingface/tasks.\n#\n# See:\n# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts\n# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.\nfrom typing import Literal, Optional\n\nfrom .base import BaseInferenceType, dataclass_with_extra\n\n\nAudioClassificationOutputTransform = Literal["sigmoid", "softmax", "none"]\n\n\n@dataclass_with_extra\nclass AudioClassificationParameters(BaseInferenceType):\n """Additional inference parameters for Audio Classification"""\n\n function_to_apply: Optional["AudioClassificationOutputTransform"] = None\n """The function to apply to the model outputs in order to retrieve the scores."""\n top_k: Optional[int] = None\n """When specified, limits the output to the top K most probable classes."""\n\n\n@dataclass_with_extra\nclass AudioClassificationInput(BaseInferenceType):\n """Inputs for Audio Classification inference"""\n\n inputs: str\n """The input audio data as a base64-encoded string. If no `parameters` are provided, you can\n also provide the audio data as a raw bytes payload.\n """\n parameters: Optional[AudioClassificationParameters] = None\n """Additional inference parameters for Audio Classification"""\n\n\n@dataclass_with_extra\nclass AudioClassificationOutputElement(BaseInferenceType):\n """Outputs for Audio Classification inference"""\n\n label: str\n """The predicted class label."""\n score: float\n """The corresponding probability."""\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\audio_classification.py
audio_classification.py
Python
1,573
0.95
0.209302
0.16129
react-lib
180
2023-10-17T06:20:26.019579
MIT
false
8dd808633fc17c2470669acbf686a760
# Inference code generated from the JSON schema spec in @huggingface/tasks.\n#\n# See:\n# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts\n# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.\nfrom typing import Any\n\nfrom .base import BaseInferenceType, dataclass_with_extra\n\n\n@dataclass_with_extra\nclass AudioToAudioInput(BaseInferenceType):\n """Inputs for Audio to Audio inference"""\n\n inputs: Any\n """The input audio data"""\n\n\n@dataclass_with_extra\nclass AudioToAudioOutputElement(BaseInferenceType):\n """Outputs of inference for the Audio To Audio task\n A generated audio file with its label.\n """\n\n blob: Any\n """The generated audio file."""\n content_type: str\n """The content type of audio file."""\n label: str\n """The label of the audio file."""\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\audio_to_audio.py
audio_to_audio.py
Python
891
0.95
0.133333
0.217391
vue-tools
635
2023-10-31T14:53:46.035214
Apache-2.0
false
11f1d199997ea372cadfe248703d35cb
# Inference code generated from the JSON schema spec in @huggingface/tasks.\n#\n# See:\n# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts\n# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.\nfrom typing import List, Literal, Optional, Union\n\nfrom .base import BaseInferenceType, dataclass_with_extra\n\n\nAutomaticSpeechRecognitionEarlyStoppingEnum = Literal["never"]\n\n\n@dataclass_with_extra\nclass AutomaticSpeechRecognitionGenerationParameters(BaseInferenceType):\n """Parametrization of the text generation process"""\n\n do_sample: Optional[bool] = None\n """Whether to use sampling instead of greedy decoding when generating new tokens."""\n early_stopping: Optional[Union[bool, "AutomaticSpeechRecognitionEarlyStoppingEnum"]] = None\n """Controls the stopping condition for beam-based methods."""\n epsilon_cutoff: Optional[float] = None\n """If set to float strictly between 0 and 1, only tokens with a conditional probability\n greater than epsilon_cutoff will be sampled. In the paper, suggested values range from\n 3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language\n Model Desmoothing](https://hf.co/papers/2210.15191) for more details.\n """\n eta_cutoff: Optional[float] = None\n """Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to\n float strictly between 0 and 1, a token is only considered if it is greater than either\n eta_cutoff or sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits))). The latter\n term is intuitively the expected next token probability, scaled by sqrt(eta_cutoff). 
In\n the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model.\n See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191)\n for more details.\n """\n max_length: Optional[int] = None\n """The maximum length (in tokens) of the generated text, including the input."""\n max_new_tokens: Optional[int] = None\n """The maximum number of tokens to generate. Takes precedence over max_length."""\n min_length: Optional[int] = None\n """The minimum length (in tokens) of the generated text, including the input."""\n min_new_tokens: Optional[int] = None\n """The minimum number of tokens to generate. Takes precedence over min_length."""\n num_beam_groups: Optional[int] = None\n """Number of groups to divide num_beams into in order to ensure diversity among different\n groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details.\n """\n num_beams: Optional[int] = None\n """Number of beams to use for beam search."""\n penalty_alpha: Optional[float] = None\n """The value balances the model confidence and the degeneration penalty in contrastive\n search decoding.\n """\n temperature: Optional[float] = None\n """The value used to modulate the next token probabilities."""\n top_k: Optional[int] = None\n """The number of highest probability vocabulary tokens to keep for top-k-filtering."""\n top_p: Optional[float] = None\n """If set to float < 1, only the smallest set of most probable tokens with probabilities\n that add up to top_p or higher are kept for generation.\n """\n typical_p: Optional[float] = None\n """Local typicality measures how similar the conditional probability of predicting a target\n token next is to the expected conditional probability of predicting a random token next,\n given the partial text already generated. If set to float < 1, the smallest set of the\n most locally typical tokens with probabilities that add up to typical_p or higher are\n kept for generation. 
See [this paper](https://hf.co/papers/2202.00666) for more details.\n """\n use_cache: Optional[bool] = None\n """Whether the model should use the past last key/values attentions to speed up decoding"""\n\n\n@dataclass_with_extra\nclass AutomaticSpeechRecognitionParameters(BaseInferenceType):\n """Additional inference parameters for Automatic Speech Recognition"""\n\n generation_parameters: Optional[AutomaticSpeechRecognitionGenerationParameters] = None\n """Parametrization of the text generation process"""\n return_timestamps: Optional[bool] = None\n """Whether to output corresponding timestamps with the generated text"""\n\n\n@dataclass_with_extra\nclass AutomaticSpeechRecognitionInput(BaseInferenceType):\n """Inputs for Automatic Speech Recognition inference"""\n\n inputs: str\n """The input audio data as a base64-encoded string. If no `parameters` are provided, you can\n also provide the audio data as a raw bytes payload.\n """\n parameters: Optional[AutomaticSpeechRecognitionParameters] = None\n """Additional inference parameters for Automatic Speech Recognition"""\n\n\n@dataclass_with_extra\nclass AutomaticSpeechRecognitionOutputChunk(BaseInferenceType):\n text: str\n """A chunk of text identified by the model"""\n timestamp: List[float]\n """The start and end timestamps corresponding with the text"""\n\n\n@dataclass_with_extra\nclass AutomaticSpeechRecognitionOutput(BaseInferenceType):\n """Outputs of inference for the Automatic Speech Recognition task"""\n\n text: str\n """The recognized text."""\n chunks: Optional[List[AutomaticSpeechRecognitionOutputChunk]] = None\n """When returnTimestamps is enabled, chunks contains a list of audio chunks identified by\n the model.\n """\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\automatic_speech_recognition.py
automatic_speech_recognition.py
Python
5,515
0.95
0.168142
0.052083
python-kit
529
2025-03-04T15:17:47.717045
GPL-3.0
false
9c95b3f65b3eb0ad2bde29b7f01c0b64
# Copyright 2024 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the "License");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an "AS IS" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"""Contains a base class for all inference types."""\n\nimport inspect\nimport json\nfrom dataclasses import asdict, dataclass\nfrom typing import Any, Dict, List, Type, TypeVar, Union, get_args\n\n\nT = TypeVar("T", bound="BaseInferenceType")\n\n\ndef _repr_with_extra(self):\n fields = list(self.__dataclass_fields__.keys())\n other_fields = list(k for k in self.__dict__ if k not in fields)\n return f"{self.__class__.__name__}({', '.join(f'{k}={self.__dict__[k]!r}' for k in fields + other_fields)})"\n\n\ndef dataclass_with_extra(cls: Type[T]) -> Type[T]:\n """Decorator to add a custom __repr__ method to a dataclass, showing all fields, including extra ones.\n\n This decorator only works with dataclasses that inherit from `BaseInferenceType`.\n """\n cls = dataclass(cls)\n cls.__repr__ = _repr_with_extra # type: ignore[method-assign]\n return cls\n\n\n@dataclass\nclass BaseInferenceType(dict):\n """Base class for all inference types.\n\n Object is a dataclass and a dict for backward compatibility but plan is to remove the dict part in the future.\n\n Handle parsing from dict, list and json strings in a permissive way to ensure future-compatibility (e.g. 
all fields\n are made optional, and non-expected fields are added as dict attributes).\n """\n\n @classmethod\n def parse_obj_as_list(cls: Type[T], data: Union[bytes, str, List, Dict]) -> List[T]:\n """Alias to parse server response and return a single instance.\n\n See `parse_obj` for more details.\n """\n output = cls.parse_obj(data)\n if not isinstance(output, list):\n raise ValueError(f"Invalid input data for {cls}. Expected a list, but got {type(output)}.")\n return output\n\n @classmethod\n def parse_obj_as_instance(cls: Type[T], data: Union[bytes, str, List, Dict]) -> T:\n """Alias to parse server response and return a single instance.\n\n See `parse_obj` for more details.\n """\n output = cls.parse_obj(data)\n if isinstance(output, list):\n raise ValueError(f"Invalid input data for {cls}. Expected a single instance, but got a list.")\n return output\n\n @classmethod\n def parse_obj(cls: Type[T], data: Union[bytes, str, List, Dict]) -> Union[List[T], T]:\n """Parse server response as a dataclass or list of dataclasses.\n\n To enable future-compatibility, we want to handle cases where the server return more fields than expected.\n In such cases, we don't want to raise an error but still create the dataclass object. 
Remaining fields are\n added as dict attributes.\n """\n # Parse server response (from bytes)\n if isinstance(data, bytes):\n data = data.decode()\n if isinstance(data, str):\n data = json.loads(data)\n\n # If a list, parse each item individually\n if isinstance(data, List):\n return [cls.parse_obj(d) for d in data] # type: ignore [misc]\n\n # At this point, we expect a dict\n if not isinstance(data, dict):\n raise ValueError(f"Invalid data type: {type(data)}")\n\n init_values = {}\n other_values = {}\n for key, value in data.items():\n key = normalize_key(key)\n if key in cls.__dataclass_fields__ and cls.__dataclass_fields__[key].init:\n if isinstance(value, dict) or isinstance(value, list):\n field_type = cls.__dataclass_fields__[key].type\n\n # if `field_type` is a `BaseInferenceType`, parse it\n if inspect.isclass(field_type) and issubclass(field_type, BaseInferenceType):\n value = field_type.parse_obj(value)\n\n # otherwise, recursively parse nested dataclasses (if possible)\n # `get_args` returns handle Union and Optional for us\n else:\n expected_types = get_args(field_type)\n for expected_type in expected_types:\n if getattr(expected_type, "_name", None) == "List":\n expected_type = get_args(expected_type)[\n 0\n ] # assume same type for all items in the list\n if inspect.isclass(expected_type) and issubclass(expected_type, BaseInferenceType):\n value = expected_type.parse_obj(value)\n break\n init_values[key] = value\n else:\n other_values[key] = value\n\n # Make all missing fields default to None\n # => ensure that dataclass initialization will never fail even if the server does not return all fields.\n for key in cls.__dataclass_fields__:\n if key not in init_values:\n init_values[key] = None\n\n # Initialize dataclass with expected values\n item = cls(**init_values)\n\n # Add remaining fields as dict attributes\n item.update(other_values)\n\n # Add remaining fields as extra dataclass fields.\n # They won't be part of the dataclass fields but will be 
accessible as attributes.\n # Use @dataclass_with_extra to show them in __repr__.\n item.__dict__.update(other_values)\n return item\n\n def __post_init__(self):\n self.update(asdict(self))\n\n def __setitem__(self, __key: Any, __value: Any) -> None:\n # Hacky way to keep dataclass values in sync when dict is updated\n super().__setitem__(__key, __value)\n if __key in self.__dataclass_fields__ and getattr(self, __key, None) != __value:\n self.__setattr__(__key, __value)\n return\n\n def __setattr__(self, __name: str, __value: Any) -> None:\n # Hacky way to keep dict values is sync when dataclass is updated\n super().__setattr__(__name, __value)\n if self.get(__name) != __value:\n self[__name] = __value\n return\n\n\ndef normalize_key(key: str) -> str:\n # e.g "content-type" -> "content_type", "Accept" -> "accept"\n return key.replace("-", "_").replace(" ", "_").lower()\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\base.py
base.py
Python
6,751
0.95
0.285714
0.224806
python-kit
105
2024-02-03T19:49:12.284381
GPL-3.0
false
a13828852539ca0582f9047d629ad04f
# Inference code generated from the JSON schema spec in @huggingface/tasks.\n#\n# See:\n# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts\n# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.\nfrom typing import Any, Dict, List, Literal, Optional, Union\n\nfrom .base import BaseInferenceType, dataclass_with_extra\n\n\n@dataclass_with_extra\nclass ChatCompletionInputURL(BaseInferenceType):\n url: str\n\n\nChatCompletionInputMessageChunkType = Literal["text", "image_url"]\n\n\n@dataclass_with_extra\nclass ChatCompletionInputMessageChunk(BaseInferenceType):\n type: "ChatCompletionInputMessageChunkType"\n image_url: Optional[ChatCompletionInputURL] = None\n text: Optional[str] = None\n\n\n@dataclass_with_extra\nclass ChatCompletionInputFunctionDefinition(BaseInferenceType):\n name: str\n parameters: Any\n description: Optional[str] = None\n\n\n@dataclass_with_extra\nclass ChatCompletionInputToolCall(BaseInferenceType):\n function: ChatCompletionInputFunctionDefinition\n id: str\n type: str\n\n\n@dataclass_with_extra\nclass ChatCompletionInputMessage(BaseInferenceType):\n role: str\n content: Optional[Union[List[ChatCompletionInputMessageChunk], str]] = None\n name: Optional[str] = None\n tool_calls: Optional[List[ChatCompletionInputToolCall]] = None\n\n\n@dataclass_with_extra\nclass ChatCompletionInputJSONSchema(BaseInferenceType):\n name: str\n """\n The name of the response format.\n """\n description: Optional[str] = None\n """\n A description of what the response format is for, used by the model to determine\n how to respond in the format.\n """\n schema: Optional[Dict[str, object]] = None\n """\n The schema for the response format, described as a JSON Schema object. Learn how\n to build JSON schemas [here](https://json-schema.org/).\n """\n strict: Optional[bool] = None\n """\n Whether to enable strict schema adherence when generating the output. 
If set to\n true, the model will always follow the exact schema defined in the `schema`\n field.\n """\n\n\n@dataclass_with_extra\nclass ChatCompletionInputResponseFormatText(BaseInferenceType):\n type: Literal["text"]\n\n\n@dataclass_with_extra\nclass ChatCompletionInputResponseFormatJSONSchema(BaseInferenceType):\n type: Literal["json_schema"]\n json_schema: ChatCompletionInputJSONSchema\n\n\n@dataclass_with_extra\nclass ChatCompletionInputResponseFormatJSONObject(BaseInferenceType):\n type: Literal["json_object"]\n\n\nChatCompletionInputGrammarType = Union[\n ChatCompletionInputResponseFormatText,\n ChatCompletionInputResponseFormatJSONSchema,\n ChatCompletionInputResponseFormatJSONObject,\n]\n\n\n@dataclass_with_extra\nclass ChatCompletionInputStreamOptions(BaseInferenceType):\n include_usage: Optional[bool] = None\n """If set, an additional chunk will be streamed before the data: [DONE] message. The usage\n field on this chunk shows the token usage statistics for the entire request, and the\n choices field will always be an empty array. 
All other chunks will also include a usage\n field, but with a null value.\n """\n\n\n@dataclass_with_extra\nclass ChatCompletionInputFunctionName(BaseInferenceType):\n name: str\n\n\n@dataclass_with_extra\nclass ChatCompletionInputToolChoiceClass(BaseInferenceType):\n function: ChatCompletionInputFunctionName\n\n\nChatCompletionInputToolChoiceEnum = Literal["auto", "none", "required"]\n\n\n@dataclass_with_extra\nclass ChatCompletionInputTool(BaseInferenceType):\n function: ChatCompletionInputFunctionDefinition\n type: str\n\n\n@dataclass_with_extra\nclass ChatCompletionInput(BaseInferenceType):\n """Chat Completion Input.\n Auto-generated from TGI specs.\n For more details, check out\n https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.\n """\n\n messages: List[ChatCompletionInputMessage]\n """A list of messages comprising the conversation so far."""\n frequency_penalty: Optional[float] = None\n """Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing\n frequency in the text so far,\n decreasing the model's likelihood to repeat the same line verbatim.\n """\n logit_bias: Optional[List[float]] = None\n """UNUSED\n Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON\n object that maps tokens\n (specified by their token ID in the tokenizer) to an associated bias value from -100 to\n 100. Mathematically,\n the bias is added to the logits generated by the model prior to sampling. The exact\n effect will vary per model,\n but values between -1 and 1 should decrease or increase likelihood of selection; values\n like -100 or 100 should\n result in a ban or exclusive selection of the relevant token.\n """\n logprobs: Optional[bool] = None\n """Whether to return log probabilities of the output tokens or not. 
If true, returns the log\n probabilities of each\n output token returned in the content of message.\n """\n max_tokens: Optional[int] = None\n """The maximum number of tokens that can be generated in the chat completion."""\n model: Optional[str] = None\n """[UNUSED] ID of the model to use. See the model endpoint compatibility table for details\n on which models work with the Chat API.\n """\n n: Optional[int] = None\n """UNUSED\n How many chat completion choices to generate for each input message. Note that you will\n be charged based on the\n number of generated tokens across all of the choices. Keep n as 1 to minimize costs.\n """\n presence_penalty: Optional[float] = None\n """Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they\n appear in the text so far,\n increasing the model's likelihood to talk about new topics\n """\n response_format: Optional[ChatCompletionInputGrammarType] = None\n seed: Optional[int] = None\n stop: Optional[List[str]] = None\n """Up to 4 sequences where the API will stop generating further tokens."""\n stream: Optional[bool] = None\n stream_options: Optional[ChatCompletionInputStreamOptions] = None\n temperature: Optional[float] = None\n """What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the\n output more random, while\n lower values like 0.2 will make it more focused and deterministic.\n We generally recommend altering this or `top_p` but not both.\n """\n tool_choice: Optional[Union[ChatCompletionInputToolChoiceClass, "ChatCompletionInputToolChoiceEnum"]] = None\n tool_prompt: Optional[str] = None\n """A prompt to be appended before the tools"""\n tools: Optional[List[ChatCompletionInputTool]] = None\n """A list of tools the model may call. 
Currently, only functions are supported as a tool.\n Use this to provide a list of\n functions the model may generate JSON inputs for.\n """\n top_logprobs: Optional[int] = None\n """An integer between 0 and 5 specifying the number of most likely tokens to return at each\n token position, each with\n an associated log probability. logprobs must be set to true if this parameter is used.\n """\n top_p: Optional[float] = None\n """An alternative to sampling with temperature, called nucleus sampling, where the model\n considers the results of the\n tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10%\n probability mass are considered.\n """\n\n\n@dataclass_with_extra\nclass ChatCompletionOutputTopLogprob(BaseInferenceType):\n logprob: float\n token: str\n\n\n@dataclass_with_extra\nclass ChatCompletionOutputLogprob(BaseInferenceType):\n logprob: float\n token: str\n top_logprobs: List[ChatCompletionOutputTopLogprob]\n\n\n@dataclass_with_extra\nclass ChatCompletionOutputLogprobs(BaseInferenceType):\n content: List[ChatCompletionOutputLogprob]\n\n\n@dataclass_with_extra\nclass ChatCompletionOutputFunctionDefinition(BaseInferenceType):\n arguments: str\n name: str\n description: Optional[str] = None\n\n\n@dataclass_with_extra\nclass ChatCompletionOutputToolCall(BaseInferenceType):\n function: ChatCompletionOutputFunctionDefinition\n id: str\n type: str\n\n\n@dataclass_with_extra\nclass ChatCompletionOutputMessage(BaseInferenceType):\n role: str\n content: Optional[str] = None\n tool_call_id: Optional[str] = None\n tool_calls: Optional[List[ChatCompletionOutputToolCall]] = None\n\n\n@dataclass_with_extra\nclass ChatCompletionOutputComplete(BaseInferenceType):\n finish_reason: str\n index: int\n message: ChatCompletionOutputMessage\n logprobs: Optional[ChatCompletionOutputLogprobs] = None\n\n\n@dataclass_with_extra\nclass ChatCompletionOutputUsage(BaseInferenceType):\n completion_tokens: int\n prompt_tokens: int\n total_tokens: 
int\n\n\n@dataclass_with_extra\nclass ChatCompletionOutput(BaseInferenceType):\n """Chat Completion Output.\n Auto-generated from TGI specs.\n For more details, check out\n https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.\n """\n\n choices: List[ChatCompletionOutputComplete]\n created: int\n id: str\n model: str\n system_fingerprint: str\n usage: ChatCompletionOutputUsage\n\n\n@dataclass_with_extra\nclass ChatCompletionStreamOutputFunction(BaseInferenceType):\n arguments: str\n name: Optional[str] = None\n\n\n@dataclass_with_extra\nclass ChatCompletionStreamOutputDeltaToolCall(BaseInferenceType):\n function: ChatCompletionStreamOutputFunction\n id: str\n index: int\n type: str\n\n\n@dataclass_with_extra\nclass ChatCompletionStreamOutputDelta(BaseInferenceType):\n role: str\n content: Optional[str] = None\n tool_call_id: Optional[str] = None\n tool_calls: Optional[List[ChatCompletionStreamOutputDeltaToolCall]] = None\n\n\n@dataclass_with_extra\nclass ChatCompletionStreamOutputTopLogprob(BaseInferenceType):\n logprob: float\n token: str\n\n\n@dataclass_with_extra\nclass ChatCompletionStreamOutputLogprob(BaseInferenceType):\n logprob: float\n token: str\n top_logprobs: List[ChatCompletionStreamOutputTopLogprob]\n\n\n@dataclass_with_extra\nclass ChatCompletionStreamOutputLogprobs(BaseInferenceType):\n content: List[ChatCompletionStreamOutputLogprob]\n\n\n@dataclass_with_extra\nclass ChatCompletionStreamOutputChoice(BaseInferenceType):\n delta: ChatCompletionStreamOutputDelta\n index: int\n finish_reason: Optional[str] = None\n logprobs: Optional[ChatCompletionStreamOutputLogprobs] = None\n\n\n@dataclass_with_extra\nclass ChatCompletionStreamOutputUsage(BaseInferenceType):\n completion_tokens: int\n prompt_tokens: int\n total_tokens: int\n\n\n@dataclass_with_extra\nclass ChatCompletionStreamOutput(BaseInferenceType):\n """Chat Completion Stream Output.\n Auto-generated from TGI specs.\n For more details, check out\n 
https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.\n """\n\n choices: List[ChatCompletionStreamOutputChoice]\n created: int\n id: str\n model: str\n system_fingerprint: str\n usage: Optional[ChatCompletionStreamOutputUsage] = None\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\chat_completion.py
chat_completion.py
Python
11,182
0.95
0.130435
0.01845
react-lib
525
2023-07-23T19:23:23.012695
GPL-3.0
false
ef4ef2c3807e032ff7c897f41c249d06
# Inference code generated from the JSON schema spec in @huggingface/tasks.\n#\n# See:\n# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts\n# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.\nfrom typing import Any, Dict, Optional\n\nfrom .base import BaseInferenceType, dataclass_with_extra\n\n\n@dataclass_with_extra\nclass DepthEstimationInput(BaseInferenceType):\n """Inputs for Depth Estimation inference"""\n\n inputs: Any\n """The input image data"""\n parameters: Optional[Dict[str, Any]] = None\n """Additional inference parameters for Depth Estimation"""\n\n\n@dataclass_with_extra\nclass DepthEstimationOutput(BaseInferenceType):\n """Outputs of inference for the Depth Estimation task"""\n\n depth: Any\n """The predicted depth as an image"""\n predicted_depth: Any\n """The predicted depth as a tensor"""\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\depth_estimation.py
depth_estimation.py
Python
929
0.95
0.178571
0.238095
react-lib
84
2024-03-31T10:31:46.626214
Apache-2.0
false
68be7b047b531e205c3ae0251915e3f9
# Inference code generated from the JSON schema spec in @huggingface/tasks.\n#\n# See:\n# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts\n# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.\nfrom typing import Any, List, Optional, Union\n\nfrom .base import BaseInferenceType, dataclass_with_extra\n\n\n@dataclass_with_extra\nclass DocumentQuestionAnsweringInputData(BaseInferenceType):\n """One (document, question) pair to answer"""\n\n image: Any\n """The image on which the question is asked"""\n question: str\n """A question to ask of the document"""\n\n\n@dataclass_with_extra\nclass DocumentQuestionAnsweringParameters(BaseInferenceType):\n """Additional inference parameters for Document Question Answering"""\n\n doc_stride: Optional[int] = None\n """If the words in the document are too long to fit with the question for the model, it will\n be split in several chunks with some overlap. This argument controls the size of that\n overlap.\n """\n handle_impossible_answer: Optional[bool] = None\n """Whether to accept impossible as an answer"""\n lang: Optional[str] = None\n """Language to use while running OCR. Defaults to english."""\n max_answer_len: Optional[int] = None\n """The maximum length of predicted answers (e.g., only answers with a shorter length are\n considered).\n """\n max_question_len: Optional[int] = None\n """The maximum length of the question after tokenization. It will be truncated if needed."""\n max_seq_len: Optional[int] = None\n """The maximum length of the total sentence (context + question) in tokens of each chunk\n passed to the model. The context will be split in several chunks (using doc_stride as\n overlap) if needed.\n """\n top_k: Optional[int] = None\n """The number of answers to return (will be chosen by order of likelihood). 
Can return less\n than top_k answers if there are not enough options available within the context.\n """\n word_boxes: Optional[List[Union[List[float], str]]] = None\n """A list of words and bounding boxes (normalized 0->1000). If provided, the inference will\n skip the OCR step and use the provided bounding boxes instead.\n """\n\n\n@dataclass_with_extra\nclass DocumentQuestionAnsweringInput(BaseInferenceType):\n """Inputs for Document Question Answering inference"""\n\n inputs: DocumentQuestionAnsweringInputData\n """One (document, question) pair to answer"""\n parameters: Optional[DocumentQuestionAnsweringParameters] = None\n """Additional inference parameters for Document Question Answering"""\n\n\n@dataclass_with_extra\nclass DocumentQuestionAnsweringOutputElement(BaseInferenceType):\n """Outputs of inference for the Document Question Answering task"""\n\n answer: str\n """The answer to the question."""\n end: int\n """The end word index of the answer (in the OCR’d version of the input or provided word\n boxes).\n """\n score: float\n """The probability associated to the answer."""\n start: int\n """The start word index of the answer (in the OCR’d version of the input or provided word\n boxes).\n """\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\document_question_answering.py
document_question_answering.py
Python
3,202
0.95
0.1625
0.074627
react-lib
440
2024-12-29T09:44:10.774819
Apache-2.0
false
7dc0ecd20b5fcc5eda2c1c2069569f2f
# Inference code generated from the JSON schema spec in @huggingface/tasks.\n#\n# See:\n# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts\n# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.\nfrom typing import List, Literal, Optional, Union\n\nfrom .base import BaseInferenceType, dataclass_with_extra\n\n\nFeatureExtractionInputTruncationDirection = Literal["Left", "Right"]\n\n\n@dataclass_with_extra\nclass FeatureExtractionInput(BaseInferenceType):\n """Feature Extraction Input.\n Auto-generated from TEI specs.\n For more details, check out\n https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tei-import.ts.\n """\n\n inputs: Union[List[str], str]\n """The text or list of texts to embed."""\n normalize: Optional[bool] = None\n prompt_name: Optional[str] = None\n """The name of the prompt that should be used by for encoding. If not set, no prompt\n will be applied.\n Must be a key in the `sentence-transformers` configuration `prompts` dictionary.\n For example if ``prompt_name`` is "query" and the ``prompts`` is {"query": "query: ",\n ...},\n then the sentence "What is the capital of France?" will be encoded as\n "query: What is the capital of France?" because the prompt text will be prepended before\n any text to encode.\n """\n truncate: Optional[bool] = None\n truncation_direction: Optional["FeatureExtractionInputTruncationDirection"] = None\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\feature_extraction.py
feature_extraction.py
Python
1,537
0.95
0.083333
0.166667
python-kit
67
2025-07-07T01:58:05.334197
MIT
false
cdfd860795c8abb189fc338b841b0747
# Inference code generated from the JSON schema spec in @huggingface/tasks.\n#\n# See:\n# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts\n# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.\nfrom typing import Any, List, Optional\n\nfrom .base import BaseInferenceType, dataclass_with_extra\n\n\n@dataclass_with_extra\nclass FillMaskParameters(BaseInferenceType):\n """Additional inference parameters for Fill Mask"""\n\n targets: Optional[List[str]] = None\n """When passed, the model will limit the scores to the passed targets instead of looking up\n in the whole vocabulary. If the provided targets are not in the model vocab, they will be\n tokenized and the first resulting token will be used (with a warning, and that might be\n slower).\n """\n top_k: Optional[int] = None\n """When passed, overrides the number of predictions to return."""\n\n\n@dataclass_with_extra\nclass FillMaskInput(BaseInferenceType):\n """Inputs for Fill Mask inference"""\n\n inputs: str\n """The text with masked tokens"""\n parameters: Optional[FillMaskParameters] = None\n """Additional inference parameters for Fill Mask"""\n\n\n@dataclass_with_extra\nclass FillMaskOutputElement(BaseInferenceType):\n """Outputs of inference for the Fill Mask task"""\n\n score: float\n """The corresponding probability"""\n sequence: str\n """The corresponding input with the mask token prediction."""\n token: int\n """The predicted token id (to replace the masked one)."""\n token_str: Any\n fill_mask_output_token_str: Optional[str] = None\n """The predicted token (to replace the masked one)."""\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\fill_mask.py
fill_mask.py
Python
1,708
0.95
0.148936
0.135135
python-kit
985
2024-11-20T01:30:50.896586
GPL-3.0
false
64aa62288dd79bc06cba5b31affda27e
# Inference code generated from the JSON schema spec in @huggingface/tasks.\n#\n# See:\n# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts\n# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.\nfrom typing import Literal, Optional\n\nfrom .base import BaseInferenceType, dataclass_with_extra\n\n\nImageClassificationOutputTransform = Literal["sigmoid", "softmax", "none"]\n\n\n@dataclass_with_extra\nclass ImageClassificationParameters(BaseInferenceType):\n """Additional inference parameters for Image Classification"""\n\n function_to_apply: Optional["ImageClassificationOutputTransform"] = None\n """The function to apply to the model outputs in order to retrieve the scores."""\n top_k: Optional[int] = None\n """When specified, limits the output to the top K most probable classes."""\n\n\n@dataclass_with_extra\nclass ImageClassificationInput(BaseInferenceType):\n """Inputs for Image Classification inference"""\n\n inputs: str\n """The input image data as a base64-encoded string. If no `parameters` are provided, you can\n also provide the image data as a raw bytes payload.\n """\n parameters: Optional[ImageClassificationParameters] = None\n """Additional inference parameters for Image Classification"""\n\n\n@dataclass_with_extra\nclass ImageClassificationOutputElement(BaseInferenceType):\n """Outputs of inference for the Image Classification task"""\n\n label: str\n """The predicted class label."""\n score: float\n """The corresponding probability."""\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\image_classification.py
image_classification.py
Python
1,585
0.95
0.209302
0.16129
react-lib
328
2024-07-27T06:02:43.116625
BSD-3-Clause
false
e33d497904c6b389fd89b24553692726
# Inference code generated from the JSON schema spec in @huggingface/tasks.\n#\n# See:\n# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts\n# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.\nfrom typing import Literal, Optional\n\nfrom .base import BaseInferenceType, dataclass_with_extra\n\n\nImageSegmentationSubtask = Literal["instance", "panoptic", "semantic"]\n\n\n@dataclass_with_extra\nclass ImageSegmentationParameters(BaseInferenceType):\n """Additional inference parameters for Image Segmentation"""\n\n mask_threshold: Optional[float] = None\n """Threshold to use when turning the predicted masks into binary values."""\n overlap_mask_area_threshold: Optional[float] = None\n """Mask overlap threshold to eliminate small, disconnected segments."""\n subtask: Optional["ImageSegmentationSubtask"] = None\n """Segmentation task to be performed, depending on model capabilities."""\n threshold: Optional[float] = None\n """Probability threshold to filter out predicted masks."""\n\n\n@dataclass_with_extra\nclass ImageSegmentationInput(BaseInferenceType):\n """Inputs for Image Segmentation inference"""\n\n inputs: str\n """The input image data as a base64-encoded string. If no `parameters` are provided, you can\n also provide the image data as a raw bytes payload.\n """\n parameters: Optional[ImageSegmentationParameters] = None\n """Additional inference parameters for Image Segmentation"""\n\n\n@dataclass_with_extra\nclass ImageSegmentationOutputElement(BaseInferenceType):\n """Outputs of inference for the Image Segmentation task\n A predicted mask / segment\n """\n\n label: str\n """The label of the predicted segment."""\n mask: str\n """The corresponding mask as a black-and-white image (base64-encoded)."""\n score: Optional[float] = None\n """The score or confidence degree the model has."""\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\image_segmentation.py
image_segmentation.py
Python
1,950
0.95
0.137255
0.128205
node-utils
100
2024-09-08T22:27:17.640573
MIT
false
934203eb59d29d586e29b0e9c4a2d6e0
# Inference code generated from the JSON schema spec in @huggingface/tasks.\n#\n# See:\n# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts\n# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.\nfrom typing import Any, Optional\n\nfrom .base import BaseInferenceType, dataclass_with_extra\n\n\n@dataclass_with_extra\nclass ImageToImageTargetSize(BaseInferenceType):\n """The size in pixel of the output image."""\n\n height: int\n width: int\n\n\n@dataclass_with_extra\nclass ImageToImageParameters(BaseInferenceType):\n """Additional inference parameters for Image To Image"""\n\n guidance_scale: Optional[float] = None\n """For diffusion models. A higher guidance scale value encourages the model to generate\n images closely linked to the text prompt at the expense of lower image quality.\n """\n negative_prompt: Optional[str] = None\n """One prompt to guide what NOT to include in image generation."""\n num_inference_steps: Optional[int] = None\n """For diffusion models. The number of denoising steps. More denoising steps usually lead to\n a higher quality image at the expense of slower inference.\n """\n prompt: Optional[str] = None\n """The text prompt to guide the image generation."""\n target_size: Optional[ImageToImageTargetSize] = None\n """The size in pixel of the output image."""\n\n\n@dataclass_with_extra\nclass ImageToImageInput(BaseInferenceType):\n """Inputs for Image To Image inference"""\n\n inputs: str\n """The input image data as a base64-encoded string. 
If no `parameters` are provided, you can\n also provide the image data as a raw bytes payload.\n """\n parameters: Optional[ImageToImageParameters] = None\n """Additional inference parameters for Image To Image"""\n\n\n@dataclass_with_extra\nclass ImageToImageOutput(BaseInferenceType):\n """Outputs of inference for the Image To Image task"""\n\n image: Any\n """The output image returned as raw bytes in the payload."""\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\image_to_image.py
image_to_image.py
Python
2,044
0.95
0.142857
0.116279
node-utils
697
2025-01-24T10:56:36.939031
Apache-2.0
false
0b1e2bb886f3db76092c8edb14c7ba8c
# Inference code generated from the JSON schema spec in @huggingface/tasks.\n#\n# See:\n# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts\n# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.\nfrom typing import Any, Literal, Optional, Union\n\nfrom .base import BaseInferenceType, dataclass_with_extra\n\n\nImageToTextEarlyStoppingEnum = Literal["never"]\n\n\n@dataclass_with_extra\nclass ImageToTextGenerationParameters(BaseInferenceType):\n """Parametrization of the text generation process"""\n\n do_sample: Optional[bool] = None\n """Whether to use sampling instead of greedy decoding when generating new tokens."""\n early_stopping: Optional[Union[bool, "ImageToTextEarlyStoppingEnum"]] = None\n """Controls the stopping condition for beam-based methods."""\n epsilon_cutoff: Optional[float] = None\n """If set to float strictly between 0 and 1, only tokens with a conditional probability\n greater than epsilon_cutoff will be sampled. In the paper, suggested values range from\n 3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language\n Model Desmoothing](https://hf.co/papers/2210.15191) for more details.\n """\n eta_cutoff: Optional[float] = None\n """Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to\n float strictly between 0 and 1, a token is only considered if it is greater than either\n eta_cutoff or sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits))). The latter\n term is intuitively the expected next token probability, scaled by sqrt(eta_cutoff). 
In\n the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model.\n See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191)\n for more details.\n """\n max_length: Optional[int] = None\n """The maximum length (in tokens) of the generated text, including the input."""\n max_new_tokens: Optional[int] = None\n """The maximum number of tokens to generate. Takes precedence over max_length."""\n min_length: Optional[int] = None\n """The minimum length (in tokens) of the generated text, including the input."""\n min_new_tokens: Optional[int] = None\n """The minimum number of tokens to generate. Takes precedence over min_length."""\n num_beam_groups: Optional[int] = None\n """Number of groups to divide num_beams into in order to ensure diversity among different\n groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details.\n """\n num_beams: Optional[int] = None\n """Number of beams to use for beam search."""\n penalty_alpha: Optional[float] = None\n """The value balances the model confidence and the degeneration penalty in contrastive\n search decoding.\n """\n temperature: Optional[float] = None\n """The value used to modulate the next token probabilities."""\n top_k: Optional[int] = None\n """The number of highest probability vocabulary tokens to keep for top-k-filtering."""\n top_p: Optional[float] = None\n """If set to float < 1, only the smallest set of most probable tokens with probabilities\n that add up to top_p or higher are kept for generation.\n """\n typical_p: Optional[float] = None\n """Local typicality measures how similar the conditional probability of predicting a target\n token next is to the expected conditional probability of predicting a random token next,\n given the partial text already generated. If set to float < 1, the smallest set of the\n most locally typical tokens with probabilities that add up to typical_p or higher are\n kept for generation. 
See [this paper](https://hf.co/papers/2202.00666) for more details.\n """\n use_cache: Optional[bool] = None\n """Whether the model should use the past last key/values attentions to speed up decoding"""\n\n\n@dataclass_with_extra\nclass ImageToTextParameters(BaseInferenceType):\n """Additional inference parameters for Image To Text"""\n\n generation_parameters: Optional[ImageToTextGenerationParameters] = None\n """Parametrization of the text generation process"""\n max_new_tokens: Optional[int] = None\n """The amount of maximum tokens to generate."""\n\n\n@dataclass_with_extra\nclass ImageToTextInput(BaseInferenceType):\n """Inputs for Image To Text inference"""\n\n inputs: Any\n """The input image data"""\n parameters: Optional[ImageToTextParameters] = None\n """Additional inference parameters for Image To Text"""\n\n\n@dataclass_with_extra\nclass ImageToTextOutput(BaseInferenceType):\n """Outputs of inference for the Image To Text task"""\n\n generated_text: Any\n image_to_text_output_generated_text: Optional[str] = None\n """The generated text."""\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\image_to_text.py
image_to_text.py
Python
4,810
0.95
0.18
0.058824
awesome-app
473
2023-10-25T06:33:07.151105
GPL-3.0
false
b1730b75a0a79eab3998074c5b327966
# Inference code generated from the JSON schema spec in @huggingface/tasks.\n#\n# See:\n# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts\n# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.\nfrom typing import Optional\n\nfrom .base import BaseInferenceType, dataclass_with_extra\n\n\n@dataclass_with_extra\nclass ObjectDetectionParameters(BaseInferenceType):\n """Additional inference parameters for Object Detection"""\n\n threshold: Optional[float] = None\n """The probability necessary to make a prediction."""\n\n\n@dataclass_with_extra\nclass ObjectDetectionInput(BaseInferenceType):\n """Inputs for Object Detection inference"""\n\n inputs: str\n """The input image data as a base64-encoded string. If no `parameters` are provided, you can\n also provide the image data as a raw bytes payload.\n """\n parameters: Optional[ObjectDetectionParameters] = None\n """Additional inference parameters for Object Detection"""\n\n\n@dataclass_with_extra\nclass ObjectDetectionBoundingBox(BaseInferenceType):\n """The predicted bounding box. Coordinates are relative to the top left corner of the input\n image.\n """\n\n xmax: int\n """The x-coordinate of the bottom-right corner of the bounding box."""\n xmin: int\n """The x-coordinate of the top-left corner of the bounding box."""\n ymax: int\n """The y-coordinate of the bottom-right corner of the bounding box."""\n ymin: int\n """The y-coordinate of the top-left corner of the bounding box."""\n\n\n@dataclass_with_extra\nclass ObjectDetectionOutputElement(BaseInferenceType):\n """Outputs of inference for the Object Detection task"""\n\n box: ObjectDetectionBoundingBox\n """The predicted bounding box. Coordinates are relative to the top left corner of the input\n image.\n """\n label: str\n """The predicted label for the bounding box."""\n score: float\n """The associated score / probability."""\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\object_detection.py
object_detection.py
Python
2,000
0.95
0.155172
0.111111
node-utils
363
2024-06-24T17:14:48.658406
MIT
false
56fa2f97690c8fa9c1257535b5a4d1ea
# Inference code generated from the JSON schema spec in @huggingface/tasks.\n#\n# See:\n# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts\n# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.\nfrom typing import Optional\n\nfrom .base import BaseInferenceType, dataclass_with_extra\n\n\n@dataclass_with_extra\nclass QuestionAnsweringInputData(BaseInferenceType):\n """One (context, question) pair to answer"""\n\n context: str\n """The context to be used for answering the question"""\n question: str\n """The question to be answered"""\n\n\n@dataclass_with_extra\nclass QuestionAnsweringParameters(BaseInferenceType):\n """Additional inference parameters for Question Answering"""\n\n align_to_words: Optional[bool] = None\n """Attempts to align the answer to real words. Improves quality on space separated\n languages. Might hurt on non-space-separated languages (like Japanese or Chinese)\n """\n doc_stride: Optional[int] = None\n """If the context is too long to fit with the question for the model, it will be split in\n several chunks with some overlap. This argument controls the size of that overlap.\n """\n handle_impossible_answer: Optional[bool] = None\n """Whether to accept impossible as an answer."""\n max_answer_len: Optional[int] = None\n """The maximum length of predicted answers (e.g., only answers with a shorter length are\n considered).\n """\n max_question_len: Optional[int] = None\n """The maximum length of the question after tokenization. It will be truncated if needed."""\n max_seq_len: Optional[int] = None\n """The maximum length of the total sentence (context + question) in tokens of each chunk\n passed to the model. The context will be split in several chunks (using docStride as\n overlap) if needed.\n """\n top_k: Optional[int] = None\n """The number of answers to return (will be chosen by order of likelihood). 
Note that we\n return less than topk answers if there are not enough options available within the\n context.\n """\n\n\n@dataclass_with_extra\nclass QuestionAnsweringInput(BaseInferenceType):\n """Inputs for Question Answering inference"""\n\n inputs: QuestionAnsweringInputData\n """One (context, question) pair to answer"""\n parameters: Optional[QuestionAnsweringParameters] = None\n """Additional inference parameters for Question Answering"""\n\n\n@dataclass_with_extra\nclass QuestionAnsweringOutputElement(BaseInferenceType):\n """Outputs of inference for the Question Answering task"""\n\n answer: str\n """The answer to the question."""\n end: int\n """The character position in the input where the answer ends."""\n score: float\n """The probability associated to the answer."""\n start: int\n """The character position in the input where the answer begins."""\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\question_answering.py
question_answering.py
Python
2,898
0.95
0.175676
0.081967
awesome-app
479
2023-07-19T05:25:46.471011
MIT
false
de7768f6cc6efc35c3e1537030435346
# Inference code generated from the JSON schema spec in @huggingface/tasks.\n#\n# See:\n# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts\n# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.\nfrom typing import Any, Dict, List, Optional\n\nfrom .base import BaseInferenceType, dataclass_with_extra\n\n\n@dataclass_with_extra\nclass SentenceSimilarityInputData(BaseInferenceType):\n sentences: List[str]\n """A list of strings which will be compared against the source_sentence."""\n source_sentence: str\n """The string that you wish to compare the other strings with. This can be a phrase,\n sentence, or longer passage, depending on the model being used.\n """\n\n\n@dataclass_with_extra\nclass SentenceSimilarityInput(BaseInferenceType):\n """Inputs for Sentence similarity inference"""\n\n inputs: SentenceSimilarityInputData\n parameters: Optional[Dict[str, Any]] = None\n """Additional inference parameters for Sentence Similarity"""\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\sentence_similarity.py
sentence_similarity.py
Python
1,052
0.95
0.148148
0.238095
vue-tools
879
2024-03-23T05:45:11.599998
Apache-2.0
false
8eee9377727972ec57bd667815c707d1
# Inference code generated from the JSON schema spec in @huggingface/tasks.\n#\n# See:\n# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts\n# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.\nfrom typing import Any, Dict, Literal, Optional\n\nfrom .base import BaseInferenceType, dataclass_with_extra\n\n\nSummarizationTruncationStrategy = Literal["do_not_truncate", "longest_first", "only_first", "only_second"]\n\n\n@dataclass_with_extra\nclass SummarizationParameters(BaseInferenceType):\n """Additional inference parameters for summarization."""\n\n clean_up_tokenization_spaces: Optional[bool] = None\n """Whether to clean up the potential extra spaces in the text output."""\n generate_parameters: Optional[Dict[str, Any]] = None\n """Additional parametrization of the text generation algorithm."""\n truncation: Optional["SummarizationTruncationStrategy"] = None\n """The truncation strategy to use."""\n\n\n@dataclass_with_extra\nclass SummarizationInput(BaseInferenceType):\n """Inputs for Summarization inference"""\n\n inputs: str\n """The input text to summarize."""\n parameters: Optional[SummarizationParameters] = None\n """Additional inference parameters for summarization."""\n\n\n@dataclass_with_extra\nclass SummarizationOutput(BaseInferenceType):\n """Outputs of inference for the Summarization task"""\n\n summary_text: str\n """The summarized text."""\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\summarization.py
summarization.py
Python
1,487
0.95
0.170732
0.172414
node-utils
904
2023-07-12T20:17:13.675552
Apache-2.0
false
e08bb89d7f049f00346aba82cb83991a
# Inference code generated from the JSON schema spec in @huggingface/tasks.\n#\n# See:\n# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts\n# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.\nfrom typing import Dict, List, Literal, Optional\n\nfrom .base import BaseInferenceType, dataclass_with_extra\n\n\n@dataclass_with_extra\nclass TableQuestionAnsweringInputData(BaseInferenceType):\n """One (table, question) pair to answer"""\n\n question: str\n """The question to be answered about the table"""\n table: Dict[str, List[str]]\n """The table to serve as context for the questions"""\n\n\nPadding = Literal["do_not_pad", "longest", "max_length"]\n\n\n@dataclass_with_extra\nclass TableQuestionAnsweringParameters(BaseInferenceType):\n """Additional inference parameters for Table Question Answering"""\n\n padding: Optional["Padding"] = None\n """Activates and controls padding."""\n sequential: Optional[bool] = None\n """Whether to do inference sequentially or as a batch. Batching is faster, but models like\n SQA require the inference to be done sequentially to extract relations within sequences,\n given their conversational nature.\n """\n truncation: Optional[bool] = None\n """Activates and controls truncation."""\n\n\n@dataclass_with_extra\nclass TableQuestionAnsweringInput(BaseInferenceType):\n """Inputs for Table Question Answering inference"""\n\n inputs: TableQuestionAnsweringInputData\n """One (table, question) pair to answer"""\n parameters: Optional[TableQuestionAnsweringParameters] = None\n """Additional inference parameters for Table Question Answering"""\n\n\n@dataclass_with_extra\nclass TableQuestionAnsweringOutputElement(BaseInferenceType):\n """Outputs of inference for the Table Question Answering task"""\n\n answer: str\n """The answer of the question given the table. 
If there is an aggregator, the answer will be\n preceded by `AGGREGATOR >`.\n """\n cells: List[str]\n """List of strings made up of the answer cell values."""\n coordinates: List[List[int]]\n """Coordinates of the cells of the answers."""\n aggregator: Optional[str] = None\n """If the model has an aggregator, this returns the aggregator."""\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\table_question_answering.py
table_question_answering.py
Python
2,293
0.95
0.145161
0.106383
python-kit
867
2025-02-06T19:39:10.162561
BSD-3-Clause
false
3e6427b7f1ae0487e651c1cd3fd2093d
# Inference code generated from the JSON schema spec in @huggingface/tasks.\n#\n# See:\n# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts\n# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.\nfrom typing import Any, Dict, Literal, Optional\n\nfrom .base import BaseInferenceType, dataclass_with_extra\n\n\nText2TextGenerationTruncationStrategy = Literal["do_not_truncate", "longest_first", "only_first", "only_second"]\n\n\n@dataclass_with_extra\nclass Text2TextGenerationParameters(BaseInferenceType):\n """Additional inference parameters for Text2text Generation"""\n\n clean_up_tokenization_spaces: Optional[bool] = None\n """Whether to clean up the potential extra spaces in the text output."""\n generate_parameters: Optional[Dict[str, Any]] = None\n """Additional parametrization of the text generation algorithm"""\n truncation: Optional["Text2TextGenerationTruncationStrategy"] = None\n """The truncation strategy to use"""\n\n\n@dataclass_with_extra\nclass Text2TextGenerationInput(BaseInferenceType):\n """Inputs for Text2text Generation inference"""\n\n inputs: str\n """The input text data"""\n parameters: Optional[Text2TextGenerationParameters] = None\n """Additional inference parameters for Text2text Generation"""\n\n\n@dataclass_with_extra\nclass Text2TextGenerationOutput(BaseInferenceType):\n """Outputs of inference for the Text2text Generation task"""\n\n generated_text: Any\n text2_text_generation_output_generated_text: Optional[str] = None\n """The generated text."""\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\text2text_generation.py
text2text_generation.py
Python
1,609
0.95
0.166667
0.166667
awesome-app
62
2024-01-01T04:30:45.645730
GPL-3.0
false
2ea42a6502b9f7c265120165b1ad82c0
# Inference code generated from the JSON schema spec in @huggingface/tasks.\n#\n# See:\n# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts\n# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.\nfrom typing import Literal, Optional\n\nfrom .base import BaseInferenceType, dataclass_with_extra\n\n\nTextClassificationOutputTransform = Literal["sigmoid", "softmax", "none"]\n\n\n@dataclass_with_extra\nclass TextClassificationParameters(BaseInferenceType):\n """Additional inference parameters for Text Classification"""\n\n function_to_apply: Optional["TextClassificationOutputTransform"] = None\n """The function to apply to the model outputs in order to retrieve the scores."""\n top_k: Optional[int] = None\n """When specified, limits the output to the top K most probable classes."""\n\n\n@dataclass_with_extra\nclass TextClassificationInput(BaseInferenceType):\n """Inputs for Text Classification inference"""\n\n inputs: str\n """The text to classify"""\n parameters: Optional[TextClassificationParameters] = None\n """Additional inference parameters for Text Classification"""\n\n\n@dataclass_with_extra\nclass TextClassificationOutputElement(BaseInferenceType):\n """Outputs of inference for the Text Classification task"""\n\n label: str\n """The predicted class label."""\n score: float\n """The corresponding probability."""\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\text_classification.py
text_classification.py
Python
1,445
0.95
0.219512
0.172414
awesome-app
604
2024-10-22T20:21:59.611995
BSD-3-Clause
false
2c3621bbdba9d7fad157cf50620eaaa2
# Inference code generated from the JSON schema spec in @huggingface/tasks.\n#\n# See:\n# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts\n# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.\nfrom typing import Any, List, Literal, Optional\n\nfrom .base import BaseInferenceType, dataclass_with_extra\n\n\nTypeEnum = Literal["json", "regex", "json_schema"]\n\n\n@dataclass_with_extra\nclass TextGenerationInputGrammarType(BaseInferenceType):\n type: "TypeEnum"\n value: Any\n """A string that represents a [JSON Schema](https://json-schema.org/).\n JSON Schema is a declarative language that allows to annotate JSON documents\n with types and descriptions.\n """\n\n\n@dataclass_with_extra\nclass TextGenerationInputGenerateParameters(BaseInferenceType):\n adapter_id: Optional[str] = None\n """Lora adapter id"""\n best_of: Optional[int] = None\n """Generate best_of sequences and return the one if the highest token logprobs."""\n decoder_input_details: Optional[bool] = None\n """Whether to return decoder input token logprobs and ids."""\n details: Optional[bool] = None\n """Whether to return generation details."""\n do_sample: Optional[bool] = None\n """Activate logits sampling."""\n frequency_penalty: Optional[float] = None\n """The parameter for frequency penalty. 1.0 means no penalty\n Penalize new tokens based on their existing frequency in the text so far,\n decreasing the model's likelihood to repeat the same line verbatim.\n """\n grammar: Optional[TextGenerationInputGrammarType] = None\n max_new_tokens: Optional[int] = None\n """Maximum number of tokens to generate."""\n repetition_penalty: Optional[float] = None\n """The parameter for repetition penalty. 
1.0 means no penalty.\n See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.\n """\n return_full_text: Optional[bool] = None\n """Whether to prepend the prompt to the generated text"""\n seed: Optional[int] = None\n """Random sampling seed."""\n stop: Optional[List[str]] = None\n """Stop generating tokens if a member of `stop` is generated."""\n temperature: Optional[float] = None\n """The value used to module the logits distribution."""\n top_k: Optional[int] = None\n """The number of highest probability vocabulary tokens to keep for top-k-filtering."""\n top_n_tokens: Optional[int] = None\n """The number of highest probability vocabulary tokens to keep for top-n-filtering."""\n top_p: Optional[float] = None\n """Top-p value for nucleus sampling."""\n truncate: Optional[int] = None\n """Truncate inputs tokens to the given size."""\n typical_p: Optional[float] = None\n """Typical Decoding mass\n See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666)\n for more information.\n """\n watermark: Optional[bool] = None\n """Watermarking with [A Watermark for Large Language\n Models](https://arxiv.org/abs/2301.10226).\n """\n\n\n@dataclass_with_extra\nclass TextGenerationInput(BaseInferenceType):\n """Text Generation Input.\n Auto-generated from TGI specs.\n For more details, check out\n https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.\n """\n\n inputs: str\n parameters: Optional[TextGenerationInputGenerateParameters] = None\n stream: Optional[bool] = None\n\n\nTextGenerationOutputFinishReason = Literal["length", "eos_token", "stop_sequence"]\n\n\n@dataclass_with_extra\nclass TextGenerationOutputPrefillToken(BaseInferenceType):\n id: int\n logprob: float\n text: str\n\n\n@dataclass_with_extra\nclass TextGenerationOutputToken(BaseInferenceType):\n id: int\n logprob: float\n special: bool\n text: str\n\n\n@dataclass_with_extra\nclass 
TextGenerationOutputBestOfSequence(BaseInferenceType):\n finish_reason: "TextGenerationOutputFinishReason"\n generated_text: str\n generated_tokens: int\n prefill: List[TextGenerationOutputPrefillToken]\n tokens: List[TextGenerationOutputToken]\n seed: Optional[int] = None\n top_tokens: Optional[List[List[TextGenerationOutputToken]]] = None\n\n\n@dataclass_with_extra\nclass TextGenerationOutputDetails(BaseInferenceType):\n finish_reason: "TextGenerationOutputFinishReason"\n generated_tokens: int\n prefill: List[TextGenerationOutputPrefillToken]\n tokens: List[TextGenerationOutputToken]\n best_of_sequences: Optional[List[TextGenerationOutputBestOfSequence]] = None\n seed: Optional[int] = None\n top_tokens: Optional[List[List[TextGenerationOutputToken]]] = None\n\n\n@dataclass_with_extra\nclass TextGenerationOutput(BaseInferenceType):\n """Text Generation Output.\n Auto-generated from TGI specs.\n For more details, check out\n https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.\n """\n\n generated_text: str\n details: Optional[TextGenerationOutputDetails] = None\n\n\n@dataclass_with_extra\nclass TextGenerationStreamOutputStreamDetails(BaseInferenceType):\n finish_reason: "TextGenerationOutputFinishReason"\n generated_tokens: int\n input_length: int\n seed: Optional[int] = None\n\n\n@dataclass_with_extra\nclass TextGenerationStreamOutputToken(BaseInferenceType):\n id: int\n logprob: float\n special: bool\n text: str\n\n\n@dataclass_with_extra\nclass TextGenerationStreamOutput(BaseInferenceType):\n """Text Generation Stream Output.\n Auto-generated from TGI specs.\n For more details, check out\n https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.\n """\n\n index: int\n token: TextGenerationStreamOutputToken\n details: Optional[TextGenerationStreamOutputStreamDetails] = None\n generated_text: Optional[str] = None\n top_tokens: 
Optional[List[TextGenerationStreamOutputToken]] = None\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\text_generation.py
text_generation.py
Python
5,922
0.95
0.130952
0.036232
vue-tools
704
2023-11-12T16:16:59.095198
GPL-3.0
false
0674754b1fe32518bf38dfea1c366c1c
# Inference code generated from the JSON schema spec in @huggingface/tasks.\n#\n# See:\n# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts\n# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.\nfrom typing import Any, Literal, Optional, Union\n\nfrom .base import BaseInferenceType, dataclass_with_extra\n\n\nTextToAudioEarlyStoppingEnum = Literal["never"]\n\n\n@dataclass_with_extra\nclass TextToAudioGenerationParameters(BaseInferenceType):\n """Parametrization of the text generation process"""\n\n do_sample: Optional[bool] = None\n """Whether to use sampling instead of greedy decoding when generating new tokens."""\n early_stopping: Optional[Union[bool, "TextToAudioEarlyStoppingEnum"]] = None\n """Controls the stopping condition for beam-based methods."""\n epsilon_cutoff: Optional[float] = None\n """If set to float strictly between 0 and 1, only tokens with a conditional probability\n greater than epsilon_cutoff will be sampled. In the paper, suggested values range from\n 3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language\n Model Desmoothing](https://hf.co/papers/2210.15191) for more details.\n """\n eta_cutoff: Optional[float] = None\n """Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to\n float strictly between 0 and 1, a token is only considered if it is greater than either\n eta_cutoff or sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits))). The latter\n term is intuitively the expected next token probability, scaled by sqrt(eta_cutoff). 
In\n the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model.\n See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191)\n for more details.\n """\n max_length: Optional[int] = None\n """The maximum length (in tokens) of the generated text, including the input."""\n max_new_tokens: Optional[int] = None\n """The maximum number of tokens to generate. Takes precedence over max_length."""\n min_length: Optional[int] = None\n """The minimum length (in tokens) of the generated text, including the input."""\n min_new_tokens: Optional[int] = None\n """The minimum number of tokens to generate. Takes precedence over min_length."""\n num_beam_groups: Optional[int] = None\n """Number of groups to divide num_beams into in order to ensure diversity among different\n groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details.\n """\n num_beams: Optional[int] = None\n """Number of beams to use for beam search."""\n penalty_alpha: Optional[float] = None\n """The value balances the model confidence and the degeneration penalty in contrastive\n search decoding.\n """\n temperature: Optional[float] = None\n """The value used to modulate the next token probabilities."""\n top_k: Optional[int] = None\n """The number of highest probability vocabulary tokens to keep for top-k-filtering."""\n top_p: Optional[float] = None\n """If set to float < 1, only the smallest set of most probable tokens with probabilities\n that add up to top_p or higher are kept for generation.\n """\n typical_p: Optional[float] = None\n """Local typicality measures how similar the conditional probability of predicting a target\n token next is to the expected conditional probability of predicting a random token next,\n given the partial text already generated. If set to float < 1, the smallest set of the\n most locally typical tokens with probabilities that add up to typical_p or higher are\n kept for generation. 
See [this paper](https://hf.co/papers/2202.00666) for more details.\n """\n use_cache: Optional[bool] = None\n """Whether the model should use the past last key/values attentions to speed up decoding"""\n\n\n@dataclass_with_extra\nclass TextToAudioParameters(BaseInferenceType):\n """Additional inference parameters for Text To Audio"""\n\n generation_parameters: Optional[TextToAudioGenerationParameters] = None\n """Parametrization of the text generation process"""\n\n\n@dataclass_with_extra\nclass TextToAudioInput(BaseInferenceType):\n """Inputs for Text To Audio inference"""\n\n inputs: str\n """The input text data"""\n parameters: Optional[TextToAudioParameters] = None\n """Additional inference parameters for Text To Audio"""\n\n\n@dataclass_with_extra\nclass TextToAudioOutput(BaseInferenceType):\n """Outputs of inference for the Text To Audio task"""\n\n audio: Any\n """The generated audio waveform."""\n sampling_rate: float\n """The sampling rate of the generated audio waveform."""\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\text_to_audio.py
text_to_audio.py
Python
4,741
0.95
0.181818
0.059524
python-kit
154
2024-02-18T20:51:07.861351
MIT
false
522ab51ff616d9c582318e94cd39942d
# Inference code generated from the JSON schema spec in @huggingface/tasks.\n#\n# See:\n# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts\n# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.\nfrom typing import Any, Optional\n\nfrom .base import BaseInferenceType, dataclass_with_extra\n\n\n@dataclass_with_extra\nclass TextToImageParameters(BaseInferenceType):\n """Additional inference parameters for Text To Image"""\n\n guidance_scale: Optional[float] = None\n """A higher guidance scale value encourages the model to generate images closely linked to\n the text prompt, but values too high may cause saturation and other artifacts.\n """\n height: Optional[int] = None\n """The height in pixels of the output image"""\n negative_prompt: Optional[str] = None\n """One prompt to guide what NOT to include in image generation."""\n num_inference_steps: Optional[int] = None\n """The number of denoising steps. More denoising steps usually lead to a higher quality\n image at the expense of slower inference.\n """\n scheduler: Optional[str] = None\n """Override the scheduler with a compatible one."""\n seed: Optional[int] = None\n """Seed for the random number generator."""\n width: Optional[int] = None\n """The width in pixels of the output image"""\n\n\n@dataclass_with_extra\nclass TextToImageInput(BaseInferenceType):\n """Inputs for Text To Image inference"""\n\n inputs: str\n """The input text data (sometimes called "prompt")"""\n parameters: Optional[TextToImageParameters] = None\n """Additional inference parameters for Text To Image"""\n\n\n@dataclass_with_extra\nclass TextToImageOutput(BaseInferenceType):\n """Outputs of inference for the Text To Image task"""\n\n image: Any\n """The generated image returned as raw bytes in the payload."""\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\text_to_image.py
text_to_image.py
Python
1,903
0.95
0.16
0.125
awesome-app
358
2024-03-14T22:40:28.765433
Apache-2.0
false
401ca4cf2a45fbf0a1854504d0f8b548
# Inference code generated from the JSON schema spec in @huggingface/tasks.\n#\n# See:\n# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts\n# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.\nfrom typing import Any, Literal, Optional, Union\n\nfrom .base import BaseInferenceType, dataclass_with_extra\n\n\nTextToSpeechEarlyStoppingEnum = Literal["never"]\n\n\n@dataclass_with_extra\nclass TextToSpeechGenerationParameters(BaseInferenceType):\n """Parametrization of the text generation process"""\n\n do_sample: Optional[bool] = None\n """Whether to use sampling instead of greedy decoding when generating new tokens."""\n early_stopping: Optional[Union[bool, "TextToSpeechEarlyStoppingEnum"]] = None\n """Controls the stopping condition for beam-based methods."""\n epsilon_cutoff: Optional[float] = None\n """If set to float strictly between 0 and 1, only tokens with a conditional probability\n greater than epsilon_cutoff will be sampled. In the paper, suggested values range from\n 3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language\n Model Desmoothing](https://hf.co/papers/2210.15191) for more details.\n """\n eta_cutoff: Optional[float] = None\n """Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to\n float strictly between 0 and 1, a token is only considered if it is greater than either\n eta_cutoff or sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits))). The latter\n term is intuitively the expected next token probability, scaled by sqrt(eta_cutoff). 
In\n the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model.\n See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191)\n for more details.\n """\n max_length: Optional[int] = None\n """The maximum length (in tokens) of the generated text, including the input."""\n max_new_tokens: Optional[int] = None\n """The maximum number of tokens to generate. Takes precedence over max_length."""\n min_length: Optional[int] = None\n """The minimum length (in tokens) of the generated text, including the input."""\n min_new_tokens: Optional[int] = None\n """The minimum number of tokens to generate. Takes precedence over min_length."""\n num_beam_groups: Optional[int] = None\n """Number of groups to divide num_beams into in order to ensure diversity among different\n groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details.\n """\n num_beams: Optional[int] = None\n """Number of beams to use for beam search."""\n penalty_alpha: Optional[float] = None\n """The value balances the model confidence and the degeneration penalty in contrastive\n search decoding.\n """\n temperature: Optional[float] = None\n """The value used to modulate the next token probabilities."""\n top_k: Optional[int] = None\n """The number of highest probability vocabulary tokens to keep for top-k-filtering."""\n top_p: Optional[float] = None\n """If set to float < 1, only the smallest set of most probable tokens with probabilities\n that add up to top_p or higher are kept for generation.\n """\n typical_p: Optional[float] = None\n """Local typicality measures how similar the conditional probability of predicting a target\n token next is to the expected conditional probability of predicting a random token next,\n given the partial text already generated. If set to float < 1, the smallest set of the\n most locally typical tokens with probabilities that add up to typical_p or higher are\n kept for generation. 
See [this paper](https://hf.co/papers/2202.00666) for more details.\n """\n use_cache: Optional[bool] = None\n """Whether the model should use the past last key/values attentions to speed up decoding"""\n\n\n@dataclass_with_extra\nclass TextToSpeechParameters(BaseInferenceType):\n """Additional inference parameters for Text To Speech"""\n\n generation_parameters: Optional[TextToSpeechGenerationParameters] = None\n """Parametrization of the text generation process"""\n\n\n@dataclass_with_extra\nclass TextToSpeechInput(BaseInferenceType):\n """Inputs for Text To Speech inference"""\n\n inputs: str\n """The input text data"""\n parameters: Optional[TextToSpeechParameters] = None\n """Additional inference parameters for Text To Speech"""\n\n\n@dataclass_with_extra\nclass TextToSpeechOutput(BaseInferenceType):\n """Outputs of inference for the Text To Speech task"""\n\n audio: Any\n """The generated audio"""\n sampling_rate: Optional[float] = None\n """The sampling rate of the generated audio waveform."""\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\text_to_speech.py
text_to_speech.py
Python
4,760
0.95
0.181818
0.059524
awesome-app
666
2025-04-04T23:10:46.598538
MIT
false
38bfee6251533d3803808d34d54aa581
# Inference code generated from the JSON schema spec in @huggingface/tasks.\n#\n# See:\n# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts\n# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.\nfrom typing import Any, List, Optional\n\nfrom .base import BaseInferenceType, dataclass_with_extra\n\n\n@dataclass_with_extra\nclass TextToVideoParameters(BaseInferenceType):\n """Additional inference parameters for Text To Video"""\n\n guidance_scale: Optional[float] = None\n """A higher guidance scale value encourages the model to generate videos closely linked to\n the text prompt, but values too high may cause saturation and other artifacts.\n """\n negative_prompt: Optional[List[str]] = None\n """One or several prompt to guide what NOT to include in video generation."""\n num_frames: Optional[float] = None\n """The num_frames parameter determines how many video frames are generated."""\n num_inference_steps: Optional[int] = None\n """The number of denoising steps. More denoising steps usually lead to a higher quality\n video at the expense of slower inference.\n """\n seed: Optional[int] = None\n """Seed for the random number generator."""\n\n\n@dataclass_with_extra\nclass TextToVideoInput(BaseInferenceType):\n """Inputs for Text To Video inference"""\n\n inputs: str\n """The input text data (sometimes called "prompt")"""\n parameters: Optional[TextToVideoParameters] = None\n """Additional inference parameters for Text To Video"""\n\n\n@dataclass_with_extra\nclass TextToVideoOutput(BaseInferenceType):\n """Outputs of inference for the Text To Video task"""\n\n video: Any\n """The generated video returned as raw bytes in the payload."""\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\text_to_video.py
text_to_video.py
Python
1,790
0.95
0.173913
0.138889
awesome-app
795
2023-08-16T08:50:36.297164
Apache-2.0
false
bb0f4c0ff9a87a56e3e62734fdfdfb9b
# Inference code generated from the JSON schema spec in @huggingface/tasks.\n#\n# See:\n# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts\n# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.\nfrom typing import List, Literal, Optional\n\nfrom .base import BaseInferenceType, dataclass_with_extra\n\n\nTokenClassificationAggregationStrategy = Literal["none", "simple", "first", "average", "max"]\n\n\n@dataclass_with_extra\nclass TokenClassificationParameters(BaseInferenceType):\n """Additional inference parameters for Token Classification"""\n\n aggregation_strategy: Optional["TokenClassificationAggregationStrategy"] = None\n """The strategy used to fuse tokens based on model predictions"""\n ignore_labels: Optional[List[str]] = None\n """A list of labels to ignore"""\n stride: Optional[int] = None\n """The number of overlapping tokens between chunks when splitting the input text."""\n\n\n@dataclass_with_extra\nclass TokenClassificationInput(BaseInferenceType):\n """Inputs for Token Classification inference"""\n\n inputs: str\n """The input text data"""\n parameters: Optional[TokenClassificationParameters] = None\n """Additional inference parameters for Token Classification"""\n\n\n@dataclass_with_extra\nclass TokenClassificationOutputElement(BaseInferenceType):\n """Outputs of inference for the Token Classification task"""\n\n end: int\n """The character position in the input where this group ends."""\n score: float\n """The associated score / probability"""\n start: int\n """The character position in the input where this group begins."""\n word: str\n """The corresponding text"""\n entity: Optional[str] = None\n """The predicted label for a single token"""\n entity_group: Optional[str] = None\n """The predicted label for a group of one or more tokens"""\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\token_classification.py
token_classification.py
Python
1,915
0.95
0.176471
0.128205
python-kit
949
2025-01-12T00:14:40.406288
BSD-3-Clause
false
2935050b06ea55888bce688b834792e4
# Inference code generated from the JSON schema spec in @huggingface/tasks.\n#\n# See:\n# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts\n# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.\nfrom typing import Any, Dict, Literal, Optional\n\nfrom .base import BaseInferenceType, dataclass_with_extra\n\n\nTranslationTruncationStrategy = Literal["do_not_truncate", "longest_first", "only_first", "only_second"]\n\n\n@dataclass_with_extra\nclass TranslationParameters(BaseInferenceType):\n """Additional inference parameters for Translation"""\n\n clean_up_tokenization_spaces: Optional[bool] = None\n """Whether to clean up the potential extra spaces in the text output."""\n generate_parameters: Optional[Dict[str, Any]] = None\n """Additional parametrization of the text generation algorithm."""\n src_lang: Optional[str] = None\n """The source language of the text. Required for models that can translate from multiple\n languages.\n """\n tgt_lang: Optional[str] = None\n """Target language to translate to. Required for models that can translate to multiple\n languages.\n """\n truncation: Optional["TranslationTruncationStrategy"] = None\n """The truncation strategy to use."""\n\n\n@dataclass_with_extra\nclass TranslationInput(BaseInferenceType):\n """Inputs for Translation inference"""\n\n inputs: str\n """The text to translate."""\n parameters: Optional[TranslationParameters] = None\n """Additional inference parameters for Translation"""\n\n\n@dataclass_with_extra\nclass TranslationOutput(BaseInferenceType):\n """Outputs of inference for the Translation task"""\n\n translation_text: str\n """The translated text."""\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\translation.py
translation.py
Python
1,763
0.95
0.183673
0.135135
node-utils
189
2023-12-12T04:25:18.440435
BSD-3-Clause
false
fd3a289fd1d1e66b7c2685d7d19dde36
# Inference code generated from the JSON schema spec in @huggingface/tasks.\n#\n# See:\n# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts\n# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.\nfrom typing import Any, Literal, Optional\n\nfrom .base import BaseInferenceType, dataclass_with_extra\n\n\nVideoClassificationOutputTransform = Literal["sigmoid", "softmax", "none"]\n\n\n@dataclass_with_extra\nclass VideoClassificationParameters(BaseInferenceType):\n """Additional inference parameters for Video Classification"""\n\n frame_sampling_rate: Optional[int] = None\n """The sampling rate used to select frames from the video."""\n function_to_apply: Optional["VideoClassificationOutputTransform"] = None\n """The function to apply to the model outputs in order to retrieve the scores."""\n num_frames: Optional[int] = None\n """The number of sampled frames to consider for classification."""\n top_k: Optional[int] = None\n """When specified, limits the output to the top K most probable classes."""\n\n\n@dataclass_with_extra\nclass VideoClassificationInput(BaseInferenceType):\n """Inputs for Video Classification inference"""\n\n inputs: Any\n """The input video data"""\n parameters: Optional[VideoClassificationParameters] = None\n """Additional inference parameters for Video Classification"""\n\n\n@dataclass_with_extra\nclass VideoClassificationOutputElement(BaseInferenceType):\n """Outputs of inference for the Video Classification task"""\n\n label: str\n """The predicted class label."""\n score: float\n """The corresponding probability."""\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\video_classification.py
video_classification.py
Python
1,680
0.95
0.222222
0.151515
node-utils
825
2024-04-27T23:31:00.201626
BSD-3-Clause
false
09ff1db62dcf532a56b18902d2e3cd38
# Inference code generated from the JSON schema spec in @huggingface/tasks.\n#\n# See:\n# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts\n# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.\nfrom typing import Any, Optional\n\nfrom .base import BaseInferenceType, dataclass_with_extra\n\n\n@dataclass_with_extra\nclass VisualQuestionAnsweringInputData(BaseInferenceType):\n """One (image, question) pair to answer"""\n\n image: Any\n """The image."""\n question: str\n """The question to answer based on the image."""\n\n\n@dataclass_with_extra\nclass VisualQuestionAnsweringParameters(BaseInferenceType):\n """Additional inference parameters for Visual Question Answering"""\n\n top_k: Optional[int] = None\n """The number of answers to return (will be chosen by order of likelihood). Note that we\n return less than topk answers if there are not enough options available within the\n context.\n """\n\n\n@dataclass_with_extra\nclass VisualQuestionAnsweringInput(BaseInferenceType):\n """Inputs for Visual Question Answering inference"""\n\n inputs: VisualQuestionAnsweringInputData\n """One (image, question) pair to answer"""\n parameters: Optional[VisualQuestionAnsweringParameters] = None\n """Additional inference parameters for Visual Question Answering"""\n\n\n@dataclass_with_extra\nclass VisualQuestionAnsweringOutputElement(BaseInferenceType):\n """Outputs of inference for the Visual Question Answering task"""\n\n score: float\n """The associated score / probability"""\n answer: Optional[str] = None\n """The answer to the question"""\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\visual_question_answering.py
visual_question_answering.py
Python
1,673
0.95
0.183673
0.138889
awesome-app
321
2024-07-05T04:12:46.969953
MIT
false
5b4bd0939b49be5da38b6910c3027728
# Inference code generated from the JSON schema spec in @huggingface/tasks.\n#\n# See:\n# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts\n# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.\nfrom typing import List, Optional\n\nfrom .base import BaseInferenceType, dataclass_with_extra\n\n\n@dataclass_with_extra\nclass ZeroShotClassificationParameters(BaseInferenceType):\n """Additional inference parameters for Zero Shot Classification"""\n\n candidate_labels: List[str]\n """The set of possible class labels to classify the text into."""\n hypothesis_template: Optional[str] = None\n """The sentence used in conjunction with `candidate_labels` to attempt the text\n classification by replacing the placeholder with the candidate labels.\n """\n multi_label: Optional[bool] = None\n """Whether multiple candidate labels can be true. If false, the scores are normalized such\n that the sum of the label likelihoods for each sequence is 1. If true, the labels are\n considered independent and probabilities are normalized for each candidate.\n """\n\n\n@dataclass_with_extra\nclass ZeroShotClassificationInput(BaseInferenceType):\n """Inputs for Zero Shot Classification inference"""\n\n inputs: str\n """The text to classify"""\n parameters: ZeroShotClassificationParameters\n """Additional inference parameters for Zero Shot Classification"""\n\n\n@dataclass_with_extra\nclass ZeroShotClassificationOutputElement(BaseInferenceType):\n """Outputs of inference for the Zero Shot Classification task"""\n\n label: str\n """The predicted class label."""\n score: float\n """The corresponding probability."""\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\zero_shot_classification.py
zero_shot_classification.py
Python
1,738
0.95
0.244444
0.142857
react-lib
849
2023-09-06T03:25:33.032859
GPL-3.0
false
0e90ad8f872470ea9ddf2cdbc2e6f2df
# Inference code generated from the JSON schema spec in @huggingface/tasks.\n#\n# See:\n# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts\n# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.\nfrom typing import List, Optional\n\nfrom .base import BaseInferenceType, dataclass_with_extra\n\n\n@dataclass_with_extra\nclass ZeroShotImageClassificationParameters(BaseInferenceType):\n """Additional inference parameters for Zero Shot Image Classification"""\n\n candidate_labels: List[str]\n """The candidate labels for this image"""\n hypothesis_template: Optional[str] = None\n """The sentence used in conjunction with `candidate_labels` to attempt the image\n classification by replacing the placeholder with the candidate labels.\n """\n\n\n@dataclass_with_extra\nclass ZeroShotImageClassificationInput(BaseInferenceType):\n """Inputs for Zero Shot Image Classification inference"""\n\n inputs: str\n """The input image data to classify as a base64-encoded string."""\n parameters: ZeroShotImageClassificationParameters\n """Additional inference parameters for Zero Shot Image Classification"""\n\n\n@dataclass_with_extra\nclass ZeroShotImageClassificationOutputElement(BaseInferenceType):\n """Outputs of inference for the Zero Shot Image Classification task"""\n\n label: str\n """The predicted class label."""\n score: float\n """The corresponding probability."""\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\zero_shot_image_classification.py
zero_shot_image_classification.py
Python
1,487
0.95
0.225
0.166667
vue-tools
220
2024-04-21T19:26:10.957443
Apache-2.0
false
da63211672d73a109da4104142b73770
# Inference code generated from the JSON schema spec in @huggingface/tasks.\n#\n# See:\n# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts\n# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.\nfrom typing import List\n\nfrom .base import BaseInferenceType, dataclass_with_extra\n\n\n@dataclass_with_extra\nclass ZeroShotObjectDetectionParameters(BaseInferenceType):\n """Additional inference parameters for Zero Shot Object Detection"""\n\n candidate_labels: List[str]\n """The candidate labels for this image"""\n\n\n@dataclass_with_extra\nclass ZeroShotObjectDetectionInput(BaseInferenceType):\n """Inputs for Zero Shot Object Detection inference"""\n\n inputs: str\n """The input image data as a base64-encoded string."""\n parameters: ZeroShotObjectDetectionParameters\n """Additional inference parameters for Zero Shot Object Detection"""\n\n\n@dataclass_with_extra\nclass ZeroShotObjectDetectionBoundingBox(BaseInferenceType):\n """The predicted bounding box. Coordinates are relative to the top left corner of the input\n image.\n """\n\n xmax: int\n xmin: int\n ymax: int\n ymin: int\n\n\n@dataclass_with_extra\nclass ZeroShotObjectDetectionOutputElement(BaseInferenceType):\n """Outputs of inference for the Zero Shot Object Detection task"""\n\n box: ZeroShotObjectDetectionBoundingBox\n """The predicted bounding box. Coordinates are relative to the top left corner of the input\n image.\n """\n label: str\n """A candidate label"""\n score: float\n """The associated score / probability"""\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\zero_shot_object_detection.py
zero_shot_object_detection.py
Python
1,630
0.95
0.173077
0.128205
react-lib
287
2025-07-08T14:59:06.728719
GPL-3.0
false
ee005f1a0b4239fe589bb76b13995d34
# This file is auto-generated by `utils/generate_inference_types.py`.\n# Do not modify it manually.\n#\n# ruff: noqa: F401\n\nfrom .audio_classification import (\n AudioClassificationInput,\n AudioClassificationOutputElement,\n AudioClassificationOutputTransform,\n AudioClassificationParameters,\n)\nfrom .audio_to_audio import AudioToAudioInput, AudioToAudioOutputElement\nfrom .automatic_speech_recognition import (\n AutomaticSpeechRecognitionEarlyStoppingEnum,\n AutomaticSpeechRecognitionGenerationParameters,\n AutomaticSpeechRecognitionInput,\n AutomaticSpeechRecognitionOutput,\n AutomaticSpeechRecognitionOutputChunk,\n AutomaticSpeechRecognitionParameters,\n)\nfrom .base import BaseInferenceType\nfrom .chat_completion import (\n ChatCompletionInput,\n ChatCompletionInputFunctionDefinition,\n ChatCompletionInputFunctionName,\n ChatCompletionInputGrammarType,\n ChatCompletionInputJSONSchema,\n ChatCompletionInputMessage,\n ChatCompletionInputMessageChunk,\n ChatCompletionInputMessageChunkType,\n ChatCompletionInputResponseFormatJSONObject,\n ChatCompletionInputResponseFormatJSONSchema,\n ChatCompletionInputResponseFormatText,\n ChatCompletionInputStreamOptions,\n ChatCompletionInputTool,\n ChatCompletionInputToolCall,\n ChatCompletionInputToolChoiceClass,\n ChatCompletionInputToolChoiceEnum,\n ChatCompletionInputURL,\n ChatCompletionOutput,\n ChatCompletionOutputComplete,\n ChatCompletionOutputFunctionDefinition,\n ChatCompletionOutputLogprob,\n ChatCompletionOutputLogprobs,\n ChatCompletionOutputMessage,\n ChatCompletionOutputToolCall,\n ChatCompletionOutputTopLogprob,\n ChatCompletionOutputUsage,\n ChatCompletionStreamOutput,\n ChatCompletionStreamOutputChoice,\n ChatCompletionStreamOutputDelta,\n ChatCompletionStreamOutputDeltaToolCall,\n ChatCompletionStreamOutputFunction,\n ChatCompletionStreamOutputLogprob,\n ChatCompletionStreamOutputLogprobs,\n ChatCompletionStreamOutputTopLogprob,\n ChatCompletionStreamOutputUsage,\n)\nfrom .depth_estimation import 
DepthEstimationInput, DepthEstimationOutput\nfrom .document_question_answering import (\n DocumentQuestionAnsweringInput,\n DocumentQuestionAnsweringInputData,\n DocumentQuestionAnsweringOutputElement,\n DocumentQuestionAnsweringParameters,\n)\nfrom .feature_extraction import FeatureExtractionInput, FeatureExtractionInputTruncationDirection\nfrom .fill_mask import FillMaskInput, FillMaskOutputElement, FillMaskParameters\nfrom .image_classification import (\n ImageClassificationInput,\n ImageClassificationOutputElement,\n ImageClassificationOutputTransform,\n ImageClassificationParameters,\n)\nfrom .image_segmentation import (\n ImageSegmentationInput,\n ImageSegmentationOutputElement,\n ImageSegmentationParameters,\n ImageSegmentationSubtask,\n)\nfrom .image_to_image import ImageToImageInput, ImageToImageOutput, ImageToImageParameters, ImageToImageTargetSize\nfrom .image_to_text import (\n ImageToTextEarlyStoppingEnum,\n ImageToTextGenerationParameters,\n ImageToTextInput,\n ImageToTextOutput,\n ImageToTextParameters,\n)\nfrom .object_detection import (\n ObjectDetectionBoundingBox,\n ObjectDetectionInput,\n ObjectDetectionOutputElement,\n ObjectDetectionParameters,\n)\nfrom .question_answering import (\n QuestionAnsweringInput,\n QuestionAnsweringInputData,\n QuestionAnsweringOutputElement,\n QuestionAnsweringParameters,\n)\nfrom .sentence_similarity import SentenceSimilarityInput, SentenceSimilarityInputData\nfrom .summarization import (\n SummarizationInput,\n SummarizationOutput,\n SummarizationParameters,\n SummarizationTruncationStrategy,\n)\nfrom .table_question_answering import (\n Padding,\n TableQuestionAnsweringInput,\n TableQuestionAnsweringInputData,\n TableQuestionAnsweringOutputElement,\n TableQuestionAnsweringParameters,\n)\nfrom .text2text_generation import (\n Text2TextGenerationInput,\n Text2TextGenerationOutput,\n Text2TextGenerationParameters,\n Text2TextGenerationTruncationStrategy,\n)\nfrom .text_classification import (\n 
TextClassificationInput,\n TextClassificationOutputElement,\n TextClassificationOutputTransform,\n TextClassificationParameters,\n)\nfrom .text_generation import (\n TextGenerationInput,\n TextGenerationInputGenerateParameters,\n TextGenerationInputGrammarType,\n TextGenerationOutput,\n TextGenerationOutputBestOfSequence,\n TextGenerationOutputDetails,\n TextGenerationOutputFinishReason,\n TextGenerationOutputPrefillToken,\n TextGenerationOutputToken,\n TextGenerationStreamOutput,\n TextGenerationStreamOutputStreamDetails,\n TextGenerationStreamOutputToken,\n TypeEnum,\n)\nfrom .text_to_audio import (\n TextToAudioEarlyStoppingEnum,\n TextToAudioGenerationParameters,\n TextToAudioInput,\n TextToAudioOutput,\n TextToAudioParameters,\n)\nfrom .text_to_image import TextToImageInput, TextToImageOutput, TextToImageParameters\nfrom .text_to_speech import (\n TextToSpeechEarlyStoppingEnum,\n TextToSpeechGenerationParameters,\n TextToSpeechInput,\n TextToSpeechOutput,\n TextToSpeechParameters,\n)\nfrom .text_to_video import TextToVideoInput, TextToVideoOutput, TextToVideoParameters\nfrom .token_classification import (\n TokenClassificationAggregationStrategy,\n TokenClassificationInput,\n TokenClassificationOutputElement,\n TokenClassificationParameters,\n)\nfrom .translation import TranslationInput, TranslationOutput, TranslationParameters, TranslationTruncationStrategy\nfrom .video_classification import (\n VideoClassificationInput,\n VideoClassificationOutputElement,\n VideoClassificationOutputTransform,\n VideoClassificationParameters,\n)\nfrom .visual_question_answering import (\n VisualQuestionAnsweringInput,\n VisualQuestionAnsweringInputData,\n VisualQuestionAnsweringOutputElement,\n VisualQuestionAnsweringParameters,\n)\nfrom .zero_shot_classification import (\n ZeroShotClassificationInput,\n ZeroShotClassificationOutputElement,\n ZeroShotClassificationParameters,\n)\nfrom .zero_shot_image_classification import (\n ZeroShotImageClassificationInput,\n 
ZeroShotImageClassificationOutputElement,\n ZeroShotImageClassificationParameters,\n)\nfrom .zero_shot_object_detection import (\n ZeroShotObjectDetectionBoundingBox,\n ZeroShotObjectDetectionInput,\n ZeroShotObjectDetectionOutputElement,\n ZeroShotObjectDetectionParameters,\n)\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\__init__.py
__init__.py
Python
6,443
0.95
0
0.021053
node-utils
218
2024-02-02T13:41:41.600001
Apache-2.0
false
7b83acd3402476deda943b7e435f5e3c
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\__pycache__\audio_classification.cpython-313.pyc
audio_classification.cpython-313.pyc
Other
1,714
0.95
0.375
0
awesome-app
763
2025-06-16T02:52:09.435902
GPL-3.0
false
29abfc15169be04dff23d419a1d8264e
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\__pycache__\audio_to_audio.cpython-313.pyc
audio_to_audio.cpython-313.pyc
Other
1,148
0.8
0.2
0
react-lib
586
2024-01-12T05:15:47.202420
GPL-3.0
false
19219f4ac133a6b7228ccf713919532b
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\__pycache__\automatic_speech_recognition.cpython-313.pyc
automatic_speech_recognition.cpython-313.pyc
Other
3,487
0.8
0.115385
0
vue-tools
395
2025-01-20T14:02:41.537317
BSD-3-Clause
false
ac5c915c5ea03472d004034dbdd1bc49
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\__pycache__\base.cpython-313.pyc
base.cpython-313.pyc
Other
7,827
0.95
0.092105
0
node-utils
1
2024-09-22T21:35:05.628471
Apache-2.0
false
d6fd95c7ad9cae750503ccd6dba052d3
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\__pycache__\chat_completion.cpython-313.pyc
chat_completion.cpython-313.pyc
Other
13,035
0.95
0.008547
0
vue-tools
50
2024-08-28T22:55:25.632723
MIT
false
fbed7af6a6c2d391787e35ea3273085c
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\__pycache__\depth_estimation.cpython-313.pyc
depth_estimation.cpython-313.pyc
Other
1,208
0.7
0.2
0
node-utils
945
2025-02-23T17:31:16.264681
Apache-2.0
false
b943609f6c450a259102ef888542df31
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\__pycache__\document_question_answering.cpython-313.pyc
document_question_answering.cpython-313.pyc
Other
2,561
0.8
0.1875
0.066667
awesome-app
343
2024-03-01T18:40:48.849402
GPL-3.0
false
b73e848c121c991e631d3a4db4ef71d4
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\__pycache__\feature_extraction.cpython-313.pyc
feature_extraction.cpython-313.pyc
Other
1,305
0.95
0
0
python-kit
718
2024-08-08T23:38:20.161791
BSD-3-Clause
false
8f7e7aed8713dabccaf2a5bc92257b66
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\__pycache__\fill_mask.cpython-313.pyc
fill_mask.cpython-313.pyc
Other
1,721
0.8
0.12
0
node-utils
407
2023-11-01T19:37:00.837567
MIT
false
469dc6ec293ef3bce30f97ae4f3602f7
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\__pycache__\image_classification.cpython-313.pyc
image_classification.cpython-313.pyc
Other
1,726
0.95
0.375
0
node-utils
357
2024-06-01T13:57:23.756699
MIT
false
18fc1d981bd89f20f4ad2f633620d793
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\__pycache__\image_segmentation.cpython-313.pyc
image_segmentation.cpython-313.pyc
Other
1,919
0.8
0.136364
0
react-lib
391
2023-07-25T16:34:44.571835
GPL-3.0
false
b68395073410a755fef28e5dfcc32eb0
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\__pycache__\image_to_image.cpython-313.pyc
image_to_image.cpython-313.pyc
Other
2,078
0.8
0.142857
0
react-lib
726
2024-01-30T12:07:04.952368
Apache-2.0
false
1b11c1eecf78f28af9930e634c92868a
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\__pycache__\image_to_text.cpython-313.pyc
image_to_text.cpython-313.pyc
Other
3,014
0.8
0.136364
0
awesome-app
200
2023-08-12T06:46:00.152236
MIT
false
8b9588590de7ce9bda8577e80a2302eb
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\__pycache__\object_detection.cpython-313.pyc
object_detection.cpython-313.pyc
Other
2,012
0.8
0.125
0
awesome-app
517
2023-11-22T23:59:13.282351
GPL-3.0
false
38b27c3b68ebca20b119a0e07ab5a0eb
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\__pycache__\question_answering.cpython-313.pyc
question_answering.cpython-313.pyc
Other
2,329
0.8
0.125
0
node-utils
724
2025-02-19T18:59:56.602842
GPL-3.0
false
02b8c71c8d69521a3d9eda99fd13f3fc
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\__pycache__\sentence_similarity.cpython-313.pyc
sentence_similarity.cpython-313.pyc
Other
1,200
0.7
0.142857
0
awesome-app
50
2024-05-15T23:03:14.337782
GPL-3.0
false
56e8d13a52c6f9f69cb6657d9b8bc214
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\__pycache__\summarization.cpython-313.pyc
summarization.cpython-313.pyc
Other
1,817
0.8
0.25
0
vue-tools
286
2023-12-08T17:45:51.018735
Apache-2.0
false
39decc4b844bf117279c1304723863d4
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\__pycache__\table_question_answering.cpython-313.pyc
table_question_answering.cpython-313.pyc
Other
2,337
0.8
0.15
0
react-lib
899
2024-02-24T13:44:32.443607
GPL-3.0
false
8a2a7cce213b68c57742f76738b885bb
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\__pycache__\text2text_generation.cpython-313.pyc
text2text_generation.cpython-313.pyc
Other
1,968
0.8
0.230769
0
react-lib
755
2023-09-14T11:25:17.088873
BSD-3-Clause
false
e43cc570d0e3d3ae86219d791df9285f
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\__pycache__\text_classification.cpython-313.pyc
text_classification.cpython-313.pyc
Other
1,714
0.95
0.375
0
python-kit
517
2023-12-28T00:52:58.100163
Apache-2.0
false
f155082a0ede8c9618d5c0b155949563
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\__pycache__\text_generation.cpython-313.pyc
text_generation.cpython-313.pyc
Other
6,471
0.95
0
0
react-lib
32
2024-01-17T04:28:17.863045
MIT
false
db44cd82620a48169a7f1180e644c48d
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\__pycache__\text_to_audio.cpython-313.pyc
text_to_audio.cpython-313.pyc
Other
2,904
0.8
0.136364
0
react-lib
67
2023-07-12T10:04:14.747099
Apache-2.0
false
00e448c886cdf04e17f4ae8b6072b42d
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\__pycache__\text_to_image.cpython-313.pyc
text_to_image.cpython-313.pyc
Other
1,829
0.8
0.214286
0
node-utils
673
2023-11-30T12:20:46.585619
MIT
false
dad3a876c5c4890902539315b278e85c
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\__pycache__\text_to_speech.cpython-313.pyc
text_to_speech.cpython-313.pyc
Other
2,941
0.8
0.130435
0
python-kit
302
2025-03-23T07:46:16.927003
GPL-3.0
false
3799f5afbe14296771deff3174d08c36
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\__pycache__\text_to_video.cpython-313.pyc
text_to_video.cpython-313.pyc
Other
1,761
0.8
0.166667
0
node-utils
780
2023-12-10T08:56:45.198005
GPL-3.0
false
e6d1adf1c11cda05951e0e4316997112
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\__pycache__\token_classification.cpython-313.pyc
token_classification.cpython-313.pyc
Other
2,013
0.8
0.1875
0
react-lib
697
2024-11-26T23:44:28.729550
Apache-2.0
false
ab45b39e227410cbce039ad31e8c28b3
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\__pycache__\translation.cpython-313.pyc
translation.cpython-313.pyc
Other
1,921
0.8
0.230769
0
python-kit
176
2024-04-04T05:15:24.862001
Apache-2.0
false
8880f77e88f165998c51cc315b94565d
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\__pycache__\video_classification.cpython-313.pyc
video_classification.cpython-313.pyc
Other
1,861
0.95
0.15
0
vue-tools
674
2025-01-17T14:02:55.498833
Apache-2.0
false
25c44cea3692791f55895b27978ae5c0
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\__pycache__\visual_question_answering.cpython-313.pyc
visual_question_answering.cpython-313.pyc
Other
1,976
0.8
0.166667
0
node-utils
733
2025-07-05T02:41:23.703180
GPL-3.0
false
3846598b5d07a87cc164916fa1462cab
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\__pycache__\zero_shot_classification.cpython-313.pyc
zero_shot_classification.cpython-313.pyc
Other
1,692
0.8
0.272727
0
react-lib
54
2025-01-19T10:30:50.542621
Apache-2.0
false
cb3d94c45337ed1881ac0c1d09092926
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\__pycache__\zero_shot_image_classification.cpython-313.pyc
zero_shot_image_classification.cpython-313.pyc
Other
1,664
0.8
0.25
0
awesome-app
931
2023-10-05T15:41:02.394540
Apache-2.0
false
f146080d75a06aa8ec049acdcd29d608
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\__pycache__\zero_shot_object_detection.cpython-313.pyc
zero_shot_object_detection.cpython-313.pyc
Other
2,022
0.8
0.142857
0
node-utils
353
2024-10-17T01:43:52.400954
Apache-2.0
false
ea4b4af42fd8f378e9d67a7eaba4d4dc
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\types\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
7,635
0.8
0
0
react-lib
750
2024-09-08T08:52:01.388467
MIT
false
c2eb7185783d3e4e71769c69e855976c
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_generated\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
211
0.7
0
0
vue-tools
279
2024-03-23T15:41:13.548123
GPL-3.0
false
4b2c454252402803dd36cdac05de4afa
from __future__ import annotations

import asyncio
from typing import AsyncGenerator, Dict, Iterable, List, Optional, Union

from huggingface_hub import ChatCompletionInputMessage, ChatCompletionStreamOutput, MCPClient

from .._providers import PROVIDER_OR_POLICY_T
from .constants import DEFAULT_SYSTEM_PROMPT, EXIT_LOOP_TOOLS, MAX_NUM_TURNS
from .types import ServerConfig


class Agent(MCPClient):
    """
    Implementation of a Simple Agent, which is a simple while loop built right on top of an [`MCPClient`].

    <Tip warning={true}>

    This class is experimental and might be subject to breaking changes in the future without prior notice.

    </Tip>

    Args:
        model (`str`, *optional*):
            The model to run inference with. Can be a model id hosted on the Hugging Face Hub, e.g. `meta-llama/Meta-Llama-3-8B-Instruct`
            or a URL to a deployed Inference Endpoint or other local or remote endpoint.
        servers (`Iterable[Dict]`):
            MCP servers to connect to. Each server is a dictionary containing a `type` key and a `config` key. The `type` key can be `"stdio"` or `"sse"`, and the `config` key is a dictionary of arguments for the server.
        provider (`str`, *optional*):
            Name of the provider to use for inference. Defaults to "auto" i.e. the first of the providers available for the model, sorted by the user's order in https://hf.co/settings/inference-providers.
            If model is a URL or `base_url` is passed, then `provider` is not used.
        base_url (`str`, *optional*):
            The base URL to run inference. Defaults to None.
        api_key (`str`, *optional*):
            Token to use for authentication. Will default to the locally Hugging Face saved token if not provided. You can also use your own provider API key to interact directly with the provider's service.
        prompt (`str`, *optional*):
            The system prompt to use for the agent. Defaults to the default system prompt in `constants.py`.
    """

    def __init__(
        self,
        *,
        model: Optional[str] = None,
        servers: Iterable[ServerConfig],
        provider: Optional[PROVIDER_OR_POLICY_T] = None,
        base_url: Optional[str] = None,
        api_key: Optional[str] = None,
        prompt: Optional[str] = None,
    ):
        super().__init__(model=model, provider=provider, base_url=base_url, api_key=api_key)
        # Server configs are only stored here; actual connections happen in `load_tools`.
        self._servers_cfg = list(servers)
        # Full conversation history, seeded with the system prompt. Entries are either
        # plain dicts (system/user turns) or ChatCompletionInputMessage objects
        # appended by `process_single_turn_with_tools`.
        self.messages: List[Union[Dict, ChatCompletionInputMessage]] = [
            {"role": "system", "content": prompt or DEFAULT_SYSTEM_PROMPT}
        ]

    async def load_tools(self) -> None:
        """Connect to every configured MCP server and register its tools."""
        for cfg in self._servers_cfg:
            await self.add_mcp_server(**cfg)

    async def run(
        self,
        user_input: str,
        *,
        abort_event: Optional[asyncio.Event] = None,
    ) -> AsyncGenerator[Union[ChatCompletionStreamOutput, ChatCompletionInputMessage], None]:
        """
        Run the agent with the given user input.

        Args:
            user_input (`str`):
                The user input to run the agent with.
            abort_event (`asyncio.Event`, *optional*):
                An event that can be used to abort the agent. If the event is set, the agent will stop running.

        Yields:
            Raw [`ChatCompletionStreamOutput`] chunks from the model and
            [`ChatCompletionInputMessage`] tool-result messages, as produced by
            `process_single_turn_with_tools`.
        """
        self.messages.append({"role": "user", "content": user_input})

        num_turns: int = 0
        # The first turn is always allowed to call tools; afterwards this flag tracks
        # whether the *previous* turn ended without a tool result.
        next_turn_should_call_tools = True

        while True:
            # Cooperative cancellation: checked once per turn.
            if abort_event and abort_event.is_set():
                return

            async for item in self.process_single_turn_with_tools(
                self.messages,
                exit_loop_tools=EXIT_LOOP_TOOLS,
                # After the first turn, bail out early if the model starts answering
                # in plain text when a tool call was expected.
                exit_if_first_chunk_no_tool=(num_turns > 0 and next_turn_should_call_tools),
            ):
                yield item

            num_turns += 1
            last = self.messages[-1]

            # Stop when the last message is the result of an exit-loop tool
            # (e.g. "task_complete" / "ask_question" from EXIT_LOOP_TOOLS).
            if last.get("role") == "tool" and last.get("name") in {t.function.name for t in EXIT_LOOP_TOOLS}:
                return

            # Safety valve: a plain-text (non-tool) turn after exceeding MAX_NUM_TURNS ends the loop.
            if last.get("role") != "tool" and num_turns > MAX_NUM_TURNS:
                return

            # The model produced a plain answer while a tool call was expected:
            # treat it as the final answer.
            if last.get("role") != "tool" and next_turn_should_call_tools:
                return

            # A tool result means the model should now react to it (no tool expected);
            # a non-tool message means the next turn is expected to call tools.
            next_turn_should_call_tools = last.get("role") != "tool"
.venv\Lib\site-packages\huggingface_hub\inference\_mcp\agent.py
agent.py
Python
4,281
0.95
0.203883
0.02439
awesome-app
797
2024-04-25T15:19:05.486725
Apache-2.0
false
c72d266aed5de1e65dec707bfed38af3
import asyncio
import os
import signal
import traceback
from typing import Optional

import typer
from rich import print

from ._cli_hacks import _async_prompt, _patch_anyio_open_process
from .agent import Agent
from .utils import _load_agent_config


app = typer.Typer(
    rich_markup_mode="rich",
    help="A squad of lightweight composable AI applications built on Hugging Face's Inference Client and MCP stack.",
)

# `run` is a sub-Typer so the CLI reads `tiny-agents run [PATH]`.
run_cli = typer.Typer(
    name="run",
    help="Run the Agent in the CLI",
    invoke_without_command=True,
)
app.add_typer(run_cli, name="run")


async def run_agent(
    agent_path: Optional[str],
) -> None:
    """
    Tiny Agent loop.

    Args:
        agent_path (`str`, *optional*):
            Path to a local folder containing an `agent.json` and optionally a custom `PROMPT.md` file or a built-in agent stored in a Hugging Face dataset.

    """
    _patch_anyio_open_process()  # Hacky way to prevent stdio connections to be stopped by Ctrl+C

    config, prompt = _load_agent_config(agent_path)

    inputs = config.get("inputs", [])
    servers = config.get("servers", [])

    # First Ctrl+C sets `abort_event` (abort current generation); second sets `exit_event` (quit).
    abort_event = asyncio.Event()
    exit_event = asyncio.Event()
    first_sigint = True

    loop = asyncio.get_running_loop()
    original_sigint_handler = signal.getsignal(signal.SIGINT)

    def _sigint_handler() -> None:
        nonlocal first_sigint
        if first_sigint:
            first_sigint = False
            abort_event.set()
            print("\n[red]Interrupted. Press Ctrl+C again to quit.[/red]", flush=True)
            return

        print("\n[red]Exiting...[/red]", flush=True)
        exit_event.set()

    try:
        sigint_registered_in_loop = False
        try:
            loop.add_signal_handler(signal.SIGINT, _sigint_handler)
            sigint_registered_in_loop = True
        except (AttributeError, NotImplementedError):
            # Windows (or any loop that doesn't support it) : fall back to sync
            signal.signal(signal.SIGINT, lambda *_: _sigint_handler())

        # Handle inputs (i.e. env variables injection)
        # Each declared input becomes a `${input:<id>}` placeholder that may appear in
        # stdio servers' `env` values or http/sse servers' `headers` values.
        if len(inputs) > 0:
            print(
                "[bold blue]Some initial inputs are required by the agent. "
                "Please provide a value or leave empty to load from env.[/bold blue]"
            )
            for input_item in inputs:
                input_id = input_item["id"]
                description = input_item["description"]
                env_special_value = "${input:" + input_id + "}"  # Special value to indicate env variable injection

                # Check env variables that will use this input
                input_vars = set()
                for server in servers:
                    # Check stdio's "env" and http/sse's "headers" mappings
                    env_or_headers = server.get("env", {}) if server["type"] == "stdio" else server.get("headers", {})
                    for key, value in env_or_headers.items():
                        if env_special_value in value:
                            input_vars.add(key)

                if not input_vars:
                    print(f"[yellow]Input {input_id} defined in config but not used by any server.[/yellow]")
                    continue

                # Prompt user for input
                env_variable_key = input_id.replace("-", "_").upper()
                print(
                    f"[blue] • {input_id}[/blue]: {description}. (default: load from {env_variable_key}).",
                    end=" ",
                )
                user_input = (await _async_prompt(exit_event=exit_event)).strip()
                if exit_event.is_set():
                    return

                # Inject user input (or env variable) into stdio's env or http/sse's headers
                for server in servers:
                    env_or_headers = server.get("env", {}) if server["type"] == "stdio" else server.get("headers", {})
                    for key, value in env_or_headers.items():
                        if env_special_value in value:
                            if user_input:
                                env_or_headers[key] = env_or_headers[key].replace(env_special_value, user_input)
                            else:
                                # Empty answer: fall back to the environment variable (may be empty too).
                                value_from_env = os.getenv(env_variable_key, "")
                                env_or_headers[key] = env_or_headers[key].replace(env_special_value, value_from_env)
                                if value_from_env:
                                    print(f"[green]Value successfully loaded from '{env_variable_key}'[/green]")
                                else:
                                    print(
                                        f"[yellow]No value found for '{env_variable_key}' in environment variables. Continuing.[/yellow]"
                                    )

            print()

        # Main agent loop
        async with Agent(
            provider=config.get("provider"),  # type: ignore[arg-type]
            model=config.get("model"),
            base_url=config.get("endpointUrl"),  # type: ignore[arg-type]
            servers=servers,  # type: ignore[arg-type]
            prompt=prompt,
        ) as agent:
            await agent.load_tools()
            print(f"[bold blue]Agent loaded with {len(agent.available_tools)} tools:[/bold blue]")
            for t in agent.available_tools:
                print(f"[blue] • {t.function.name}[/blue]")

            while True:
                abort_event.clear()

                # Check if we should exit
                if exit_event.is_set():
                    return

                try:
                    user_input = await _async_prompt(exit_event=exit_event)
                    first_sigint = True  # a completed prompt re-arms the two-step Ctrl+C
                except EOFError:
                    print("\n[red]EOF received, exiting.[/red]", flush=True)
                    break
                except KeyboardInterrupt:
                    if not first_sigint and abort_event.is_set():
                        # Ctrl+C already consumed by the abort path: just re-prompt.
                        continue
                    else:
                        print("\n[red]Keyboard interrupt during input processing.[/red]", flush=True)
                        break

                try:
                    async for chunk in agent.run(user_input, abort_event=abort_event):
                        if abort_event.is_set() and not first_sigint:
                            break
                        if exit_event.is_set():
                            return

                        # Chunks with `.choices` are raw model stream output; otherwise
                        # it is a tool-result message yielded by the agent.
                        if hasattr(chunk, "choices"):
                            delta = chunk.choices[0].delta
                            if delta.content:
                                print(delta.content, end="", flush=True)
                            if delta.tool_calls:
                                for call in delta.tool_calls:
                                    if call.id:
                                        print(f"<Tool {call.id}>", end="")
                                    if call.function.name:
                                        print(f"{call.function.name}", end=" ")
                                    if call.function.arguments:
                                        print(f"{call.function.arguments}", end="")
                        else:
                            print(
                                f"\n\n[green]Tool[{chunk.name}] {chunk.tool_call_id}\n{chunk.content}[/green]\n",
                                flush=True,
                            )

                    print()

                except Exception as e:
                    tb_str = traceback.format_exc()
                    print(f"\n[bold red]Error during agent run: {e}\n{tb_str}[/bold red]", flush=True)
                    first_sigint = True  # Allow graceful interrupt for the next command

    except Exception as e:
        tb_str = traceback.format_exc()
        print(f"\n[bold red]An unexpected error occurred: {e}\n{tb_str}[/bold red]", flush=True)
        raise e

    finally:
        # Restore whichever SIGINT mechanism we installed.
        if sigint_registered_in_loop:
            try:
                loop.remove_signal_handler(signal.SIGINT)
            except (AttributeError, NotImplementedError):
                pass
        else:
            signal.signal(signal.SIGINT, original_sigint_handler)


@run_cli.callback()
def run(
    path: Optional[str] = typer.Argument(
        None,
        help=(
            "Path to a local folder containing an agent.json file or a built-in agent "
            "stored in the 'tiny-agents/tiny-agents' Hugging Face dataset "
            "(https://huggingface.co/datasets/tiny-agents/tiny-agents)"
        ),
        show_default=False,
    ),
):
    """CLI entry point: run the agent loop until EOF, double Ctrl+C, or an error."""
    try:
        asyncio.run(run_agent(path))
    except KeyboardInterrupt:
        print("\n[red]Application terminated by KeyboardInterrupt.[/red]", flush=True)
        # 130 is the conventional exit code for termination by SIGINT.
        raise typer.Exit(code=130)
    except Exception as e:
        print(f"\n[bold red]An unexpected error occurred: {e}[/bold red]", flush=True)
        raise e


if __name__ == "__main__":
    app()
.venv\Lib\site-packages\huggingface_hub\inference\_mcp\cli.py
cli.py
Python
9,031
0.95
0.213974
0.041451
awesome-app
504
2023-12-22T21:46:41.435565
MIT
false
b6230ea3a60bceb3857d030b20e74cc8
from __future__ import annotations

import sys
from pathlib import Path
from typing import List

from huggingface_hub import ChatCompletionInputTool


# File names looked up inside an agent directory.
FILENAME_CONFIG = "agent.json"
FILENAME_PROMPT = "PROMPT.md"

# Fallback agent used when no path is given on the CLI: filesystem access
# (rooted at ~/Desktop on macOS, at the home directory elsewhere) plus the
# Playwright browser MCP server, both spawned via npx.
DEFAULT_AGENT = {
    "model": "Qwen/Qwen2.5-72B-Instruct",
    "provider": "nebius",
    "servers": [
        {
            "type": "stdio",
            "config": {
                "command": "npx",
                "args": [
                    "-y",
                    "@modelcontextprotocol/server-filesystem",
                    str(Path.home() / ("Desktop" if sys.platform == "darwin" else "")),
                ],
            },
        },
        {
            "type": "stdio",
            "config": {
                "command": "npx",
                "args": ["@playwright/mcp@latest"],
            },
        },
    ],
}


# System prompt used when the agent directory has no PROMPT.md.
DEFAULT_SYSTEM_PROMPT = """
You are an agent - please keep going until the user’s query is completely
resolved, before ending your turn and yielding back to the user. Only terminate
your turn when you are sure that the problem is solved, or if you need more
info from the user to solve the problem.
If you are not sure about anything pertaining to the user’s request, use your
tools to read files and gather the relevant information: do NOT guess or make
up an answer.
You MUST plan extensively before each function call, and reflect extensively
on the outcomes of the previous function calls. DO NOT do this entire process
by making function calls only, as this can impair your ability to solve the
problem and think insightfully.
""".strip()

# Hard cap on assistant turns per `Agent.run` call (see agent.py).
MAX_NUM_TURNS = 10

# Pseudo-tool the model calls to signal it has finished the task.
TASK_COMPLETE_TOOL: ChatCompletionInputTool = ChatCompletionInputTool.parse_obj(  # type: ignore[assignment]
    {
        "type": "function",
        "function": {
            "name": "task_complete",
            "description": "Call this tool when the task given by the user is complete",
            "parameters": {"type": "object", "properties": {}},
        },
    }
)

# Pseudo-tool the model calls to hand control back to the user for clarification.
ASK_QUESTION_TOOL: ChatCompletionInputTool = ChatCompletionInputTool.parse_obj(  # type: ignore[assignment]
    {
        "type": "function",
        "function": {
            "name": "ask_question",
            "description": "Ask the user for more info required to solve or clarify their problem.",
            "parameters": {"type": "object", "properties": {}},
        },
    }
)

# Calling either of these tools terminates the agent loop immediately.
EXIT_LOOP_TOOLS: List[ChatCompletionInputTool] = [TASK_COMPLETE_TOOL, ASK_QUESTION_TOOL]


# Hub dataset hosting the built-in agents fetched by name.
DEFAULT_REPO_ID = "tiny-agents/tiny-agents"
.venv\Lib\site-packages\huggingface_hub\inference\_mcp\constants.py
constants.py
Python
2,511
0.95
0.125
0
react-lib
868
2024-01-16T00:24:41.816711
BSD-3-Clause
false
521070d1e65c008e470cc089ee83ef3d
import json
import logging
from contextlib import AsyncExitStack
from datetime import timedelta
from pathlib import Path
from typing import TYPE_CHECKING, Any, AsyncIterable, Dict, List, Literal, Optional, Union, overload

from typing_extensions import NotRequired, TypeAlias, TypedDict, Unpack

from ...utils._runtime import get_hf_hub_version
from .._generated._async_client import AsyncInferenceClient
from .._generated.types import (
    ChatCompletionInputMessage,
    ChatCompletionInputTool,
    ChatCompletionStreamOutput,
    ChatCompletionStreamOutputDeltaToolCall,
)
from .._providers import PROVIDER_OR_POLICY_T
from .utils import format_result


if TYPE_CHECKING:
    from mcp import ClientSession

logger = logging.getLogger(__name__)

# Type alias for tool names
ToolName: TypeAlias = str

ServerType: TypeAlias = Literal["stdio", "sse", "http"]


# Keyword arguments accepted by `add_mcp_server(type="stdio", ...)`.
class StdioServerParameters_T(TypedDict):
    command: str
    args: NotRequired[List[str]]
    env: NotRequired[Dict[str, str]]
    cwd: NotRequired[Union[str, Path, None]]


# Keyword arguments accepted by `add_mcp_server(type="sse", ...)`.
class SSEServerParameters_T(TypedDict):
    url: str
    headers: NotRequired[Dict[str, Any]]
    timeout: NotRequired[float]
    sse_read_timeout: NotRequired[float]


# Keyword arguments accepted by `add_mcp_server(type="http", ...)`.
class StreamableHTTPParameters_T(TypedDict):
    url: str
    headers: NotRequired[dict[str, Any]]
    timeout: NotRequired[timedelta]
    sse_read_timeout: NotRequired[timedelta]
    terminate_on_close: NotRequired[bool]


class MCPClient:
    """
    Client for connecting to one or more MCP servers and processing chat completions with tools.

    <Tip warning={true}>

    This class is experimental and might be subject to breaking changes in the future without prior notice.

    </Tip>

    Args:
        model (`str`, `optional`):
            The model to run inference with. Can be a model id hosted on the Hugging Face Hub, e.g. `meta-llama/Meta-Llama-3-8B-Instruct`
            or a URL to a deployed Inference Endpoint or other local or remote endpoint.
        provider (`str`, *optional*):
            Name of the provider to use for inference. Defaults to "auto" i.e. the first of the providers available for the model, sorted by the user's order in https://hf.co/settings/inference-providers.
            If model is a URL or `base_url` is passed, then `provider` is not used.
        base_url (`str`, *optional*):
            The base URL to run inference. Defaults to None.
        api_key (`str`, `optional`):
            Token to use for authentication. Will default to the locally Hugging Face saved token if not provided. You can also use your own provider API key to interact directly with the provider's service.
    """

    def __init__(
        self,
        *,
        model: Optional[str] = None,
        provider: Optional[PROVIDER_OR_POLICY_T] = None,
        base_url: Optional[str] = None,
        api_key: Optional[str] = None,
    ):
        # Initialize MCP sessions as a dictionary of ClientSession objects
        # (keyed by tool name, so each tool call can be routed to its server).
        self.sessions: Dict[ToolName, "ClientSession"] = {}
        self.exit_stack = AsyncExitStack()
        self.available_tools: List[ChatCompletionInputTool] = []
        # To be able to send the model in the payload if `base_url` is provided
        if model is None and base_url is None:
            raise ValueError("At least one of `model` or `base_url` should be set in `MCPClient`.")
        self.payload_model = model
        self.client = AsyncInferenceClient(
            model=None if base_url is not None else model,
            provider=provider,
            api_key=api_key,
            base_url=base_url,
        )

    async def __aenter__(self):
        """Enter the context manager"""
        await self.client.__aenter__()
        await self.exit_stack.__aenter__()
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Exit the context manager"""
        await self.client.__aexit__(exc_type, exc_val, exc_tb)
        # NOTE(review): `cleanup()` closes the client again after `__aexit__` above;
        # presumably `AsyncInferenceClient.close()` is idempotent — confirm.
        await self.cleanup()

    async def cleanup(self):
        """Clean up resources (inference client and all MCP server connections)."""
        await self.client.close()
        await self.exit_stack.aclose()

    @overload
    async def add_mcp_server(self, type: Literal["stdio"], **params: Unpack[StdioServerParameters_T]): ...

    @overload
    async def add_mcp_server(self, type: Literal["sse"], **params: Unpack[SSEServerParameters_T]): ...

    @overload
    async def add_mcp_server(self, type: Literal["http"], **params: Unpack[StreamableHTTPParameters_T]): ...

    async def add_mcp_server(self, type: ServerType, **params: Any):
        """Connect to an MCP server

        Args:
            type (`str`):
                Type of the server to connect to. Can be one of:
                - "stdio": Standard input/output server (local)
                - "sse": Server-sent events (SSE) server
                - "http": StreamableHTTP server
            **params (`Dict[str, Any]`):
                Server parameters that can be either:
                - For stdio servers:
                    - command (str): The command to run the MCP server
                    - args (List[str], optional): Arguments for the command
                    - env (Dict[str, str], optional): Environment variables for the command
                    - cwd (Union[str, Path, None], optional): Working directory for the command
                - For SSE servers:
                    - url (str): The URL of the SSE server
                    - headers (Dict[str, Any], optional): Headers for the SSE connection
                    - timeout (float, optional): Connection timeout
                    - sse_read_timeout (float, optional): SSE read timeout
                - For StreamableHTTP servers:
                    - url (str): The URL of the StreamableHTTP server
                    - headers (Dict[str, Any], optional): Headers for the StreamableHTTP connection
                    - timeout (timedelta, optional): Connection timeout
                    - sse_read_timeout (timedelta, optional): SSE read timeout
                    - terminate_on_close (bool, optional): Whether to terminate on close
        """
        from mcp import ClientSession, StdioServerParameters
        from mcp import types as mcp_types

        # Determine server type and create appropriate parameters.
        # Only explicitly-provided optional params are forwarded, so each client
        # library keeps its own defaults.
        if type == "stdio":
            # Handle stdio server
            from mcp.client.stdio import stdio_client

            logger.info(f"Connecting to stdio MCP server with command: {params['command']} {params.get('args', [])}")

            client_kwargs = {"command": params["command"]}
            for key in ["args", "env", "cwd"]:
                if params.get(key) is not None:
                    client_kwargs[key] = params[key]
            server_params = StdioServerParameters(**client_kwargs)
            read, write = await self.exit_stack.enter_async_context(stdio_client(server_params))
        elif type == "sse":
            # Handle SSE server
            from mcp.client.sse import sse_client

            logger.info(f"Connecting to SSE MCP server at: {params['url']}")

            client_kwargs = {"url": params["url"]}
            for key in ["headers", "timeout", "sse_read_timeout"]:
                if params.get(key) is not None:
                    client_kwargs[key] = params[key]
            read, write = await self.exit_stack.enter_async_context(sse_client(**client_kwargs))
        elif type == "http":
            # Handle StreamableHTTP server
            from mcp.client.streamable_http import streamablehttp_client

            logger.info(f"Connecting to StreamableHTTP MCP server at: {params['url']}")

            client_kwargs = {"url": params["url"]}
            for key in ["headers", "timeout", "sse_read_timeout", "terminate_on_close"]:
                if params.get(key) is not None:
                    client_kwargs[key] = params[key]
            read, write, _ = await self.exit_stack.enter_async_context(streamablehttp_client(**client_kwargs))
            # ^ TODO: should we handle `get_session_id_callback`? (function to retrieve the current session ID)
        else:
            raise ValueError(f"Unsupported server type: {type}")

        session = await self.exit_stack.enter_async_context(
            ClientSession(
                read_stream=read,
                write_stream=write,
                client_info=mcp_types.Implementation(
                    name="huggingface_hub.MCPClient",
                    version=get_hf_hub_version(),
                ),
            )
        )

        logger.debug("Initializing session...")
        await session.initialize()

        # List available tools
        response = await session.list_tools()
        # Fix: use lazy %-formatting — the previous call passed the list as a bare
        # positional arg with no placeholder, which breaks logging's `msg % args`
        # step and dropped the tool names from the output.
        logger.debug("Connected to server with tools: %s", [tool.name for tool in response.tools])

        for tool in response.tools:
            if tool.name in self.sessions:
                logger.warning(f"Tool '{tool.name}' already defined by another server. Skipping.")
                continue

            # Map tool names to their server for later lookup
            self.sessions[tool.name] = session

            # Add tool to the list of available tools (for use in chat completions)
            self.available_tools.append(
                ChatCompletionInputTool.parse_obj_as_instance(
                    {
                        "type": "function",
                        "function": {
                            "name": tool.name,
                            "description": tool.description,
                            "parameters": tool.inputSchema,
                        },
                    }
                )
            )

    async def process_single_turn_with_tools(
        self,
        messages: List[Union[Dict, ChatCompletionInputMessage]],
        exit_loop_tools: Optional[List[ChatCompletionInputTool]] = None,
        exit_if_first_chunk_no_tool: bool = False,
    ) -> AsyncIterable[Union[ChatCompletionStreamOutput, ChatCompletionInputMessage]]:
        """Process a query using `self.model` and available tools, yielding chunks and tool outputs.

        Args:
            messages (`List[Dict]`):
                List of message objects representing the conversation history
            exit_loop_tools (`List[ChatCompletionInputTool]`, *optional*):
                List of tools that should exit the generator when called
            exit_if_first_chunk_no_tool (`bool`, *optional*):
                Exit if no tool is present in the first chunks. Default to False.

        Yields:
            [`ChatCompletionStreamOutput`] chunks or [`ChatCompletionInputMessage`] objects
        """
        # Prepare tools list based on options
        tools = self.available_tools
        if exit_loop_tools is not None:
            tools = [*exit_loop_tools, *self.available_tools]

        # Create the streaming request
        response = await self.client.chat.completions.create(
            model=self.payload_model,
            messages=messages,
            tools=tools,
            tool_choice="auto",
            stream=True,
        )

        message: Dict[str, Any] = {"role": "unknown", "content": ""}
        # Tool calls arrive fragmented across chunks; aggregate them by stream index.
        final_tool_calls: Dict[int, ChatCompletionStreamOutputDeltaToolCall] = {}
        num_of_chunks = 0

        # Read from stream
        async for chunk in response:
            num_of_chunks += 1
            delta = chunk.choices[0].delta if chunk.choices and len(chunk.choices) > 0 else None
            if not delta:
                continue

            # Process message
            if delta.role:
                message["role"] = delta.role
            if delta.content:
                message["content"] += delta.content

            # Process tool calls
            if delta.tool_calls:
                for tool_call in delta.tool_calls:
                    # Aggregate chunks into tool calls
                    if tool_call.index not in final_tool_calls:
                        if (
                            tool_call.function.arguments is None or tool_call.function.arguments == "{}"
                        ):  # Corner case (depends on provider)
                            tool_call.function.arguments = ""
                        final_tool_calls[tool_call.index] = tool_call

                    elif tool_call.function.arguments:
                        final_tool_calls[tool_call.index].function.arguments += tool_call.function.arguments

            # Optionally exit early if no tools in first chunks
            if exit_if_first_chunk_no_tool and num_of_chunks <= 2 and len(final_tool_calls) == 0:
                return

            # Yield each chunk to caller
            yield chunk

        # Add the assistant message with tool calls (if any) to messages
        if message["content"] or final_tool_calls:
            # if the role is unknown, set it to assistant
            if message.get("role") == "unknown":
                message["role"] = "assistant"
            # Convert final_tool_calls to the format expected by OpenAI
            if final_tool_calls:
                tool_calls_list: List[Dict[str, Any]] = []
                for tc in final_tool_calls.values():
                    tool_calls_list.append(
                        {
                            "id": tc.id,
                            "type": "function",
                            "function": {
                                "name": tc.function.name,
                                "arguments": tc.function.arguments or "{}",
                            },
                        }
                    )
                message["tool_calls"] = tool_calls_list
            messages.append(message)

        # Process tool calls one by one
        for tool_call in final_tool_calls.values():
            function_name = tool_call.function.name
            try:
                function_args = json.loads(tool_call.function.arguments or "{}")
            except json.JSONDecodeError as err:
                # Surface malformed tool arguments back to the model instead of crashing.
                tool_message = {
                    "role": "tool",
                    "tool_call_id": tool_call.id,
                    "name": function_name,
                    "content": f"Invalid JSON generated by the model: {err}",
                }
                tool_message_as_obj = ChatCompletionInputMessage.parse_obj_as_instance(tool_message)
                messages.append(tool_message_as_obj)
                yield tool_message_as_obj
                continue  # move to next tool call

            tool_message = {"role": "tool", "tool_call_id": tool_call.id, "content": "", "name": function_name}

            # Check if this is an exit loop tool
            if exit_loop_tools and function_name in [t.function.name for t in exit_loop_tools]:
                tool_message_as_obj = ChatCompletionInputMessage.parse_obj_as_instance(tool_message)
                messages.append(tool_message_as_obj)
                yield tool_message_as_obj
                return

            # Execute tool call with the appropriate session
            session = self.sessions.get(function_name)
            if session is not None:
                try:
                    result = await session.call_tool(function_name, function_args)
                    tool_message["content"] = format_result(result)
                except Exception as err:
                    tool_message["content"] = f"Error: MCP tool call failed with error message: {err}"
            else:
                tool_message["content"] = f"Error: No session found for tool: {function_name}"

            # Yield tool message
            tool_message_as_obj = ChatCompletionInputMessage.parse_obj_as_instance(tool_message)
            messages.append(tool_message_as_obj)
            yield tool_message_as_obj
.venv\Lib\site-packages\huggingface_hub\inference\_mcp\mcp_client.py
mcp_client.py
Python
15,788
0.95
0.227642
0.090032
vue-tools
118
2024-10-16T00:34:03.635358
Apache-2.0
false
a6a12867f7dc5577e855f82771129143
from typing import Dict, List, Literal, TypedDict, Union


# Typed schemas describing an `agent.json` configuration document.

# One entry under "inputs". Every key is optional (total=False).
InputConfig = TypedDict(
    "InputConfig",
    {"id": str, "description": str, "type": str, "password": bool},
    total=False,
)

# A local MCP server spawned as a subprocess, speaking over stdin/stdout.
StdioServerConfig = TypedDict(
    "StdioServerConfig",
    {"type": Literal["stdio"], "command": str, "args": List[str], "env": Dict[str, str], "cwd": str},
)

# A remote MCP server reached over StreamableHTTP.
HTTPServerConfig = TypedDict(
    "HTTPServerConfig",
    {"type": Literal["http"], "url": str, "headers": Dict[str, str]},
)

# A remote MCP server reached over Server-Sent Events.
SSEServerConfig = TypedDict(
    "SSEServerConfig",
    {"type": Literal["sse"], "url": str, "headers": Dict[str, str]},
)

# Any supported server flavor, discriminated by its "type" key.
ServerConfig = Union[StdioServerConfig, HTTPServerConfig, SSEServerConfig]


# AgentConfig root object (the whole agent.json document).
AgentConfig = TypedDict(
    "AgentConfig",
    {"model": str, "provider": str, "inputs": List[InputConfig], "servers": List[ServerConfig]},
)
.venv\Lib\site-packages\huggingface_hub\inference\_mcp\types.py
types.py
Python
743
0.95
0.128205
0.037037
react-lib
162
2025-02-09T12:04:24.678154
MIT
false
776be91270daeaed7a4e439a84fddd1e
"""
Utility functions for MCPClient and Tiny Agents.

Formatting utilities taken from the JS SDK: https://github.com/huggingface/huggingface.js/blob/main/packages/mcp-client/src/ResultFormatter.ts.
"""

import json
from pathlib import Path
from typing import TYPE_CHECKING, List, Optional, Tuple

from huggingface_hub import snapshot_download
from huggingface_hub.errors import EntryNotFoundError

from .constants import DEFAULT_AGENT, DEFAULT_REPO_ID, FILENAME_CONFIG, FILENAME_PROMPT
from .types import AgentConfig


if TYPE_CHECKING:
    from mcp import types as mcp_types


def _render_content_item(item) -> Optional[str]:
    """Render one CallToolResult content item, or return None if it produces no text."""
    if item.type == "text":
        return item.text

    if item.type == "image":
        return (
            f"[Binary Content: Image {item.mimeType}, {_get_base64_size(item.data)} bytes]\n"
            f"The task is complete and the content accessible to the User"
        )

    if item.type == "audio":
        return (
            f"[Binary Content: Audio {item.mimeType}, {_get_base64_size(item.data)} bytes]\n"
            f"The task is complete and the content accessible to the User"
        )

    if item.type == "resource":
        resource = item.resource
        if hasattr(resource, "text"):
            return resource.text
        if hasattr(resource, "blob"):
            return (
                f"[Binary Content ({resource.uri}): {resource.mimeType}, {_get_base64_size(resource.blob)} bytes]\n"
                f"The task is complete and the content accessible to the User"
            )

    # Unknown item type or resource without text/blob: contributes nothing.
    return None


def format_result(result: "mcp_types.CallToolResult") -> str:
    """
    Formats a mcp.types.CallToolResult content into a human-readable string.

    Args:
        result (CallToolResult)
            Object returned by mcp.ClientSession.call_tool.

    Returns:
        str
            A formatted string representing the content of the result.
    """
    items = result.content
    if not items:
        return "[No content]"

    rendered = [text for item in items if (text := _render_content_item(item)) is not None]
    return "\n".join(rendered)


def _get_base64_size(base64_str: str) -> int:
    """Estimate the byte size of a base64-encoded string."""
    # Remove any prefix like "data:image/png;base64,"
    segments = base64_str.split(",")
    if len(segments) > 1:
        base64_str = segments[1]

    # Each '=' of padding means one fewer decoded byte.
    if base64_str.endswith("=="):
        pad_bytes = 2
    elif base64_str.endswith("="):
        pad_bytes = 1
    else:
        pad_bytes = 0

    return (len(base64_str) * 3) // 4 - pad_bytes


def _load_agent_config(agent_path: Optional[str]) -> Tuple[AgentConfig, Optional[str]]:
    """Load server config and prompt."""

    def _from_directory(folder: Path) -> Tuple[AgentConfig, Optional[str]]:
        # Read agent.json (required) and PROMPT.md (optional) from a directory.
        config_file = folder / FILENAME_CONFIG
        if not config_file.exists():
            raise FileNotFoundError(f" Config file not found in {folder}! Please make sure it exists locally")

        agent_config: AgentConfig = json.loads(config_file.read_text(encoding="utf-8"))
        prompt_file = folder / FILENAME_PROMPT
        system_prompt: Optional[str] = prompt_file.read_text(encoding="utf-8") if prompt_file.exists() else None
        return agent_config, system_prompt

    if agent_path is None:
        return DEFAULT_AGENT, None  # type: ignore[return-value]

    local_path = Path(agent_path).expanduser()

    if local_path.is_file():
        # A bare JSON file: config only, no prompt.
        return json.loads(local_path.read_text(encoding="utf-8")), None

    if local_path.is_dir():
        return _from_directory(local_path)

    # fetch from the Hub
    try:
        snapshot_root = Path(
            snapshot_download(
                repo_id=DEFAULT_REPO_ID,
                allow_patterns=f"{agent_path}/*",
                repo_type="dataset",
            )
        )
        return _from_directory(snapshot_root / agent_path)
    except Exception as err:
        raise EntryNotFoundError(
            f" Agent {agent_path} not found in tiny-agents/tiny-agents! Please make sure it exists in https://huggingface.co/datasets/tiny-agents/tiny-agents."
        ) from err
.venv\Lib\site-packages\huggingface_hub\inference\_mcp\utils.py
utils.py
Python
4,093
0.95
0.145161
0.021739
awesome-app
510
2024-02-25T20:02:24.712536
MIT
false
5e65f100d2d62524ade513bf7a1cad4a
import asyncio
import sys
from functools import partial

import typer


def _patch_anyio_open_process():
    """
    Patch anyio.open_process to allow detached processes on Windows and Unix-like systems.

    This is necessary to prevent the MCP client from being interrupted by Ctrl+C when running in the CLI.
    """
    import subprocess

    import anyio

    # Idempotence guard: patch at most once per process.
    if getattr(anyio, "_tiny_agents_patched", False):
        return
    anyio._tiny_agents_patched = True

    original_open_process = anyio.open_process

    if sys.platform == "win32":
        # On Windows, we need to set the creation flags to create a new process group

        async def open_process_in_new_group(*args, **kwargs):
            """
            Wrapper for open_process to handle Windows-specific process creation flags.
            """
            # Ensure we pass the creation flags for Windows
            kwargs.setdefault("creationflags", subprocess.CREATE_NEW_PROCESS_GROUP)
            return await original_open_process(*args, **kwargs)

        anyio.open_process = open_process_in_new_group
    else:
        # For Unix-like systems, we can use setsid to create a new session
        async def open_process_in_new_group(*args, **kwargs):
            """
            Wrapper for open_process to handle Unix-like systems with start_new_session=True.
            """
            kwargs.setdefault("start_new_session", True)
            return await original_open_process(*args, **kwargs)

        anyio.open_process = open_process_in_new_group


async def _async_prompt(exit_event: asyncio.Event, prompt: str = "» ") -> str:
    """
    Asynchronous prompt function that reads input from stdin without blocking.

    This function is designed to work in an asynchronous context, allowing the event loop to gracefully stop it (e.g. on Ctrl+C).

    Alternatively, we could use https://github.com/vxgmichel/aioconsole but that would be an additional dependency.

    Args:
        exit_event: Event set by the caller to abort the prompt (returns "" in that case).
        prompt: Prompt string printed before reading input.

    Returns:
        The stripped user input line, or "" if `exit_event` was set first.
    """
    loop = asyncio.get_running_loop()

    if sys.platform == "win32":
        # Windows: Use run_in_executor to avoid blocking the event loop
        # Degraded solution: this is not ideal as user will have to CTRL+C once more to stop the prompt (and it'll not be graceful)
        return await loop.run_in_executor(None, partial(typer.prompt, prompt, prompt_suffix=" "))

    # UNIX-like: Use loop.add_reader for non-blocking stdin read
    future: "asyncio.Future[str]" = loop.create_future()

    def on_input() -> None:
        line = sys.stdin.readline()
        loop.remove_reader(sys.stdin)
        # Guard against a stale callback firing after the exit path cancelled the
        # future — set_result() on a cancelled future raises InvalidStateError.
        if not future.done():
            future.set_result(line)

    print(prompt, end=" ", flush=True)
    loop.add_reader(sys.stdin, on_input)  # not supported on Windows

    # Wait until either the user hits enter or exit_event is set
    exit_task = asyncio.create_task(exit_event.wait())
    try:
        await asyncio.wait(
            [future, exit_task],
            return_when=asyncio.FIRST_COMPLETED,
        )

        # Check which one has been triggered
        if exit_event.is_set():
            future.cancel()
            return ""

        line = await future
        return line.strip()
    finally:
        # Bug fix: always unregister the stdin reader. The previous code only removed
        # it inside on_input, so on the exit-event path the reader stayed registered
        # and could fire later against a cancelled future. remove_reader() is a no-op
        # if the fd is no longer watched, so the double removal on the normal path is safe.
        loop.remove_reader(sys.stdin)
        # Also cancel the helper task so it does not linger when user input wins the race.
        if not exit_task.done():
            exit_task.cancel()
.venv\Lib\site-packages\huggingface_hub\inference\_mcp\_cli_hacks.py
_cli_hacks.py
Python
3,182
0.95
0.181818
0.134328
vue-tools
489
2023-10-18T00:52:40.222054
MIT
false
9ad1112417a49b7fdecea425ab3dbde1
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_mcp\__pycache__\agent.cpython-313.pyc
agent.cpython-313.pyc
Other
5,442
0.95
0.095238
0
awesome-app
95
2024-11-30T11:28:04.235294
BSD-3-Clause
false
19f73bd4f4cefef0d404f055c231f3de
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_mcp\__pycache__\cli.cpython-313.pyc
cli.cpython-313.pyc
Other
11,701
0.95
0.007752
0
awesome-app
663
2024-08-31T05:39:31.571456
BSD-3-Clause
false
26e961e2d3feef9401429e138657638f
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_mcp\__pycache__\constants.cpython-313.pyc
constants.cpython-313.pyc
Other
2,491
0.95
0.171429
0
python-kit
948
2024-10-26T19:58:09.835158
GPL-3.0
false
58c7d114184e20e3ce8fda799bf0f702
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_mcp\__pycache__\mcp_client.cpython-313.pyc
mcp_client.cpython-313.pyc
Other
17,166
0.95
0.072165
0.010989
vue-tools
784
2024-05-05T01:06:38.746751
GPL-3.0
false
2492328c58d21d30f4ac18fd0082757f
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_mcp\__pycache__\types.cpython-313.pyc
types.cpython-313.pyc
Other
2,005
0.8
0
0
react-lib
539
2024-11-05T18:22:47.636386
GPL-3.0
false
d9ab46ef02d42ff50d45cfddeae3f00a
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_mcp\__pycache__\utils.cpython-313.pyc
utils.cpython-313.pyc
Other
5,411
0.95
0.015385
0
python-kit
162
2023-09-19T05:52:17.966132
BSD-3-Clause
false
5fee6d1317cf12f01cf4610323a3717a
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_mcp\__pycache__\_cli_hacks.cpython-313.pyc
_cli_hacks.cpython-313.pyc
Other
3,956
0.95
0.093023
0
python-kit
211
2024-07-30T20:48:51.886876
MIT
false
028d67edfece5654ed9b6cc91873f333
\n\n
.venv\Lib\site-packages\huggingface_hub\inference\_mcp\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
205
0.7
0
0
react-lib
479
2024-08-03T03:12:22.478625
GPL-3.0
false
db7001e4a3dec42a17fe936a181401db
import time
from typing import Any, Dict, Optional, Union

from huggingface_hub.hf_api import InferenceProviderMapping
from huggingface_hub.inference._common import RequestParameters, _as_dict
from huggingface_hub.inference._providers._common import TaskProviderHelper, filter_none
from huggingface_hub.utils import logging
from huggingface_hub.utils._http import get_session


logger = logging.get_logger(__name__)

MAX_POLLING_ATTEMPTS = 6
POLLING_INTERVAL = 1.0


class BlackForestLabsTextToImageTask(TaskProviderHelper):
    """Text-to-image task helper for the Black Forest Labs provider."""

    def __init__(self):
        super().__init__(provider="black-forest-labs", base_url="https://api.us1.bfl.ai", task="text-to-image")

    def _prepare_headers(self, headers: Dict, api_key: str) -> Dict:
        # A direct BFL key (i.e. not an HF "hf_" token) goes in the "X-Key" header
        # instead of the standard Authorization header.
        headers = super()._prepare_headers(headers, api_key)
        if not api_key.startswith("hf_"):
            _ = headers.pop("authorization")
            headers["X-Key"] = api_key
        return headers

    def _prepare_route(self, mapped_model: str, api_key: str) -> str:
        return f"/v1/{mapped_model}"

    def _prepare_payload_as_dict(
        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
    ) -> Optional[Dict]:
        # Translate HF parameter names to their BFL equivalents.
        cleaned = filter_none(parameters)
        for hf_name, bfl_name in (("num_inference_steps", "steps"), ("guidance_scale", "guidance")):
            if hf_name in cleaned:
                cleaned[bfl_name] = cleaned.pop(hf_name)

        return {"prompt": inputs, **cleaned}

    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
        """
        Polling mechanism for Black Forest Labs since the API is asynchronous.
        """
        polling_url = _as_dict(response).get("polling_url")
        http = get_session()
        attempt = 0
        while attempt < MAX_POLLING_ATTEMPTS:
            attempt += 1
            time.sleep(POLLING_INTERVAL)

            poll_resp = http.get(polling_url, headers={"Content-Type": "application/json"})  # type: ignore
            poll_resp.raise_for_status()
            poll_payload: Dict = poll_resp.json()
            status = poll_payload.get("status")
            logger.info(
                f"Polling generation result from {polling_url}. Current status: {status}. "
                f"Will retry after {POLLING_INTERVAL} seconds if not ready."
            )

            # Only fetch the image once the job is ready and a sample URL is present.
            result = poll_payload.get("result")
            if status == "Ready" and isinstance(result, dict):
                sample_url = result.get("sample")
                if sample_url:
                    image_resp = http.get(sample_url)
                    image_resp.raise_for_status()
                    return image_resp.content

        raise TimeoutError(f"Failed to get the image URL after {MAX_POLLING_ATTEMPTS} attempts.")
.venv\Lib\site-packages\huggingface_hub\inference\_providers\black_forest_labs.py
black_forest_labs.py
Python
2,842
0.95
0.188406
0
node-utils
571
2025-07-05T01:48:56.966475
GPL-3.0
false
c83d7ebf8dad682742d605fc9cd82282
from ._common import BaseConversationalTask\n\n\nclass CerebrasConversationalTask(BaseConversationalTask):\n def __init__(self):\n super().__init__(provider="cerebras", base_url="https://api.cerebras.ai")\n
.venv\Lib\site-packages\huggingface_hub\inference\_providers\cerebras.py
cerebras.py
Python
210
0.95
0.333333
0
python-kit
439
2025-01-14T07:12:59.433290
Apache-2.0
false
4db8afa189d09fff48ff3f7428ba24cf
from typing import Any, Dict, Optional\n\nfrom huggingface_hub.hf_api import InferenceProviderMapping\n\nfrom ._common import BaseConversationalTask\n\n\n_PROVIDER = "cohere"\n_BASE_URL = "https://api.cohere.com"\n\n\nclass CohereConversationalTask(BaseConversationalTask):\n def __init__(self):\n super().__init__(provider=_PROVIDER, base_url=_BASE_URL)\n\n def _prepare_route(self, mapped_model: str, api_key: str) -> str:\n return "/compatibility/v1/chat/completions"\n\n def _prepare_payload_as_dict(\n self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping\n ) -> Optional[Dict]:\n payload = super()._prepare_payload_as_dict(inputs, parameters, provider_mapping_info)\n response_format = parameters.get("response_format")\n if isinstance(response_format, dict) and response_format.get("type") == "json_schema":\n json_schema_details = response_format.get("json_schema")\n if isinstance(json_schema_details, dict) and "schema" in json_schema_details:\n payload["response_format"] = { # type: ignore [index]\n "type": "json_object",\n "schema": json_schema_details["schema"],\n }\n\n return payload\n
.venv\Lib\site-packages\huggingface_hub\inference\_providers\cohere.py
cohere.py
Python
1,253
0.95
0.1875
0
python-kit
469
2025-04-01T08:18:07.224372
BSD-3-Clause
false
7dbf01668a22043b2a7c8496718c6fbb
import base64
import time
from abc import ABC
from typing import Any, Dict, Optional, Union
from urllib.parse import urlparse

from huggingface_hub import constants
from huggingface_hub.hf_api import InferenceProviderMapping
from huggingface_hub.inference._common import RequestParameters, _as_dict
from huggingface_hub.inference._providers._common import TaskProviderHelper, filter_none
from huggingface_hub.utils import get_session, hf_raise_for_status
from huggingface_hub.utils.logging import get_logger


logger = get_logger(__name__)

# Arbitrary polling interval
_POLLING_INTERVAL = 0.5


class FalAITask(TaskProviderHelper, ABC):
    """Common base for all fal.ai task helpers: shared auth headers and route shape."""

    def __init__(self, task: str):
        super().__init__(provider="fal-ai", base_url="https://fal.run", task=task)

    def _prepare_headers(self, headers: Dict, api_key: str) -> Dict:
        # A direct fal.ai key (not an HF "hf_" token) uses the "Key <token>" scheme.
        headers = super()._prepare_headers(headers, api_key)
        if not api_key.startswith("hf_"):
            headers["authorization"] = f"Key {api_key}"
        return headers

    def _prepare_route(self, mapped_model: str, api_key: str) -> str:
        return f"/{mapped_model}"


class FalAIAutomaticSpeechRecognitionTask(FalAITask):
    """ASR task: sends audio as a URL or an inline base64 data URI, returns the transcript."""

    def __init__(self):
        super().__init__("automatic-speech-recognition")

    def _prepare_payload_as_dict(
        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
    ) -> Optional[Dict]:
        if isinstance(inputs, str) and inputs.startswith(("http://", "https://")):
            # If input is a URL, pass it directly
            audio_url = inputs
        else:
            # If input is a file path, read it first
            if isinstance(inputs, str):
                with open(inputs, "rb") as f:
                    inputs = f.read()

            audio_b64 = base64.b64encode(inputs).decode()
            # NOTE(review): MIME type is hardcoded regardless of the actual audio
            # format — presumably the fal.ai endpoint ignores it; confirm.
            content_type = "audio/mpeg"
            audio_url = f"data:{content_type};base64,{audio_b64}"

        return {"audio_url": audio_url, **filter_none(parameters)}

    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
        # Validate the response shape before returning: "text" must be a string.
        text = _as_dict(response)["text"]
        if not isinstance(text, str):
            raise ValueError(f"Unexpected output format from FalAI API. Expected string, got {type(text)}.")
        return text


class FalAITextToImageTask(FalAITask):
    """Text-to-image task: builds the fal.ai payload (image_size, optional LoRA) and fetches the image bytes."""

    def __init__(self):
        super().__init__("text-to-image")

    def _prepare_payload_as_dict(
        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
    ) -> Optional[Dict]:
        payload: Dict[str, Any] = {
            "prompt": inputs,
            **filter_none(parameters),
        }
        # fal.ai expects a nested image_size object instead of flat width/height.
        if "width" in payload and "height" in payload:
            payload["image_size"] = {
                "width": payload.pop("width"),
                "height": payload.pop("height"),
            }
        # When the mapped model is a LoRA adapter, point fal.ai at the adapter
        # weights hosted on the Hub (resolved via the canonical URL template).
        if provider_mapping_info.adapter_weights_path is not None:
            lora_path = constants.HUGGINGFACE_CO_URL_TEMPLATE.format(
                repo_id=provider_mapping_info.hf_model_id,
                revision="main",
                filename=provider_mapping_info.adapter_weights_path,
            )
            payload["loras"] = [{"path": lora_path, "scale": 1}]
            if provider_mapping_info.provider_id == "fal-ai/lora":
                # little hack: fal requires the base model for stable-diffusion-based loras but not for flux-based
                # See payloads in https://fal.ai/models/fal-ai/lora/api vs https://fal.ai/models/fal-ai/flux-lora/api
                payload["model_name"] = "stabilityai/stable-diffusion-xl-base-1.0"

        return payload

    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
        # The API returns a URL to the generated image; download and return raw bytes.
        url = _as_dict(response)["images"][0]["url"]
        return get_session().get(url).content


class FalAITextToSpeechTask(FalAITask):
    """Text-to-speech task: returns the generated audio bytes fetched from the response URL."""

    def __init__(self):
        super().__init__("text-to-speech")

    def _prepare_payload_as_dict(
        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
    ) -> Optional[Dict]:
        return {"text": inputs, **filter_none(parameters)}

    def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:
        url = _as_dict(response)["audio"]["url"]
        return get_session().get(url).content


class FalAITextToVideoTask(FalAITask):
    """Text-to-video task: asynchronous queue API — submit, poll status, then fetch the result video."""

    def __init__(self):
        super().__init__("text-to-video")

    def _prepare_base_url(self, api_key: str) -> str:
        # Direct calls (non-HF key) go to the queue subdomain explicitly.
        if api_key.startswith("hf_"):
            return super()._prepare_base_url(api_key)
        else:
            logger.info(f"Calling '{self.provider}' provider directly.")
            return "https://queue.fal.run"

    def _prepare_route(self, mapped_model: str, api_key: str) -> str:
        if api_key.startswith("hf_"):
            # Use the queue subdomain for HF routing
            return f"/{mapped_model}?_subdomain=queue"
        return f"/{mapped_model}"

    def _prepare_payload_as_dict(
        self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping
    ) -> Optional[Dict]:
        return {"prompt": inputs, **filter_none(parameters)}

    def get_response(
        self,
        response: Union[bytes, Dict],
        request_params: Optional[RequestParameters] = None,
    ) -> Any:
        """Poll the queue until the job completes, then download and return the video bytes.

        Raises:
            ValueError: if the submit response carries no request id, or if
                `request_params` (needed to rebuild the polling URLs) is missing.
        """
        response_dict = _as_dict(response)

        request_id = response_dict.get("request_id")
        if not request_id:
            raise ValueError("No request ID found in the response")
        if request_params is None:
            raise ValueError(
                "A `RequestParameters` object should be provided to get text-to-video responses with Fal AI."
            )

        # extract the base url and query params
        parsed_url = urlparse(request_params.url)
        # a bit hacky way to concatenate the provider name without parsing `parsed_url.path`
        base_url = f"{parsed_url.scheme}://{parsed_url.netloc}{'/fal-ai' if parsed_url.netloc == 'router.huggingface.co' else ''}"
        query_param = f"?{parsed_url.query}" if parsed_url.query else ""

        # extracting the provider model id for status and result urls
        # from the response as it might be different from the mapped model in `request_params.url`
        model_id = urlparse(response_dict.get("response_url")).path
        status_url = f"{base_url}{str(model_id)}/status{query_param}"
        result_url = f"{base_url}{str(model_id)}{query_param}"

        status = response_dict.get("status")
        logger.info("Generating the video.. this can take several minutes.")
        # NOTE(review): no attempt cap here — a job that never reaches "COMPLETED"
        # (or fails server-side) makes this loop poll forever. Compare with the
        # bounded polling in the Black Forest Labs helper; confirm whether an
        # upper bound / failure-status check should be added.
        while status != "COMPLETED":
            time.sleep(_POLLING_INTERVAL)
            status_response = get_session().get(status_url, headers=request_params.headers)
            hf_raise_for_status(status_response)
            status = status_response.json().get("status")

        response = get_session().get(result_url, headers=request_params.headers).json()
        url = _as_dict(response)["video"]["url"]
        return get_session().get(url).content
.venv\Lib\site-packages\huggingface_hub\inference\_providers\fal_ai.py
fal_ai.py
Python
7,162
0.95
0.232558
0.07971
node-utils
43
2024-06-25T02:04:21.524853
MIT
false
7c34bbc76cdb2ad77ada5039cee0c328
from typing import Any, Dict, Optional, Union\n\nfrom huggingface_hub.hf_api import InferenceProviderMapping\nfrom huggingface_hub.inference._common import RequestParameters, _as_dict\n\nfrom ._common import BaseConversationalTask, BaseTextGenerationTask, filter_none\n\n\n_PROVIDER = "featherless-ai"\n_BASE_URL = "https://api.featherless.ai"\n\n\nclass FeatherlessTextGenerationTask(BaseTextGenerationTask):\n def __init__(self):\n super().__init__(provider=_PROVIDER, base_url=_BASE_URL)\n\n def _prepare_payload_as_dict(\n self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping\n ) -> Optional[Dict]:\n params = filter_none(parameters.copy())\n params["max_tokens"] = params.pop("max_new_tokens", None)\n\n return {"prompt": inputs, **params, "model": provider_mapping_info.provider_id}\n\n def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any:\n output = _as_dict(response)["choices"][0]\n return {\n "generated_text": output["text"],\n "details": {\n "finish_reason": output.get("finish_reason"),\n "seed": output.get("seed"),\n },\n }\n\n\nclass FeatherlessConversationalTask(BaseConversationalTask):\n def __init__(self):\n super().__init__(provider=_PROVIDER, base_url=_BASE_URL)\n
.venv\Lib\site-packages\huggingface_hub\inference\_providers\featherless_ai.py
featherless_ai.py
Python
1,382
0.95
0.157895
0
awesome-app
980
2025-05-28T09:40:23.162530
BSD-3-Clause
false
d3f3e6ac469d88a4ea7522f3903b6230