not-pegasus committed on
Commit
993bd45
·
verified ·
1 Parent(s): cf5efe1

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. env/lib/python3.13/site-packages/huggingface_hub/inference/__pycache__/__init__.cpython-313.pyc +0 -0
  2. env/lib/python3.13/site-packages/huggingface_hub/inference/__pycache__/_common.cpython-313.pyc +0 -0
  3. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/__init__.py +0 -0
  4. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/__pycache__/__init__.cpython-313.pyc +0 -0
  5. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/_async_client.py +0 -0
  6. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__init__.py +192 -0
  7. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/__init__.cpython-313.pyc +0 -0
  8. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/audio_classification.cpython-313.pyc +0 -0
  9. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/audio_to_audio.cpython-313.pyc +0 -0
  10. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/automatic_speech_recognition.cpython-313.pyc +0 -0
  11. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/base.cpython-313.pyc +0 -0
  12. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/depth_estimation.cpython-313.pyc +0 -0
  13. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/document_question_answering.cpython-313.pyc +0 -0
  14. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/fill_mask.cpython-313.pyc +0 -0
  15. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/image_classification.cpython-313.pyc +0 -0
  16. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/image_segmentation.cpython-313.pyc +0 -0
  17. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/image_to_image.cpython-313.pyc +0 -0
  18. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/image_to_text.cpython-313.pyc +0 -0
  19. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/image_to_video.cpython-313.pyc +0 -0
  20. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/object_detection.cpython-313.pyc +0 -0
  21. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/question_answering.cpython-313.pyc +0 -0
  22. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/sentence_similarity.cpython-313.pyc +0 -0
  23. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/summarization.cpython-313.pyc +0 -0
  24. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/table_question_answering.cpython-313.pyc +0 -0
  25. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text2text_generation.cpython-313.pyc +0 -0
  26. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_classification.cpython-313.pyc +0 -0
  27. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_generation.cpython-313.pyc +0 -0
  28. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_to_audio.cpython-313.pyc +0 -0
  29. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_to_image.cpython-313.pyc +0 -0
  30. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_to_speech.cpython-313.pyc +0 -0
  31. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_to_video.cpython-313.pyc +0 -0
  32. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/token_classification.cpython-313.pyc +0 -0
  33. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/translation.cpython-313.pyc +0 -0
  34. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/video_classification.cpython-313.pyc +0 -0
  35. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/zero_shot_image_classification.cpython-313.pyc +0 -0
  36. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/zero_shot_object_detection.cpython-313.pyc +0 -0
  37. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/audio_classification.py +43 -0
  38. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/audio_to_audio.py +30 -0
  39. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/automatic_speech_recognition.py +113 -0
  40. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/base.py +164 -0
  41. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/chat_completion.py +347 -0
  42. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/depth_estimation.py +28 -0
  43. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/document_question_answering.py +80 -0
  44. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/feature_extraction.py +36 -0
  45. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/fill_mask.py +47 -0
  46. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/image_classification.py +43 -0
  47. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/image_segmentation.py +51 -0
  48. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/image_to_image.py +60 -0
  49. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/image_to_text.py +100 -0
  50. env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/image_to_video.py +60 -0
env/lib/python3.13/site-packages/huggingface_hub/inference/__pycache__/__init__.cpython-313.pyc ADDED
Binary file (217 Bytes). View file
 
env/lib/python3.13/site-packages/huggingface_hub/inference/__pycache__/_common.cpython-313.pyc ADDED
Binary file (16.9 kB). View file
 
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/__init__.py ADDED
File without changes
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/__pycache__/__init__.cpython-313.pyc ADDED
Binary file (228 Bytes). View file
 
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/_async_client.py ADDED
The diff for this file is too large to render. See raw diff
 
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__init__.py ADDED
@@ -0,0 +1,192 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file is auto-generated by `utils/generate_inference_types.py`.
2
+ # Do not modify it manually.
3
+ #
4
+ # ruff: noqa: F401
5
+
6
+ from .audio_classification import (
7
+ AudioClassificationInput,
8
+ AudioClassificationOutputElement,
9
+ AudioClassificationOutputTransform,
10
+ AudioClassificationParameters,
11
+ )
12
+ from .audio_to_audio import AudioToAudioInput, AudioToAudioOutputElement
13
+ from .automatic_speech_recognition import (
14
+ AutomaticSpeechRecognitionEarlyStoppingEnum,
15
+ AutomaticSpeechRecognitionGenerationParameters,
16
+ AutomaticSpeechRecognitionInput,
17
+ AutomaticSpeechRecognitionOutput,
18
+ AutomaticSpeechRecognitionOutputChunk,
19
+ AutomaticSpeechRecognitionParameters,
20
+ )
21
+ from .base import BaseInferenceType
22
+ from .chat_completion import (
23
+ ChatCompletionInput,
24
+ ChatCompletionInputFunctionDefinition,
25
+ ChatCompletionInputFunctionName,
26
+ ChatCompletionInputGrammarType,
27
+ ChatCompletionInputJSONSchema,
28
+ ChatCompletionInputMessage,
29
+ ChatCompletionInputMessageChunk,
30
+ ChatCompletionInputMessageChunkType,
31
+ ChatCompletionInputResponseFormatJSONObject,
32
+ ChatCompletionInputResponseFormatJSONSchema,
33
+ ChatCompletionInputResponseFormatText,
34
+ ChatCompletionInputStreamOptions,
35
+ ChatCompletionInputTool,
36
+ ChatCompletionInputToolCall,
37
+ ChatCompletionInputToolChoiceClass,
38
+ ChatCompletionInputToolChoiceEnum,
39
+ ChatCompletionInputURL,
40
+ ChatCompletionOutput,
41
+ ChatCompletionOutputComplete,
42
+ ChatCompletionOutputFunctionDefinition,
43
+ ChatCompletionOutputLogprob,
44
+ ChatCompletionOutputLogprobs,
45
+ ChatCompletionOutputMessage,
46
+ ChatCompletionOutputToolCall,
47
+ ChatCompletionOutputTopLogprob,
48
+ ChatCompletionOutputUsage,
49
+ ChatCompletionStreamOutput,
50
+ ChatCompletionStreamOutputChoice,
51
+ ChatCompletionStreamOutputDelta,
52
+ ChatCompletionStreamOutputDeltaToolCall,
53
+ ChatCompletionStreamOutputFunction,
54
+ ChatCompletionStreamOutputLogprob,
55
+ ChatCompletionStreamOutputLogprobs,
56
+ ChatCompletionStreamOutputTopLogprob,
57
+ ChatCompletionStreamOutputUsage,
58
+ )
59
+ from .depth_estimation import DepthEstimationInput, DepthEstimationOutput
60
+ from .document_question_answering import (
61
+ DocumentQuestionAnsweringInput,
62
+ DocumentQuestionAnsweringInputData,
63
+ DocumentQuestionAnsweringOutputElement,
64
+ DocumentQuestionAnsweringParameters,
65
+ )
66
+ from .feature_extraction import FeatureExtractionInput, FeatureExtractionInputTruncationDirection
67
+ from .fill_mask import FillMaskInput, FillMaskOutputElement, FillMaskParameters
68
+ from .image_classification import (
69
+ ImageClassificationInput,
70
+ ImageClassificationOutputElement,
71
+ ImageClassificationOutputTransform,
72
+ ImageClassificationParameters,
73
+ )
74
+ from .image_segmentation import (
75
+ ImageSegmentationInput,
76
+ ImageSegmentationOutputElement,
77
+ ImageSegmentationParameters,
78
+ ImageSegmentationSubtask,
79
+ )
80
+ from .image_to_image import ImageToImageInput, ImageToImageOutput, ImageToImageParameters, ImageToImageTargetSize
81
+ from .image_to_text import (
82
+ ImageToTextEarlyStoppingEnum,
83
+ ImageToTextGenerationParameters,
84
+ ImageToTextInput,
85
+ ImageToTextOutput,
86
+ ImageToTextParameters,
87
+ )
88
+ from .image_to_video import ImageToVideoInput, ImageToVideoOutput, ImageToVideoParameters, ImageToVideoTargetSize
89
+ from .object_detection import (
90
+ ObjectDetectionBoundingBox,
91
+ ObjectDetectionInput,
92
+ ObjectDetectionOutputElement,
93
+ ObjectDetectionParameters,
94
+ )
95
+ from .question_answering import (
96
+ QuestionAnsweringInput,
97
+ QuestionAnsweringInputData,
98
+ QuestionAnsweringOutputElement,
99
+ QuestionAnsweringParameters,
100
+ )
101
+ from .sentence_similarity import SentenceSimilarityInput, SentenceSimilarityInputData
102
+ from .summarization import (
103
+ SummarizationInput,
104
+ SummarizationOutput,
105
+ SummarizationParameters,
106
+ SummarizationTruncationStrategy,
107
+ )
108
+ from .table_question_answering import (
109
+ Padding,
110
+ TableQuestionAnsweringInput,
111
+ TableQuestionAnsweringInputData,
112
+ TableQuestionAnsweringOutputElement,
113
+ TableQuestionAnsweringParameters,
114
+ )
115
+ from .text2text_generation import (
116
+ Text2TextGenerationInput,
117
+ Text2TextGenerationOutput,
118
+ Text2TextGenerationParameters,
119
+ Text2TextGenerationTruncationStrategy,
120
+ )
121
+ from .text_classification import (
122
+ TextClassificationInput,
123
+ TextClassificationOutputElement,
124
+ TextClassificationOutputTransform,
125
+ TextClassificationParameters,
126
+ )
127
+ from .text_generation import (
128
+ TextGenerationInput,
129
+ TextGenerationInputGenerateParameters,
130
+ TextGenerationInputGrammarType,
131
+ TextGenerationOutput,
132
+ TextGenerationOutputBestOfSequence,
133
+ TextGenerationOutputDetails,
134
+ TextGenerationOutputFinishReason,
135
+ TextGenerationOutputPrefillToken,
136
+ TextGenerationOutputToken,
137
+ TextGenerationStreamOutput,
138
+ TextGenerationStreamOutputStreamDetails,
139
+ TextGenerationStreamOutputToken,
140
+ TypeEnum,
141
+ )
142
+ from .text_to_audio import (
143
+ TextToAudioEarlyStoppingEnum,
144
+ TextToAudioGenerationParameters,
145
+ TextToAudioInput,
146
+ TextToAudioOutput,
147
+ TextToAudioParameters,
148
+ )
149
+ from .text_to_image import TextToImageInput, TextToImageOutput, TextToImageParameters
150
+ from .text_to_speech import (
151
+ TextToSpeechEarlyStoppingEnum,
152
+ TextToSpeechGenerationParameters,
153
+ TextToSpeechInput,
154
+ TextToSpeechOutput,
155
+ TextToSpeechParameters,
156
+ )
157
+ from .text_to_video import TextToVideoInput, TextToVideoOutput, TextToVideoParameters
158
+ from .token_classification import (
159
+ TokenClassificationAggregationStrategy,
160
+ TokenClassificationInput,
161
+ TokenClassificationOutputElement,
162
+ TokenClassificationParameters,
163
+ )
164
+ from .translation import TranslationInput, TranslationOutput, TranslationParameters, TranslationTruncationStrategy
165
+ from .video_classification import (
166
+ VideoClassificationInput,
167
+ VideoClassificationOutputElement,
168
+ VideoClassificationOutputTransform,
169
+ VideoClassificationParameters,
170
+ )
171
+ from .visual_question_answering import (
172
+ VisualQuestionAnsweringInput,
173
+ VisualQuestionAnsweringInputData,
174
+ VisualQuestionAnsweringOutputElement,
175
+ VisualQuestionAnsweringParameters,
176
+ )
177
+ from .zero_shot_classification import (
178
+ ZeroShotClassificationInput,
179
+ ZeroShotClassificationOutputElement,
180
+ ZeroShotClassificationParameters,
181
+ )
182
+ from .zero_shot_image_classification import (
183
+ ZeroShotImageClassificationInput,
184
+ ZeroShotImageClassificationOutputElement,
185
+ ZeroShotImageClassificationParameters,
186
+ )
187
+ from .zero_shot_object_detection import (
188
+ ZeroShotObjectDetectionBoundingBox,
189
+ ZeroShotObjectDetectionInput,
190
+ ZeroShotObjectDetectionOutputElement,
191
+ ZeroShotObjectDetectionParameters,
192
+ )
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/__init__.cpython-313.pyc ADDED
Binary file (7.81 kB). View file
 
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/audio_classification.cpython-313.pyc ADDED
Binary file (1.73 kB). View file
 
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/audio_to_audio.cpython-313.pyc ADDED
Binary file (1.17 kB). View file
 
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/automatic_speech_recognition.cpython-313.pyc ADDED
Binary file (3.49 kB). View file
 
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/base.cpython-313.pyc ADDED
Binary file (7.98 kB). View file
 
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/depth_estimation.cpython-313.pyc ADDED
Binary file (1.21 kB). View file
 
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/document_question_answering.cpython-313.pyc ADDED
Binary file (2.56 kB). View file
 
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/fill_mask.cpython-313.pyc ADDED
Binary file (1.72 kB). View file
 
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/image_classification.cpython-313.pyc ADDED
Binary file (1.74 kB). View file
 
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/image_segmentation.cpython-313.pyc ADDED
Binary file (1.94 kB). View file
 
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/image_to_image.cpython-313.pyc ADDED
Binary file (2.22 kB). View file
 
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/image_to_text.cpython-313.pyc ADDED
Binary file (3.03 kB). View file
 
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/image_to_video.cpython-313.pyc ADDED
Binary file (2.21 kB). View file
 
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/object_detection.cpython-313.pyc ADDED
Binary file (2.03 kB). View file
 
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/question_answering.cpython-313.pyc ADDED
Binary file (2.35 kB). View file
 
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/sentence_similarity.cpython-313.pyc ADDED
Binary file (1.19 kB). View file
 
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/summarization.cpython-313.pyc ADDED
Binary file (1.82 kB). View file
 
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/table_question_answering.cpython-313.pyc ADDED
Binary file (2.32 kB). View file
 
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text2text_generation.cpython-313.pyc ADDED
Binary file (1.97 kB). View file
 
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_classification.cpython-313.pyc ADDED
Binary file (1.73 kB). View file
 
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_generation.cpython-313.pyc ADDED
Binary file (6.47 kB). View file
 
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_to_audio.cpython-313.pyc ADDED
Binary file (2.92 kB). View file
 
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_to_image.cpython-313.pyc ADDED
Binary file (1.85 kB). View file
 
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_to_speech.cpython-313.pyc ADDED
Binary file (2.96 kB). View file
 
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_to_video.cpython-313.pyc ADDED
Binary file (1.76 kB). View file
 
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/token_classification.cpython-313.pyc ADDED
Binary file (2.01 kB). View file
 
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/translation.cpython-313.pyc ADDED
Binary file (1.92 kB). View file
 
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/video_classification.cpython-313.pyc ADDED
Binary file (1.88 kB). View file
 
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/zero_shot_image_classification.cpython-313.pyc ADDED
Binary file (1.67 kB). View file
 
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/__pycache__/zero_shot_object_detection.cpython-313.pyc ADDED
Binary file (2 kB). View file
 
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/audio_classification.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from typing import Literal, Optional
7
+
8
+ from .base import BaseInferenceType, dataclass_with_extra
9
+
10
+
11
AudioClassificationOutputTransform = Literal["sigmoid", "softmax", "none"]


@dataclass_with_extra
class AudioClassificationParameters(BaseInferenceType):
    """Extra parameters accepted by the Audio Classification task."""

    function_to_apply: Optional["AudioClassificationOutputTransform"] = None
    # Post-processing applied to the raw model outputs to obtain scores
    # ("sigmoid", "softmax" or "none").
    top_k: Optional[int] = None
    # If set, only the K most probable classes are returned.
22
+
23
+
24
@dataclass_with_extra
class AudioClassificationInput(BaseInferenceType):
    """Payload sent to the Audio Classification task."""

    inputs: str
    # Base64-encoded audio. When no `parameters` are sent, the raw audio
    # bytes may be used as the payload instead.
    parameters: Optional[AudioClassificationParameters] = None
    # Optional task-specific settings (see AudioClassificationParameters).
34
+
35
+
36
@dataclass_with_extra
class AudioClassificationOutputElement(BaseInferenceType):
    """One classification result returned by the Audio Classification task."""

    label: str
    # Name of the predicted class.
    score: float
    # Probability assigned to that class.
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/audio_to_audio.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from typing import Any
7
+
8
+ from .base import BaseInferenceType, dataclass_with_extra
9
+
10
+
11
@dataclass_with_extra
class AudioToAudioInput(BaseInferenceType):
    """Payload sent to the Audio to Audio task."""

    inputs: Any
    # The audio data to transform.
17
+
18
+
19
@dataclass_with_extra
class AudioToAudioOutputElement(BaseInferenceType):
    """A single generated audio file, with its label, produced by the
    Audio To Audio task.
    """

    blob: Any
    # The generated audio payload.
    content_type: str
    # Content type of the generated audio file.
    label: str
    # Label describing the generated audio file.
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/automatic_speech_recognition.py ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from typing import Literal, Optional, Union
7
+
8
+ from .base import BaseInferenceType, dataclass_with_extra
9
+
10
+
11
# "never" is the only string value accepted for early stopping; plain booleans
# are also accepted via the Union in `early_stopping` below.
AutomaticSpeechRecognitionEarlyStoppingEnum = Literal["never"]


@dataclass_with_extra
class AutomaticSpeechRecognitionGenerationParameters(BaseInferenceType):
    """Parametrization of the text generation process"""

    do_sample: Optional[bool] = None
    """Whether to use sampling instead of greedy decoding when generating new tokens."""
    early_stopping: Optional[Union[bool, "AutomaticSpeechRecognitionEarlyStoppingEnum"]] = None
    """Controls the stopping condition for beam-based methods."""
    epsilon_cutoff: Optional[float] = None
    """If set to float strictly between 0 and 1, only tokens with a conditional probability
    greater than epsilon_cutoff will be sampled. In the paper, suggested values range from
    3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language
    Model Desmoothing](https://hf.co/papers/2210.15191) for more details.
    """
    eta_cutoff: Optional[float] = None
    """Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to
    float strictly between 0 and 1, a token is only considered if it is greater than either
    eta_cutoff or sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits))). The latter
    term is intuitively the expected next token probability, scaled by sqrt(eta_cutoff). In
    the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model.
    See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191)
    for more details.
    """
    max_length: Optional[int] = None
    """The maximum length (in tokens) of the generated text, including the input."""
    max_new_tokens: Optional[int] = None
    """The maximum number of tokens to generate. Takes precedence over max_length."""
    min_length: Optional[int] = None
    """The minimum length (in tokens) of the generated text, including the input."""
    min_new_tokens: Optional[int] = None
    """The minimum number of tokens to generate. Takes precedence over min_length."""
    num_beam_groups: Optional[int] = None
    """Number of groups to divide num_beams into in order to ensure diversity among different
    groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details.
    """
    num_beams: Optional[int] = None
    """Number of beams to use for beam search."""
    penalty_alpha: Optional[float] = None
    """The value balances the model confidence and the degeneration penalty in contrastive
    search decoding.
    """
    temperature: Optional[float] = None
    """The value used to modulate the next token probabilities."""
    top_k: Optional[int] = None
    """The number of highest probability vocabulary tokens to keep for top-k-filtering."""
    top_p: Optional[float] = None
    """If set to float < 1, only the smallest set of most probable tokens with probabilities
    that add up to top_p or higher are kept for generation.
    """
    typical_p: Optional[float] = None
    """Local typicality measures how similar the conditional probability of predicting a target
    token next is to the expected conditional probability of predicting a random token next,
    given the partial text already generated. If set to float < 1, the smallest set of the
    most locally typical tokens with probabilities that add up to typical_p or higher are
    kept for generation. See [this paper](https://hf.co/papers/2202.00666) for more details.
    """
    use_cache: Optional[bool] = None
    """Whether the model should use the past last key/values attentions to speed up decoding"""
72
+
73
+
74
@dataclass_with_extra
class AutomaticSpeechRecognitionParameters(BaseInferenceType):
    """Extra parameters accepted by the Automatic Speech Recognition task."""

    generation_parameters: Optional[AutomaticSpeechRecognitionGenerationParameters] = None
    # Settings controlling the text-generation step of transcription.
    return_timestamps: Optional[bool] = None
    # When True, timestamps are returned alongside the generated text.
82
+
83
+
84
@dataclass_with_extra
class AutomaticSpeechRecognitionInput(BaseInferenceType):
    """Payload sent to the Automatic Speech Recognition task."""

    inputs: str
    # Base64-encoded audio. When no `parameters` are sent, the raw audio
    # bytes may be used as the payload instead.
    parameters: Optional[AutomaticSpeechRecognitionParameters] = None
    # Optional task-specific settings.
94
+
95
+
96
@dataclass_with_extra
class AutomaticSpeechRecognitionOutputChunk(BaseInferenceType):
    """A piece of text identified by the model, together with its time span."""

    text: str
    # Text recognized for this chunk.
    timestamp: list[float]
    # Start and end timestamps corresponding with `text`.
102
+
103
+
104
@dataclass_with_extra
class AutomaticSpeechRecognitionOutput(BaseInferenceType):
    """Result returned by the Automatic Speech Recognition task."""

    text: str
    # The full recognized text.
    chunks: Optional[list[AutomaticSpeechRecognitionOutputChunk]] = None
    # Present only when returnTimestamps is enabled: the recognized audio
    # split into chunks identified by the model.
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/base.py ADDED
@@ -0,0 +1,164 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """Contains a base class for all inference types."""
15
+
16
+ import inspect
17
+ import json
18
+ import types
19
+ from dataclasses import asdict, dataclass
20
+ from typing import Any, TypeVar, Union, get_args
21
+
22
+
23
+ T = TypeVar("T", bound="BaseInferenceType")
24
+
25
+
26
+ def _repr_with_extra(self):
27
+ fields = list(self.__dataclass_fields__.keys())
28
+ other_fields = list(k for k in self.__dict__ if k not in fields)
29
+ return f"{self.__class__.__name__}({', '.join(f'{k}={self.__dict__[k]!r}' for k in fields + other_fields)})"
30
+
31
+
32
+ def dataclass_with_extra(cls: type[T]) -> type[T]:
33
+ """Decorator to add a custom __repr__ method to a dataclass, showing all fields, including extra ones.
34
+
35
+ This decorator only works with dataclasses that inherit from `BaseInferenceType`.
36
+ """
37
+ cls = dataclass(cls)
38
+ cls.__repr__ = _repr_with_extra # type: ignore[method-assign]
39
+ return cls
40
+
41
+
42
@dataclass
class BaseInferenceType(dict):
    """Base class for all inference types.

    Object is a dataclass and a dict for backward compatibility but plan is to remove the dict part in the future.

    Handle parsing from dict, list and json strings in a permissive way to ensure future-compatibility (e.g. all fields
    are made optional, and non-expected fields are added as dict attributes).
    """

    @classmethod
    def parse_obj_as_list(cls: type[T], data: Union[bytes, str, list, dict]) -> list[T]:
        """Alias to parse server response and return a list of instances.

        See `parse_obj` for more details. Raises `ValueError` if the parsed
        result is not a list.
        """
        output = cls.parse_obj(data)
        if not isinstance(output, list):
            raise ValueError(f"Invalid input data for {cls}. Expected a list, but got {type(output)}.")
        return output

    @classmethod
    def parse_obj_as_instance(cls: type[T], data: Union[bytes, str, list, dict]) -> T:
        """Alias to parse server response and return a single instance.

        See `parse_obj` for more details. Raises `ValueError` if the parsed
        result is a list.
        """
        output = cls.parse_obj(data)
        if isinstance(output, list):
            raise ValueError(f"Invalid input data for {cls}. Expected a single instance, but got a list.")
        return output

    @classmethod
    def parse_obj(cls: type[T], data: Union[bytes, str, list, dict]) -> Union[list[T], T]:
        """Parse server response as a dataclass or list of dataclasses.

        To enable future-compatibility, we want to handle cases where the server return more fields than expected.
        In such cases, we don't want to raise an error but still create the dataclass object. Remaining fields are
        added as dict attributes.
        """
        # Parse server response (from bytes)
        if isinstance(data, bytes):
            data = data.decode()
        if isinstance(data, str):
            data = json.loads(data)

        # If a list, parse each item individually
        if isinstance(data, list):
            return [cls.parse_obj(d) for d in data]  # type: ignore [misc]

        # At this point, we expect a dict
        if not isinstance(data, dict):
            raise ValueError(f"Invalid data type: {type(data)}")

        init_values = {}
        other_values = {}
        for key, value in data.items():
            # Server keys may contain dashes/spaces/uppercase; map them to
            # valid dataclass field names before matching.
            key = normalize_key(key)
            if key in cls.__dataclass_fields__ and cls.__dataclass_fields__[key].init:
                if isinstance(value, dict) or isinstance(value, list):
                    field_type = cls.__dataclass_fields__[key].type

                    # if `field_type` is a `BaseInferenceType`, parse it
                    if inspect.isclass(field_type) and issubclass(field_type, BaseInferenceType):
                        value = field_type.parse_obj(value)

                    # otherwise, recursively parse nested dataclasses (if possible)
                    # `get_args` returns handle Union and Optional for us
                    else:
                        expected_types = get_args(field_type)
                        for expected_type in expected_types:
                            # Unwrap list[...] / typing.List[...] down to the item type.
                            if (
                                isinstance(expected_type, types.GenericAlias) and expected_type.__origin__ is list
                            ) or getattr(expected_type, "_name", None) == "List":
                                expected_type = get_args(expected_type)[
                                    0
                                ]  # assume same type for all items in the list
                            if inspect.isclass(expected_type) and issubclass(expected_type, BaseInferenceType):
                                value = expected_type.parse_obj(value)
                                break
                init_values[key] = value
            else:
                other_values[key] = value

        # Make all missing fields default to None
        # => ensure that dataclass initialization will never fail even if the server does not return all fields.
        for key in cls.__dataclass_fields__:
            if key not in init_values:
                init_values[key] = None

        # Initialize dataclass with expected values
        item = cls(**init_values)

        # Add remaining fields as dict attributes
        item.update(other_values)

        # Add remaining fields as extra dataclass fields.
        # They won't be part of the dataclass fields but will be accessible as attributes.
        # Use @dataclass_with_extra to show them in __repr__.
        item.__dict__.update(other_values)
        return item

    def __post_init__(self):
        # Populate the dict view from the dataclass fields so that
        # item["field"] mirrors item.field right after construction.
        self.update(asdict(self))

    def __setitem__(self, __key: Any, __value: Any) -> None:
        # Hacky way to keep dataclass values in sync when dict is updated
        super().__setitem__(__key, __value)
        if __key in self.__dataclass_fields__ and getattr(self, __key, None) != __value:
            self.__setattr__(__key, __value)
        return

    def __setattr__(self, __name: str, __value: Any) -> None:
        # Hacky way to keep dict values in sync when dataclass is updated
        super().__setattr__(__name, __value)
        if self.get(__name) != __value:
            self[__name] = __value
        return
160
+
161
+
162
def normalize_key(key: str) -> str:
    """Map a raw (server-side) key to a valid Python identifier.

    e.g "content-type" -> "content_type", "Accept" -> "accept"
    """
    lowered = key.lower()
    return lowered.replace("-", "_").replace(" ", "_")
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/chat_completion.py ADDED
@@ -0,0 +1,347 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from typing import Any, Literal, Optional, Union
7
+
8
+ from .base import BaseInferenceType, dataclass_with_extra
9
+
10
+
11
+ @dataclass_with_extra
12
+ class ChatCompletionInputURL(BaseInferenceType):
13
+ url: str
14
+
15
+
16
+ ChatCompletionInputMessageChunkType = Literal["text", "image_url"]
17
+
18
+
19
+ @dataclass_with_extra
20
+ class ChatCompletionInputMessageChunk(BaseInferenceType):
21
+ type: "ChatCompletionInputMessageChunkType"
22
+ image_url: Optional[ChatCompletionInputURL] = None
23
+ text: Optional[str] = None
24
+
25
+
26
+ @dataclass_with_extra
27
+ class ChatCompletionInputFunctionDefinition(BaseInferenceType):
28
+ name: str
29
+ parameters: Any
30
+ description: Optional[str] = None
31
+
32
+
33
+ @dataclass_with_extra
34
+ class ChatCompletionInputToolCall(BaseInferenceType):
35
+ function: ChatCompletionInputFunctionDefinition
36
+ id: str
37
+ type: str
38
+
39
+
40
+ @dataclass_with_extra
41
+ class ChatCompletionInputMessage(BaseInferenceType):
42
+ role: str
43
+ content: Optional[Union[list[ChatCompletionInputMessageChunk], str]] = None
44
+ name: Optional[str] = None
45
+ tool_calls: Optional[list[ChatCompletionInputToolCall]] = None
46
+
47
+
48
+ @dataclass_with_extra
49
+ class ChatCompletionInputJSONSchema(BaseInferenceType):
50
+ name: str
51
+ """
52
+ The name of the response format.
53
+ """
54
+ description: Optional[str] = None
55
+ """
56
+ A description of what the response format is for, used by the model to determine
57
+ how to respond in the format.
58
+ """
59
+ schema: Optional[dict[str, object]] = None
60
+ """
61
+ The schema for the response format, described as a JSON Schema object. Learn how
62
+ to build JSON schemas [here](https://json-schema.org/).
63
+ """
64
+ strict: Optional[bool] = None
65
+ """
66
+ Whether to enable strict schema adherence when generating the output. If set to
67
+ true, the model will always follow the exact schema defined in the `schema`
68
+ field.
69
+ """
70
+
71
+
72
+ @dataclass_with_extra
73
+ class ChatCompletionInputResponseFormatText(BaseInferenceType):
74
+ type: Literal["text"]
75
+
76
+
77
+ @dataclass_with_extra
78
+ class ChatCompletionInputResponseFormatJSONSchema(BaseInferenceType):
79
+ type: Literal["json_schema"]
80
+ json_schema: ChatCompletionInputJSONSchema
81
+
82
+
83
+ @dataclass_with_extra
84
+ class ChatCompletionInputResponseFormatJSONObject(BaseInferenceType):
85
+ type: Literal["json_object"]
86
+
87
+
88
+ ChatCompletionInputGrammarType = Union[
89
+ ChatCompletionInputResponseFormatText,
90
+ ChatCompletionInputResponseFormatJSONSchema,
91
+ ChatCompletionInputResponseFormatJSONObject,
92
+ ]
93
+
94
+
95
+ @dataclass_with_extra
96
+ class ChatCompletionInputStreamOptions(BaseInferenceType):
97
+ include_usage: Optional[bool] = None
98
+ """If set, an additional chunk will be streamed before the data: [DONE] message. The usage
99
+ field on this chunk shows the token usage statistics for the entire request, and the
100
+ choices field will always be an empty array. All other chunks will also include a usage
101
+ field, but with a null value.
102
+ """
103
+
104
+
105
+ @dataclass_with_extra
106
+ class ChatCompletionInputFunctionName(BaseInferenceType):
107
+ name: str
108
+
109
+
110
+ @dataclass_with_extra
111
+ class ChatCompletionInputToolChoiceClass(BaseInferenceType):
112
+ function: ChatCompletionInputFunctionName
113
+
114
+
115
+ ChatCompletionInputToolChoiceEnum = Literal["auto", "none", "required"]
116
+
117
+
118
+ @dataclass_with_extra
119
+ class ChatCompletionInputTool(BaseInferenceType):
120
+ function: ChatCompletionInputFunctionDefinition
121
+ type: str
122
+
123
+
124
+ @dataclass_with_extra
125
+ class ChatCompletionInput(BaseInferenceType):
126
+ """Chat Completion Input.
127
+ Auto-generated from TGI specs.
128
+ For more details, check out
129
+ https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.
130
+ """
131
+
132
+ messages: list[ChatCompletionInputMessage]
133
+ """A list of messages comprising the conversation so far."""
134
+ frequency_penalty: Optional[float] = None
135
+ """Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing
136
+ frequency in the text so far,
137
+ decreasing the model's likelihood to repeat the same line verbatim.
138
+ """
139
+ logit_bias: Optional[list[float]] = None
140
+ """UNUSED
141
+ Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON
142
+ object that maps tokens
143
+ (specified by their token ID in the tokenizer) to an associated bias value from -100 to
144
+ 100. Mathematically,
145
+ the bias is added to the logits generated by the model prior to sampling. The exact
146
+ effect will vary per model,
147
+ but values between -1 and 1 should decrease or increase likelihood of selection; values
148
+ like -100 or 100 should
149
+ result in a ban or exclusive selection of the relevant token.
150
+ """
151
+ logprobs: Optional[bool] = None
152
+ """Whether to return log probabilities of the output tokens or not. If true, returns the log
153
+ probabilities of each
154
+ output token returned in the content of message.
155
+ """
156
+ max_tokens: Optional[int] = None
157
+ """The maximum number of tokens that can be generated in the chat completion."""
158
+ model: Optional[str] = None
159
+ """[UNUSED] ID of the model to use. See the model endpoint compatibility table for details
160
+ on which models work with the Chat API.
161
+ """
162
+ n: Optional[int] = None
163
+ """UNUSED
164
+ How many chat completion choices to generate for each input message. Note that you will
165
+ be charged based on the
166
+ number of generated tokens across all of the choices. Keep n as 1 to minimize costs.
167
+ """
168
+ presence_penalty: Optional[float] = None
169
+ """Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they
170
+ appear in the text so far,
171
+ increasing the model's likelihood to talk about new topics
172
+ """
173
+ response_format: Optional[ChatCompletionInputGrammarType] = None
174
+ seed: Optional[int] = None
175
+ stop: Optional[list[str]] = None
176
+ """Up to 4 sequences where the API will stop generating further tokens."""
177
+ stream: Optional[bool] = None
178
+ stream_options: Optional[ChatCompletionInputStreamOptions] = None
179
+ temperature: Optional[float] = None
180
+ """What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the
181
+ output more random, while
182
+ lower values like 0.2 will make it more focused and deterministic.
183
+ We generally recommend altering this or `top_p` but not both.
184
+ """
185
+ tool_choice: Optional[Union[ChatCompletionInputToolChoiceClass, "ChatCompletionInputToolChoiceEnum"]] = None
186
+ tool_prompt: Optional[str] = None
187
+ """A prompt to be appended before the tools"""
188
+ tools: Optional[list[ChatCompletionInputTool]] = None
189
+ """A list of tools the model may call. Currently, only functions are supported as a tool.
190
+ Use this to provide a list of
191
+ functions the model may generate JSON inputs for.
192
+ """
193
+ top_logprobs: Optional[int] = None
194
+ """An integer between 0 and 5 specifying the number of most likely tokens to return at each
195
+ token position, each with
196
+ an associated log probability. logprobs must be set to true if this parameter is used.
197
+ """
198
+ top_p: Optional[float] = None
199
+ """An alternative to sampling with temperature, called nucleus sampling, where the model
200
+ considers the results of the
201
+ tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10%
202
+ probability mass are considered.
203
+ """
204
+
205
+
206
+ @dataclass_with_extra
207
+ class ChatCompletionOutputTopLogprob(BaseInferenceType):
208
+ logprob: float
209
+ token: str
210
+
211
+
212
+ @dataclass_with_extra
213
+ class ChatCompletionOutputLogprob(BaseInferenceType):
214
+ logprob: float
215
+ token: str
216
+ top_logprobs: list[ChatCompletionOutputTopLogprob]
217
+
218
+
219
+ @dataclass_with_extra
220
+ class ChatCompletionOutputLogprobs(BaseInferenceType):
221
+ content: list[ChatCompletionOutputLogprob]
222
+
223
+
224
+ @dataclass_with_extra
225
+ class ChatCompletionOutputFunctionDefinition(BaseInferenceType):
226
+ arguments: str
227
+ name: str
228
+ description: Optional[str] = None
229
+
230
+
231
+ @dataclass_with_extra
232
+ class ChatCompletionOutputToolCall(BaseInferenceType):
233
+ function: ChatCompletionOutputFunctionDefinition
234
+ id: str
235
+ type: str
236
+
237
+
238
+ @dataclass_with_extra
239
+ class ChatCompletionOutputMessage(BaseInferenceType):
240
+ role: str
241
+ content: Optional[str] = None
242
+ reasoning: Optional[str] = None
243
+ tool_call_id: Optional[str] = None
244
+ tool_calls: Optional[list[ChatCompletionOutputToolCall]] = None
245
+
246
+
247
+ @dataclass_with_extra
248
+ class ChatCompletionOutputComplete(BaseInferenceType):
249
+ finish_reason: str
250
+ index: int
251
+ message: ChatCompletionOutputMessage
252
+ logprobs: Optional[ChatCompletionOutputLogprobs] = None
253
+
254
+
255
+ @dataclass_with_extra
256
+ class ChatCompletionOutputUsage(BaseInferenceType):
257
+ completion_tokens: int
258
+ prompt_tokens: int
259
+ total_tokens: int
260
+
261
+
262
+ @dataclass_with_extra
263
+ class ChatCompletionOutput(BaseInferenceType):
264
+ """Chat Completion Output.
265
+ Auto-generated from TGI specs.
266
+ For more details, check out
267
+ https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.
268
+ """
269
+
270
+ choices: list[ChatCompletionOutputComplete]
271
+ created: int
272
+ id: str
273
+ model: str
274
+ system_fingerprint: str
275
+ usage: ChatCompletionOutputUsage
276
+
277
+
278
+ @dataclass_with_extra
279
+ class ChatCompletionStreamOutputFunction(BaseInferenceType):
280
+ arguments: str
281
+ name: Optional[str] = None
282
+
283
+
284
+ @dataclass_with_extra
285
+ class ChatCompletionStreamOutputDeltaToolCall(BaseInferenceType):
286
+ function: ChatCompletionStreamOutputFunction
287
+ id: str
288
+ index: int
289
+ type: str
290
+
291
+
292
+ @dataclass_with_extra
293
+ class ChatCompletionStreamOutputDelta(BaseInferenceType):
294
+ role: str
295
+ content: Optional[str] = None
296
+ reasoning: Optional[str] = None
297
+ tool_call_id: Optional[str] = None
298
+ tool_calls: Optional[list[ChatCompletionStreamOutputDeltaToolCall]] = None
299
+
300
+
301
+ @dataclass_with_extra
302
+ class ChatCompletionStreamOutputTopLogprob(BaseInferenceType):
303
+ logprob: float
304
+ token: str
305
+
306
+
307
+ @dataclass_with_extra
308
+ class ChatCompletionStreamOutputLogprob(BaseInferenceType):
309
+ logprob: float
310
+ token: str
311
+ top_logprobs: list[ChatCompletionStreamOutputTopLogprob]
312
+
313
+
314
+ @dataclass_with_extra
315
+ class ChatCompletionStreamOutputLogprobs(BaseInferenceType):
316
+ content: list[ChatCompletionStreamOutputLogprob]
317
+
318
+
319
+ @dataclass_with_extra
320
+ class ChatCompletionStreamOutputChoice(BaseInferenceType):
321
+ delta: ChatCompletionStreamOutputDelta
322
+ index: int
323
+ finish_reason: Optional[str] = None
324
+ logprobs: Optional[ChatCompletionStreamOutputLogprobs] = None
325
+
326
+
327
+ @dataclass_with_extra
328
+ class ChatCompletionStreamOutputUsage(BaseInferenceType):
329
+ completion_tokens: int
330
+ prompt_tokens: int
331
+ total_tokens: int
332
+
333
+
334
+ @dataclass_with_extra
335
+ class ChatCompletionStreamOutput(BaseInferenceType):
336
+ """Chat Completion Stream Output.
337
+ Auto-generated from TGI specs.
338
+ For more details, check out
339
+ https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts.
340
+ """
341
+
342
+ choices: list[ChatCompletionStreamOutputChoice]
343
+ created: int
344
+ id: str
345
+ model: str
346
+ system_fingerprint: str
347
+ usage: Optional[ChatCompletionStreamOutputUsage] = None
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/depth_estimation.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from typing import Any, Optional
7
+
8
+ from .base import BaseInferenceType, dataclass_with_extra
9
+
10
+
11
+ @dataclass_with_extra
12
+ class DepthEstimationInput(BaseInferenceType):
13
+ """Inputs for Depth Estimation inference"""
14
+
15
+ inputs: Any
16
+ """The input image data"""
17
+ parameters: Optional[dict[str, Any]] = None
18
+ """Additional inference parameters for Depth Estimation"""
19
+
20
+
21
+ @dataclass_with_extra
22
+ class DepthEstimationOutput(BaseInferenceType):
23
+ """Outputs of inference for the Depth Estimation task"""
24
+
25
+ depth: Any
26
+ """The predicted depth as an image"""
27
+ predicted_depth: Any
28
+ """The predicted depth as a tensor"""
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/document_question_answering.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from typing import Any, Optional, Union
7
+
8
+ from .base import BaseInferenceType, dataclass_with_extra
9
+
10
+
11
+ @dataclass_with_extra
12
+ class DocumentQuestionAnsweringInputData(BaseInferenceType):
13
+ """One (document, question) pair to answer"""
14
+
15
+ image: Any
16
+ """The image on which the question is asked"""
17
+ question: str
18
+ """A question to ask of the document"""
19
+
20
+
21
+ @dataclass_with_extra
22
+ class DocumentQuestionAnsweringParameters(BaseInferenceType):
23
+ """Additional inference parameters for Document Question Answering"""
24
+
25
+ doc_stride: Optional[int] = None
26
+ """If the words in the document are too long to fit with the question for the model, it will
27
+ be split in several chunks with some overlap. This argument controls the size of that
28
+ overlap.
29
+ """
30
+ handle_impossible_answer: Optional[bool] = None
31
+ """Whether to accept impossible as an answer"""
32
+ lang: Optional[str] = None
33
+ """Language to use while running OCR. Defaults to english."""
34
+ max_answer_len: Optional[int] = None
35
+ """The maximum length of predicted answers (e.g., only answers with a shorter length are
36
+ considered).
37
+ """
38
+ max_question_len: Optional[int] = None
39
+ """The maximum length of the question after tokenization. It will be truncated if needed."""
40
+ max_seq_len: Optional[int] = None
41
+ """The maximum length of the total sentence (context + question) in tokens of each chunk
42
+ passed to the model. The context will be split in several chunks (using doc_stride as
43
+ overlap) if needed.
44
+ """
45
+ top_k: Optional[int] = None
46
+ """The number of answers to return (will be chosen by order of likelihood). Can return less
47
+ than top_k answers if there are not enough options available within the context.
48
+ """
49
+ word_boxes: Optional[list[Union[list[float], str]]] = None
50
+ """A list of words and bounding boxes (normalized 0->1000). If provided, the inference will
51
+ skip the OCR step and use the provided bounding boxes instead.
52
+ """
53
+
54
+
55
+ @dataclass_with_extra
56
+ class DocumentQuestionAnsweringInput(BaseInferenceType):
57
+ """Inputs for Document Question Answering inference"""
58
+
59
+ inputs: DocumentQuestionAnsweringInputData
60
+ """One (document, question) pair to answer"""
61
+ parameters: Optional[DocumentQuestionAnsweringParameters] = None
62
+ """Additional inference parameters for Document Question Answering"""
63
+
64
+
65
+ @dataclass_with_extra
66
+ class DocumentQuestionAnsweringOutputElement(BaseInferenceType):
67
+ """Outputs of inference for the Document Question Answering task"""
68
+
69
+ answer: str
70
+ """The answer to the question."""
71
+ end: int
72
+ """The end word index of the answer (in the OCR’d version of the input or provided word
73
+ boxes).
74
+ """
75
+ score: float
76
+ """The probability associated to the answer."""
77
+ start: int
78
+ """The start word index of the answer (in the OCR’d version of the input or provided word
79
+ boxes).
80
+ """
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/feature_extraction.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from typing import Literal, Optional, Union
7
+
8
+ from .base import BaseInferenceType, dataclass_with_extra
9
+
10
+
11
+ FeatureExtractionInputTruncationDirection = Literal["left", "right"]
12
+
13
+
14
+ @dataclass_with_extra
15
+ class FeatureExtractionInput(BaseInferenceType):
16
+ """Feature Extraction Input.
17
+ Auto-generated from TEI specs.
18
+ For more details, check out
19
+ https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tei-import.ts.
20
+ """
21
+
22
+ inputs: Union[list[str], str]
23
+ """The text or list of texts to embed."""
24
+ normalize: Optional[bool] = None
25
+ prompt_name: Optional[str] = None
26
+ """The name of the prompt that should be used by for encoding. If not set, no prompt
27
+ will be applied.
28
+ Must be a key in the `sentence-transformers` configuration `prompts` dictionary.
29
+ For example if ``prompt_name`` is "query" and the ``prompts`` is {"query": "query: ",
30
+ ...},
31
+ then the sentence "What is the capital of France?" will be encoded as
32
+ "query: What is the capital of France?" because the prompt text will be prepended before
33
+ any text to encode.
34
+ """
35
+ truncate: Optional[bool] = None
36
+ truncation_direction: Optional["FeatureExtractionInputTruncationDirection"] = None
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/fill_mask.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from typing import Any, Optional
7
+
8
+ from .base import BaseInferenceType, dataclass_with_extra
9
+
10
+
11
+ @dataclass_with_extra
12
+ class FillMaskParameters(BaseInferenceType):
13
+ """Additional inference parameters for Fill Mask"""
14
+
15
+ targets: Optional[list[str]] = None
16
+ """When passed, the model will limit the scores to the passed targets instead of looking up
17
+ in the whole vocabulary. If the provided targets are not in the model vocab, they will be
18
+ tokenized and the first resulting token will be used (with a warning, and that might be
19
+ slower).
20
+ """
21
+ top_k: Optional[int] = None
22
+ """When passed, overrides the number of predictions to return."""
23
+
24
+
25
+ @dataclass_with_extra
26
+ class FillMaskInput(BaseInferenceType):
27
+ """Inputs for Fill Mask inference"""
28
+
29
+ inputs: str
30
+ """The text with masked tokens"""
31
+ parameters: Optional[FillMaskParameters] = None
32
+ """Additional inference parameters for Fill Mask"""
33
+
34
+
35
+ @dataclass_with_extra
36
+ class FillMaskOutputElement(BaseInferenceType):
37
+ """Outputs of inference for the Fill Mask task"""
38
+
39
+ score: float
40
+ """The corresponding probability"""
41
+ sequence: str
42
+ """The corresponding input with the mask token prediction."""
43
+ token: int
44
+ """The predicted token id (to replace the masked one)."""
45
+ token_str: Any
46
+ fill_mask_output_token_str: Optional[str] = None
47
+ """The predicted token (to replace the masked one)."""
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/image_classification.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from typing import Literal, Optional
7
+
8
+ from .base import BaseInferenceType, dataclass_with_extra
9
+
10
+
11
+ ImageClassificationOutputTransform = Literal["sigmoid", "softmax", "none"]
12
+
13
+
14
+ @dataclass_with_extra
15
+ class ImageClassificationParameters(BaseInferenceType):
16
+ """Additional inference parameters for Image Classification"""
17
+
18
+ function_to_apply: Optional["ImageClassificationOutputTransform"] = None
19
+ """The function to apply to the model outputs in order to retrieve the scores."""
20
+ top_k: Optional[int] = None
21
+ """When specified, limits the output to the top K most probable classes."""
22
+
23
+
24
+ @dataclass_with_extra
25
+ class ImageClassificationInput(BaseInferenceType):
26
+ """Inputs for Image Classification inference"""
27
+
28
+ inputs: str
29
+ """The input image data as a base64-encoded string. If no `parameters` are provided, you can
30
+ also provide the image data as a raw bytes payload.
31
+ """
32
+ parameters: Optional[ImageClassificationParameters] = None
33
+ """Additional inference parameters for Image Classification"""
34
+
35
+
36
+ @dataclass_with_extra
37
+ class ImageClassificationOutputElement(BaseInferenceType):
38
+ """Outputs of inference for the Image Classification task"""
39
+
40
+ label: str
41
+ """The predicted class label."""
42
+ score: float
43
+ """The corresponding probability."""
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/image_segmentation.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from typing import Literal, Optional
7
+
8
+ from .base import BaseInferenceType, dataclass_with_extra
9
+
10
+
11
+ ImageSegmentationSubtask = Literal["instance", "panoptic", "semantic"]
12
+
13
+
14
+ @dataclass_with_extra
15
+ class ImageSegmentationParameters(BaseInferenceType):
16
+ """Additional inference parameters for Image Segmentation"""
17
+
18
+ mask_threshold: Optional[float] = None
19
+ """Threshold to use when turning the predicted masks into binary values."""
20
+ overlap_mask_area_threshold: Optional[float] = None
21
+ """Mask overlap threshold to eliminate small, disconnected segments."""
22
+ subtask: Optional["ImageSegmentationSubtask"] = None
23
+ """Segmentation task to be performed, depending on model capabilities."""
24
+ threshold: Optional[float] = None
25
+ """Probability threshold to filter out predicted masks."""
26
+
27
+
28
+ @dataclass_with_extra
29
+ class ImageSegmentationInput(BaseInferenceType):
30
+ """Inputs for Image Segmentation inference"""
31
+
32
+ inputs: str
33
+ """The input image data as a base64-encoded string. If no `parameters` are provided, you can
34
+ also provide the image data as a raw bytes payload.
35
+ """
36
+ parameters: Optional[ImageSegmentationParameters] = None
37
+ """Additional inference parameters for Image Segmentation"""
38
+
39
+
40
+ @dataclass_with_extra
41
+ class ImageSegmentationOutputElement(BaseInferenceType):
42
+ """Outputs of inference for the Image Segmentation task
43
+ A predicted mask / segment
44
+ """
45
+
46
+ label: str
47
+ """The label of the predicted segment."""
48
+ mask: str
49
+ """The corresponding mask as a black-and-white image (base64-encoded)."""
50
+ score: Optional[float] = None
51
+ """The score or confidence degree the model has."""
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/image_to_image.py ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from typing import Any, Optional
7
+
8
+ from .base import BaseInferenceType, dataclass_with_extra
9
+
10
+
11
+ @dataclass_with_extra
12
+ class ImageToImageTargetSize(BaseInferenceType):
13
+ """The size in pixels of the output image. This parameter is only supported by some
14
+ providers and for specific models. It will be ignored when unsupported.
15
+ """
16
+
17
+ height: int
18
+ width: int
19
+
20
+
21
+ @dataclass_with_extra
22
+ class ImageToImageParameters(BaseInferenceType):
23
+ """Additional inference parameters for Image To Image"""
24
+
25
+ guidance_scale: Optional[float] = None
26
+ """For diffusion models. A higher guidance scale value encourages the model to generate
27
+ images closely linked to the text prompt at the expense of lower image quality.
28
+ """
29
+ negative_prompt: Optional[str] = None
30
+ """One prompt to guide what NOT to include in image generation."""
31
+ num_inference_steps: Optional[int] = None
32
+ """For diffusion models. The number of denoising steps. More denoising steps usually lead to
33
+ a higher quality image at the expense of slower inference.
34
+ """
35
+ prompt: Optional[str] = None
36
+ """The text prompt to guide the image generation."""
37
+ target_size: Optional[ImageToImageTargetSize] = None
38
+ """The size in pixels of the output image. This parameter is only supported by some
39
+ providers and for specific models. It will be ignored when unsupported.
40
+ """
41
+
42
+
43
+ @dataclass_with_extra
44
+ class ImageToImageInput(BaseInferenceType):
45
+ """Inputs for Image To Image inference"""
46
+
47
+ inputs: str
48
+ """The input image data as a base64-encoded string. If no `parameters` are provided, you can
49
+ also provide the image data as a raw bytes payload.
50
+ """
51
+ parameters: Optional[ImageToImageParameters] = None
52
+ """Additional inference parameters for Image To Image"""
53
+
54
+
55
+ @dataclass_with_extra
56
+ class ImageToImageOutput(BaseInferenceType):
57
+ """Outputs of inference for the Image To Image task"""
58
+
59
+ image: Any
60
+ """The output image returned as raw bytes in the payload."""
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/image_to_text.py ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from typing import Any, Literal, Optional, Union
7
+
8
+ from .base import BaseInferenceType, dataclass_with_extra
9
+
10
+
11
+ ImageToTextEarlyStoppingEnum = Literal["never"]
12
+
13
+
14
+ @dataclass_with_extra
15
+ class ImageToTextGenerationParameters(BaseInferenceType):
16
+ """Parametrization of the text generation process"""
17
+
18
+ do_sample: Optional[bool] = None
19
+ """Whether to use sampling instead of greedy decoding when generating new tokens."""
20
+ early_stopping: Optional[Union[bool, "ImageToTextEarlyStoppingEnum"]] = None
21
+ """Controls the stopping condition for beam-based methods."""
22
+ epsilon_cutoff: Optional[float] = None
23
+ """If set to float strictly between 0 and 1, only tokens with a conditional probability
24
+ greater than epsilon_cutoff will be sampled. In the paper, suggested values range from
25
+ 3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language
26
+ Model Desmoothing](https://hf.co/papers/2210.15191) for more details.
27
+ """
28
+ eta_cutoff: Optional[float] = None
29
+ """Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to
30
+ float strictly between 0 and 1, a token is only considered if it is greater than either
31
+ eta_cutoff or sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits))). The latter
32
+ term is intuitively the expected next token probability, scaled by sqrt(eta_cutoff). In
33
+ the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model.
34
+ See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191)
35
+ for more details.
36
+ """
37
+ max_length: Optional[int] = None
38
+ """The maximum length (in tokens) of the generated text, including the input."""
39
+ max_new_tokens: Optional[int] = None
40
+ """The maximum number of tokens to generate. Takes precedence over max_length."""
41
+ min_length: Optional[int] = None
42
+ """The minimum length (in tokens) of the generated text, including the input."""
43
+ min_new_tokens: Optional[int] = None
44
+ """The minimum number of tokens to generate. Takes precedence over min_length."""
45
+ num_beam_groups: Optional[int] = None
46
+ """Number of groups to divide num_beams into in order to ensure diversity among different
47
+ groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details.
48
+ """
49
+ num_beams: Optional[int] = None
50
+ """Number of beams to use for beam search."""
51
+ penalty_alpha: Optional[float] = None
52
+ """The value balances the model confidence and the degeneration penalty in contrastive
53
+ search decoding.
54
+ """
55
+ temperature: Optional[float] = None
56
+ """The value used to modulate the next token probabilities."""
57
+ top_k: Optional[int] = None
58
+ """The number of highest probability vocabulary tokens to keep for top-k-filtering."""
59
+ top_p: Optional[float] = None
60
+ """If set to float < 1, only the smallest set of most probable tokens with probabilities
61
+ that add up to top_p or higher are kept for generation.
62
+ """
63
+ typical_p: Optional[float] = None
64
+ """Local typicality measures how similar the conditional probability of predicting a target
65
+ token next is to the expected conditional probability of predicting a random token next,
66
+ given the partial text already generated. If set to float < 1, the smallest set of the
67
+ most locally typical tokens with probabilities that add up to typical_p or higher are
68
+ kept for generation. See [this paper](https://hf.co/papers/2202.00666) for more details.
69
+ """
70
+ use_cache: Optional[bool] = None
71
+ """Whether the model should use the past last key/values attentions to speed up decoding"""
72
+
73
+
74
+ @dataclass_with_extra
75
+ class ImageToTextParameters(BaseInferenceType):
76
+ """Additional inference parameters for Image To Text"""
77
+
78
+ generation_parameters: Optional[ImageToTextGenerationParameters] = None
79
+ """Parametrization of the text generation process"""
80
+ max_new_tokens: Optional[int] = None
81
+ """The amount of maximum tokens to generate."""
82
+
83
+
84
+ @dataclass_with_extra
85
+ class ImageToTextInput(BaseInferenceType):
86
+ """Inputs for Image To Text inference"""
87
+
88
+ inputs: Any
89
+ """The input image data"""
90
+ parameters: Optional[ImageToTextParameters] = None
91
+ """Additional inference parameters for Image To Text"""
92
+
93
+
94
+ @dataclass_with_extra
95
+ class ImageToTextOutput(BaseInferenceType):
96
+ """Outputs of inference for the Image To Text task"""
97
+
98
+ generated_text: Any
99
+ image_to_text_output_generated_text: Optional[str] = None
100
+ """The generated text."""
env/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/image_to_video.py ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from typing import Any, Optional
7
+
8
+ from .base import BaseInferenceType, dataclass_with_extra
9
+
10
+
11
@dataclass_with_extra
class ImageToVideoTargetSize(BaseInferenceType):
    """Requested output frame size, in pixels."""

    height: int
    width: int
17
+
18
+
19
@dataclass_with_extra
class ImageToVideoParameters(BaseInferenceType):
    """Optional parameters accepted by the Image To Video task."""

    guidance_scale: Optional[float] = None
    """Diffusion-model setting: larger values push the output to follow the
    text prompt more closely, trading off some visual quality.
    """
    negative_prompt: Optional[str] = None
    """A single prompt describing what should be kept OUT of the video."""
    num_frames: Optional[float] = None
    """How many frames the generated video should contain."""
    num_inference_steps: Optional[int] = None
    """Number of denoising steps to run; more steps generally improve quality
    but make inference slower.
    """
    prompt: Optional[str] = None
    """Text prompt steering the video generation."""
    seed: Optional[int] = None
    """Random-number-generator seed, for reproducible outputs."""
    target_size: Optional[ImageToVideoTargetSize] = None
    """Pixel dimensions requested for the output frames."""
41
+
42
+
43
@dataclass_with_extra
class ImageToVideoInput(BaseInferenceType):
    """Request payload for Image To Video inference."""

    inputs: str
    """The input image data as a base64-encoded string. If no `parameters` are provided, you can
    also provide the image data as a raw bytes payload.
    """
    parameters: Optional[ImageToVideoParameters] = None
    """Optional extra parameters for the Image To Video task."""
53
+
54
+
55
@dataclass_with_extra
class ImageToVideoOutput(BaseInferenceType):
    """Response payload of the Image To Video task."""

    video: Any
    """The generated video, delivered as raw bytes in the payload."""