ZTWHHH commited on
Commit
0b59cdb
·
verified ·
1 Parent(s): e839cd2

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. parrot/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_common.cpython-310.pyc +0 -0
  3. parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/__init__.py +0 -0
  4. parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/_async_client.py +0 -0
  5. parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__init__.py +187 -0
  6. parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/audio_classification.cpython-310.pyc +0 -0
  7. parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/automatic_speech_recognition.cpython-310.pyc +0 -0
  8. parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/chat_completion.cpython-310.pyc +0 -0
  9. parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/depth_estimation.cpython-310.pyc +0 -0
  10. parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/document_question_answering.cpython-310.pyc +0 -0
  11. parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/feature_extraction.cpython-310.pyc +0 -0
  12. parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/fill_mask.cpython-310.pyc +0 -0
  13. parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/question_answering.cpython-310.pyc +0 -0
  14. parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/sentence_similarity.cpython-310.pyc +0 -0
  15. parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_generation.cpython-310.pyc +0 -0
  16. parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_to_image.cpython-310.pyc +0 -0
  17. parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_to_speech.cpython-310.pyc +0 -0
  18. parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_to_video.cpython-310.pyc +0 -0
  19. parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/translation.cpython-310.pyc +0 -0
  20. parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/audio_classification.py +43 -0
  21. parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/automatic_speech_recognition.py +114 -0
  22. parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/base.py +161 -0
  23. parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/document_question_answering.py +80 -0
  24. parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/feature_extraction.py +36 -0
  25. parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/fill_mask.py +47 -0
  26. parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_classification.py +43 -0
  27. parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_segmentation.py +51 -0
  28. parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_to_image.py +54 -0
  29. parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/object_detection.py +58 -0
  30. parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/question_answering.py +74 -0
  31. parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/summarization.py +41 -0
  32. parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/table_question_answering.py +62 -0
  33. parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text2text_generation.py +42 -0
  34. parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text_classification.py +41 -0
  35. parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text_to_audio.py +100 -0
  36. parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text_to_image.py +50 -0
  37. parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text_to_speech.py +100 -0
  38. parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text_to_video.py +46 -0
  39. parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/translation.py +49 -0
  40. parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/video_classification.py +45 -0
  41. parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/visual_question_answering.py +49 -0
  42. parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/zero_shot_classification.py +45 -0
  43. parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/zero_shot_image_classification.py +40 -0
  44. parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/zero_shot_object_detection.py +52 -0
  45. parrot/lib/python3.10/site-packages/huggingface_hub/utils/__init__.py +110 -0
  46. parrot/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_cache_assets.cpython-310.pyc +0 -0
  47. parrot/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_git_credential.cpython-310.pyc +0 -0
  48. parrot/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_headers.cpython-310.pyc +0 -0
  49. parrot/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_hf_folder.cpython-310.pyc +0 -0
  50. parrot/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_safetensors.cpython-310.pyc +0 -0
.gitattributes CHANGED
@@ -152,3 +152,4 @@ parrot/lib/python3.10/site-packages/cv2/qt/fonts/DejaVuSansCondensed.ttf filter=
152
  parrot/lib/libssl.so filter=lfs diff=lfs merge=lfs -text
153
  parrot/lib/libitm.so filter=lfs diff=lfs merge=lfs -text
154
  parrot/lib/python3.10/site-packages/wandb/sdk/__pycache__/wandb_run.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 
 
152
  parrot/lib/libssl.so filter=lfs diff=lfs merge=lfs -text
153
  parrot/lib/libitm.so filter=lfs diff=lfs merge=lfs -text
154
  parrot/lib/python3.10/site-packages/wandb/sdk/__pycache__/wandb_run.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
155
+ parrot/lib/python3.10/site-packages/wandb/sdk/internal/__pycache__/internal_api.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
parrot/lib/python3.10/site-packages/huggingface_hub/inference/__pycache__/_common.cpython-310.pyc ADDED
Binary file (11.2 kB). View file
 
parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/__init__.py ADDED
File without changes
parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/_async_client.py ADDED
The diff for this file is too large to render. See raw diff
 
parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__init__.py ADDED
@@ -0,0 +1,187 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file is auto-generated by `utils/generate_inference_types.py`.
2
+ # Do not modify it manually.
3
+ #
4
+ # ruff: noqa: F401
5
+
6
+ from .audio_classification import (
7
+ AudioClassificationInput,
8
+ AudioClassificationOutputElement,
9
+ AudioClassificationOutputTransform,
10
+ AudioClassificationParameters,
11
+ )
12
+ from .audio_to_audio import AudioToAudioInput, AudioToAudioOutputElement
13
+ from .automatic_speech_recognition import (
14
+ AutomaticSpeechRecognitionEarlyStoppingEnum,
15
+ AutomaticSpeechRecognitionGenerationParameters,
16
+ AutomaticSpeechRecognitionInput,
17
+ AutomaticSpeechRecognitionOutput,
18
+ AutomaticSpeechRecognitionOutputChunk,
19
+ AutomaticSpeechRecognitionParameters,
20
+ )
21
+ from .base import BaseInferenceType
22
+ from .chat_completion import (
23
+ ChatCompletionInput,
24
+ ChatCompletionInputFunctionDefinition,
25
+ ChatCompletionInputFunctionName,
26
+ ChatCompletionInputGrammarType,
27
+ ChatCompletionInputGrammarTypeType,
28
+ ChatCompletionInputMessage,
29
+ ChatCompletionInputMessageChunk,
30
+ ChatCompletionInputMessageChunkType,
31
+ ChatCompletionInputStreamOptions,
32
+ ChatCompletionInputTool,
33
+ ChatCompletionInputToolChoiceClass,
34
+ ChatCompletionInputToolChoiceEnum,
35
+ ChatCompletionInputURL,
36
+ ChatCompletionOutput,
37
+ ChatCompletionOutputComplete,
38
+ ChatCompletionOutputFunctionDefinition,
39
+ ChatCompletionOutputLogprob,
40
+ ChatCompletionOutputLogprobs,
41
+ ChatCompletionOutputMessage,
42
+ ChatCompletionOutputToolCall,
43
+ ChatCompletionOutputTopLogprob,
44
+ ChatCompletionOutputUsage,
45
+ ChatCompletionStreamOutput,
46
+ ChatCompletionStreamOutputChoice,
47
+ ChatCompletionStreamOutputDelta,
48
+ ChatCompletionStreamOutputDeltaToolCall,
49
+ ChatCompletionStreamOutputFunction,
50
+ ChatCompletionStreamOutputLogprob,
51
+ ChatCompletionStreamOutputLogprobs,
52
+ ChatCompletionStreamOutputTopLogprob,
53
+ ChatCompletionStreamOutputUsage,
54
+ )
55
+ from .depth_estimation import DepthEstimationInput, DepthEstimationOutput
56
+ from .document_question_answering import (
57
+ DocumentQuestionAnsweringInput,
58
+ DocumentQuestionAnsweringInputData,
59
+ DocumentQuestionAnsweringOutputElement,
60
+ DocumentQuestionAnsweringParameters,
61
+ )
62
+ from .feature_extraction import FeatureExtractionInput, FeatureExtractionInputTruncationDirection
63
+ from .fill_mask import FillMaskInput, FillMaskOutputElement, FillMaskParameters
64
+ from .image_classification import (
65
+ ImageClassificationInput,
66
+ ImageClassificationOutputElement,
67
+ ImageClassificationOutputTransform,
68
+ ImageClassificationParameters,
69
+ )
70
+ from .image_segmentation import (
71
+ ImageSegmentationInput,
72
+ ImageSegmentationOutputElement,
73
+ ImageSegmentationParameters,
74
+ ImageSegmentationSubtask,
75
+ )
76
+ from .image_to_image import ImageToImageInput, ImageToImageOutput, ImageToImageParameters, ImageToImageTargetSize
77
+ from .image_to_text import (
78
+ ImageToTextEarlyStoppingEnum,
79
+ ImageToTextGenerationParameters,
80
+ ImageToTextInput,
81
+ ImageToTextOutput,
82
+ ImageToTextParameters,
83
+ )
84
+ from .object_detection import (
85
+ ObjectDetectionBoundingBox,
86
+ ObjectDetectionInput,
87
+ ObjectDetectionOutputElement,
88
+ ObjectDetectionParameters,
89
+ )
90
+ from .question_answering import (
91
+ QuestionAnsweringInput,
92
+ QuestionAnsweringInputData,
93
+ QuestionAnsweringOutputElement,
94
+ QuestionAnsweringParameters,
95
+ )
96
+ from .sentence_similarity import SentenceSimilarityInput, SentenceSimilarityInputData
97
+ from .summarization import (
98
+ SummarizationInput,
99
+ SummarizationOutput,
100
+ SummarizationParameters,
101
+ SummarizationTruncationStrategy,
102
+ )
103
+ from .table_question_answering import (
104
+ Padding,
105
+ TableQuestionAnsweringInput,
106
+ TableQuestionAnsweringInputData,
107
+ TableQuestionAnsweringOutputElement,
108
+ TableQuestionAnsweringParameters,
109
+ )
110
+ from .text2text_generation import (
111
+ Text2TextGenerationInput,
112
+ Text2TextGenerationOutput,
113
+ Text2TextGenerationParameters,
114
+ Text2TextGenerationTruncationStrategy,
115
+ )
116
+ from .text_classification import (
117
+ TextClassificationInput,
118
+ TextClassificationOutputElement,
119
+ TextClassificationOutputTransform,
120
+ TextClassificationParameters,
121
+ )
122
+ from .text_generation import (
123
+ TextGenerationInput,
124
+ TextGenerationInputGenerateParameters,
125
+ TextGenerationInputGrammarType,
126
+ TextGenerationOutput,
127
+ TextGenerationOutputBestOfSequence,
128
+ TextGenerationOutputDetails,
129
+ TextGenerationOutputFinishReason,
130
+ TextGenerationOutputPrefillToken,
131
+ TextGenerationOutputToken,
132
+ TextGenerationStreamOutput,
133
+ TextGenerationStreamOutputStreamDetails,
134
+ TextGenerationStreamOutputToken,
135
+ TypeEnum,
136
+ )
137
+ from .text_to_audio import (
138
+ TextToAudioEarlyStoppingEnum,
139
+ TextToAudioGenerationParameters,
140
+ TextToAudioInput,
141
+ TextToAudioOutput,
142
+ TextToAudioParameters,
143
+ )
144
+ from .text_to_image import TextToImageInput, TextToImageOutput, TextToImageParameters
145
+ from .text_to_speech import (
146
+ TextToSpeechEarlyStoppingEnum,
147
+ TextToSpeechGenerationParameters,
148
+ TextToSpeechInput,
149
+ TextToSpeechOutput,
150
+ TextToSpeechParameters,
151
+ )
152
+ from .text_to_video import TextToVideoInput, TextToVideoOutput, TextToVideoParameters
153
+ from .token_classification import (
154
+ TokenClassificationAggregationStrategy,
155
+ TokenClassificationInput,
156
+ TokenClassificationOutputElement,
157
+ TokenClassificationParameters,
158
+ )
159
+ from .translation import TranslationInput, TranslationOutput, TranslationParameters, TranslationTruncationStrategy
160
+ from .video_classification import (
161
+ VideoClassificationInput,
162
+ VideoClassificationOutputElement,
163
+ VideoClassificationOutputTransform,
164
+ VideoClassificationParameters,
165
+ )
166
+ from .visual_question_answering import (
167
+ VisualQuestionAnsweringInput,
168
+ VisualQuestionAnsweringInputData,
169
+ VisualQuestionAnsweringOutputElement,
170
+ VisualQuestionAnsweringParameters,
171
+ )
172
+ from .zero_shot_classification import (
173
+ ZeroShotClassificationInput,
174
+ ZeroShotClassificationOutputElement,
175
+ ZeroShotClassificationParameters,
176
+ )
177
+ from .zero_shot_image_classification import (
178
+ ZeroShotImageClassificationInput,
179
+ ZeroShotImageClassificationOutputElement,
180
+ ZeroShotImageClassificationParameters,
181
+ )
182
+ from .zero_shot_object_detection import (
183
+ ZeroShotObjectDetectionBoundingBox,
184
+ ZeroShotObjectDetectionInput,
185
+ ZeroShotObjectDetectionOutputElement,
186
+ ZeroShotObjectDetectionParameters,
187
+ )
parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/audio_classification.cpython-310.pyc ADDED
Binary file (1.36 kB). View file
 
parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/automatic_speech_recognition.cpython-310.pyc ADDED
Binary file (2.63 kB). View file
 
parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/chat_completion.cpython-310.pyc ADDED
Binary file (8.21 kB). View file
 
parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/depth_estimation.cpython-310.pyc ADDED
Binary file (967 Bytes). View file
 
parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/document_question_answering.cpython-310.pyc ADDED
Binary file (1.98 kB). View file
 
parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/feature_extraction.cpython-310.pyc ADDED
Binary file (1.08 kB). View file
 
parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/fill_mask.cpython-310.pyc ADDED
Binary file (1.35 kB). View file
 
parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/question_answering.cpython-310.pyc ADDED
Binary file (1.8 kB). View file
 
parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/sentence_similarity.cpython-310.pyc ADDED
Binary file (948 Bytes). View file
 
parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_generation.cpython-310.pyc ADDED
Binary file (4.86 kB). View file
 
parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_to_image.cpython-310.pyc ADDED
Binary file (1.41 kB). View file
 
parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_to_speech.cpython-310.pyc ADDED
Binary file (2.16 kB). View file
 
parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/text_to_video.cpython-310.pyc ADDED
Binary file (1.37 kB). View file
 
parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/__pycache__/translation.cpython-310.pyc ADDED
Binary file (1.51 kB). View file
 
parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/audio_classification.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from typing import Literal, Optional
7
+
8
+ from .base import BaseInferenceType, dataclass_with_extra
9
+
10
+
11
+ AudioClassificationOutputTransform = Literal["sigmoid", "softmax", "none"]
12
+
13
+
14
+ @dataclass_with_extra
15
+ class AudioClassificationParameters(BaseInferenceType):
16
+ """Additional inference parameters for Audio Classification"""
17
+
18
+ function_to_apply: Optional["AudioClassificationOutputTransform"] = None
19
+ """The function to apply to the model outputs in order to retrieve the scores."""
20
+ top_k: Optional[int] = None
21
+ """When specified, limits the output to the top K most probable classes."""
22
+
23
+
24
+ @dataclass_with_extra
25
+ class AudioClassificationInput(BaseInferenceType):
26
+ """Inputs for Audio Classification inference"""
27
+
28
+ inputs: str
29
+ """The input audio data as a base64-encoded string. If no `parameters` are provided, you can
30
+ also provide the audio data as a raw bytes payload.
31
+ """
32
+ parameters: Optional[AudioClassificationParameters] = None
33
+ """Additional inference parameters for Audio Classification"""
34
+
35
+
36
+ @dataclass_with_extra
37
+ class AudioClassificationOutputElement(BaseInferenceType):
38
+ """Outputs for Audio Classification inference"""
39
+
40
+ label: str
41
+ """The predicted class label."""
42
+ score: float
43
+ """The corresponding probability."""
parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/automatic_speech_recognition.py ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from typing import List, Literal, Optional, Union
7
+
8
+ from .base import BaseInferenceType, dataclass_with_extra
9
+
10
+
11
+ AutomaticSpeechRecognitionEarlyStoppingEnum = Literal["never"]
12
+
13
+
14
+ @dataclass_with_extra
15
+ class AutomaticSpeechRecognitionGenerationParameters(BaseInferenceType):
16
+ """Parametrization of the text generation process"""
17
+
18
+ do_sample: Optional[bool] = None
19
+ """Whether to use sampling instead of greedy decoding when generating new tokens."""
20
+ early_stopping: Optional[Union[bool, "AutomaticSpeechRecognitionEarlyStoppingEnum"]] = None
21
+ """Controls the stopping condition for beam-based methods."""
22
+ epsilon_cutoff: Optional[float] = None
23
+ """If set to float strictly between 0 and 1, only tokens with a conditional probability
24
+ greater than epsilon_cutoff will be sampled. In the paper, suggested values range from
25
+ 3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language
26
+ Model Desmoothing](https://hf.co/papers/2210.15191) for more details.
27
+ """
28
+ eta_cutoff: Optional[float] = None
29
+ """Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to
30
+ float strictly between 0 and 1, a token is only considered if it is greater than either
31
+ eta_cutoff or sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits))). The latter
32
+ term is intuitively the expected next token probability, scaled by sqrt(eta_cutoff). In
33
+ the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model.
34
+ See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191)
35
+ for more details.
36
+ """
37
+ max_length: Optional[int] = None
38
+ """The maximum length (in tokens) of the generated text, including the input."""
39
+ max_new_tokens: Optional[int] = None
40
+ """The maximum number of tokens to generate. Takes precedence over max_length."""
41
+ min_length: Optional[int] = None
42
+ """The minimum length (in tokens) of the generated text, including the input."""
43
+ min_new_tokens: Optional[int] = None
44
+ """The minimum number of tokens to generate. Takes precedence over min_length."""
45
+ num_beam_groups: Optional[int] = None
46
+ """Number of groups to divide num_beams into in order to ensure diversity among different
47
+ groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details.
48
+ """
49
+ num_beams: Optional[int] = None
50
+ """Number of beams to use for beam search."""
51
+ penalty_alpha: Optional[float] = None
52
+ """The value balances the model confidence and the degeneration penalty in contrastive
53
+ search decoding.
54
+ """
55
+ temperature: Optional[float] = None
56
+ """The value used to modulate the next token probabilities."""
57
+ top_k: Optional[int] = None
58
+ """The number of highest probability vocabulary tokens to keep for top-k-filtering."""
59
+ top_p: Optional[float] = None
60
+ """If set to float < 1, only the smallest set of most probable tokens with probabilities
61
+ that add up to top_p or higher are kept for generation.
62
+ """
63
+ typical_p: Optional[float] = None
64
+ """Local typicality measures how similar the conditional probability of predicting a target
65
+ token next is to the expected conditional probability of predicting a random token next,
66
+ given the partial text already generated. If set to float < 1, the smallest set of the
67
+ most locally typical tokens with probabilities that add up to typical_p or higher are
68
+ kept for generation. See [this paper](https://hf.co/papers/2202.00666) for more details.
69
+ """
70
+ use_cache: Optional[bool] = None
71
+ """Whether the model should use the past last key/values attentions to speed up decoding"""
72
+
73
+
74
+ @dataclass_with_extra
75
+ class AutomaticSpeechRecognitionParameters(BaseInferenceType):
76
+ """Additional inference parameters for Automatic Speech Recognition"""
77
+
78
+ return_timestamps: Optional[bool] = None
79
+ """Whether to output corresponding timestamps with the generated text"""
80
+ # Will be deprecated in the future when the renaming to `generation_parameters` is implemented in transformers
81
+ generate_kwargs: Optional[AutomaticSpeechRecognitionGenerationParameters] = None
82
+ """Parametrization of the text generation process"""
83
+
84
+
85
+ @dataclass_with_extra
86
+ class AutomaticSpeechRecognitionInput(BaseInferenceType):
87
+ """Inputs for Automatic Speech Recognition inference"""
88
+
89
+ inputs: str
90
+ """The input audio data as a base64-encoded string. If no `parameters` are provided, you can
91
+ also provide the audio data as a raw bytes payload.
92
+ """
93
+ parameters: Optional[AutomaticSpeechRecognitionParameters] = None
94
+ """Additional inference parameters for Automatic Speech Recognition"""
95
+
96
+
97
+ @dataclass_with_extra
98
+ class AutomaticSpeechRecognitionOutputChunk(BaseInferenceType):
99
+ text: str
100
+ """A chunk of text identified by the model"""
101
+ timestamp: List[float]
102
+ """The start and end timestamps corresponding with the text"""
103
+
104
+
105
+ @dataclass_with_extra
106
+ class AutomaticSpeechRecognitionOutput(BaseInferenceType):
107
+ """Outputs of inference for the Automatic Speech Recognition task"""
108
+
109
+ text: str
110
+ """The recognized text."""
111
+ chunks: Optional[List[AutomaticSpeechRecognitionOutputChunk]] = None
112
+ """When returnTimestamps is enabled, chunks contains a list of audio chunks identified by
113
+ the model.
114
+ """
parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/base.py ADDED
@@ -0,0 +1,161 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """Contains a base class for all inference types."""
15
+
16
+ import inspect
17
+ import json
18
+ from dataclasses import asdict, dataclass
19
+ from typing import Any, Dict, List, Type, TypeVar, Union, get_args
20
+
21
+
22
+ T = TypeVar("T", bound="BaseInferenceType")
23
+
24
+
25
+ def _repr_with_extra(self):
26
+ fields = list(self.__dataclass_fields__.keys())
27
+ other_fields = list(k for k in self.__dict__ if k not in fields)
28
+ return f"{self.__class__.__name__}({', '.join(f'{k}={self.__dict__[k]!r}' for k in fields + other_fields)})"
29
+
30
+
31
+ def dataclass_with_extra(cls: Type[T]) -> Type[T]:
32
+ """Decorator to add a custom __repr__ method to a dataclass, showing all fields, including extra ones.
33
+
34
+ This decorator only works with dataclasses that inherit from `BaseInferenceType`.
35
+ """
36
+ cls = dataclass(cls)
37
+ cls.__repr__ = _repr_with_extra # type: ignore[method-assign]
38
+ return cls
39
+
40
+
41
+ @dataclass
42
+ class BaseInferenceType(dict):
43
+ """Base class for all inference types.
44
+
45
+ Object is a dataclass and a dict for backward compatibility but plan is to remove the dict part in the future.
46
+
47
+ Handle parsing from dict, list and json strings in a permissive way to ensure future-compatibility (e.g. all fields
48
+ are made optional, and non-expected fields are added as dict attributes).
49
+ """
50
+
51
+ @classmethod
52
+ def parse_obj_as_list(cls: Type[T], data: Union[bytes, str, List, Dict]) -> List[T]:
53
+ """Alias to parse server response and return a single instance.
54
+
55
+ See `parse_obj` for more details.
56
+ """
57
+ output = cls.parse_obj(data)
58
+ if not isinstance(output, list):
59
+ raise ValueError(f"Invalid input data for {cls}. Expected a list, but got {type(output)}.")
60
+ return output
61
+
62
+ @classmethod
63
+ def parse_obj_as_instance(cls: Type[T], data: Union[bytes, str, List, Dict]) -> T:
64
+ """Alias to parse server response and return a single instance.
65
+
66
+ See `parse_obj` for more details.
67
+ """
68
+ output = cls.parse_obj(data)
69
+ if isinstance(output, list):
70
+ raise ValueError(f"Invalid input data for {cls}. Expected a single instance, but got a list.")
71
+ return output
72
+
73
+ @classmethod
74
+ def parse_obj(cls: Type[T], data: Union[bytes, str, List, Dict]) -> Union[List[T], T]:
75
+ """Parse server response as a dataclass or list of dataclasses.
76
+
77
+ To enable future-compatibility, we want to handle cases where the server return more fields than expected.
78
+ In such cases, we don't want to raise an error but still create the dataclass object. Remaining fields are
79
+ added as dict attributes.
80
+ """
81
+ # Parse server response (from bytes)
82
+ if isinstance(data, bytes):
83
+ data = data.decode()
84
+ if isinstance(data, str):
85
+ data = json.loads(data)
86
+
87
+ # If a list, parse each item individually
88
+ if isinstance(data, List):
89
+ return [cls.parse_obj(d) for d in data] # type: ignore [misc]
90
+
91
+ # At this point, we expect a dict
92
+ if not isinstance(data, dict):
93
+ raise ValueError(f"Invalid data type: {type(data)}")
94
+
95
+ init_values = {}
96
+ other_values = {}
97
+ for key, value in data.items():
98
+ key = normalize_key(key)
99
+ if key in cls.__dataclass_fields__ and cls.__dataclass_fields__[key].init:
100
+ if isinstance(value, dict) or isinstance(value, list):
101
+ field_type = cls.__dataclass_fields__[key].type
102
+
103
+ # if `field_type` is a `BaseInferenceType`, parse it
104
+ if inspect.isclass(field_type) and issubclass(field_type, BaseInferenceType):
105
+ value = field_type.parse_obj(value)
106
+
107
+ # otherwise, recursively parse nested dataclasses (if possible)
108
+ # `get_args` returns handle Union and Optional for us
109
+ else:
110
+ expected_types = get_args(field_type)
111
+ for expected_type in expected_types:
112
+ if getattr(expected_type, "_name", None) == "List":
113
+ expected_type = get_args(expected_type)[
114
+ 0
115
+ ] # assume same type for all items in the list
116
+ if inspect.isclass(expected_type) and issubclass(expected_type, BaseInferenceType):
117
+ value = expected_type.parse_obj(value)
118
+ break
119
+ init_values[key] = value
120
+ else:
121
+ other_values[key] = value
122
+
123
+ # Make all missing fields default to None
124
+ # => ensure that dataclass initialization will never fail even if the server does not return all fields.
125
+ for key in cls.__dataclass_fields__:
126
+ if key not in init_values:
127
+ init_values[key] = None
128
+
129
+ # Initialize dataclass with expected values
130
+ item = cls(**init_values)
131
+
132
+ # Add remaining fields as dict attributes
133
+ item.update(other_values)
134
+
135
+ # Add remaining fields as extra dataclass fields.
136
+ # They won't be part of the dataclass fields but will be accessible as attributes.
137
+ # Use @dataclass_with_extra to show them in __repr__.
138
+ item.__dict__.update(other_values)
139
+ return item
140
+
141
+ def __post_init__(self):
142
+ self.update(asdict(self))
143
+
144
+ def __setitem__(self, __key: Any, __value: Any) -> None:
145
+ # Hacky way to keep dataclass values in sync when dict is updated
146
+ super().__setitem__(__key, __value)
147
+ if __key in self.__dataclass_fields__ and getattr(self, __key, None) != __value:
148
+ self.__setattr__(__key, __value)
149
+ return
150
+
151
+ def __setattr__(self, __name: str, __value: Any) -> None:
152
+ # Hacky way to keep dict values is sync when dataclass is updated
153
+ super().__setattr__(__name, __value)
154
+ if self.get(__name) != __value:
155
+ self[__name] = __value
156
+ return
157
+
158
+
159
+ def normalize_key(key: str) -> str:
160
+ # e.g "content-type" -> "content_type", "Accept" -> "accept"
161
+ return key.replace("-", "_").replace(" ", "_").lower()
parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/document_question_answering.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Inference code generated from the JSON schema spec in @huggingface/tasks.
#
# See:
# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
# NOTE: auto-generated module — do not edit by hand.
from typing import Any, List, Optional, Union

from .base import BaseInferenceType, dataclass_with_extra


@dataclass_with_extra
class DocumentQuestionAnsweringInputData(BaseInferenceType):
    """One (document, question) pair to answer"""

    image: Any
    """The image on which the question is asked"""
    question: str
    """A question to ask of the document"""


@dataclass_with_extra
class DocumentQuestionAnsweringParameters(BaseInferenceType):
    """Additional inference parameters for Document Question Answering"""

    doc_stride: Optional[int] = None
    """If the words in the document are too long to fit with the question for the model, it will
    be split in several chunks with some overlap. This argument controls the size of that
    overlap.
    """
    handle_impossible_answer: Optional[bool] = None
    """Whether to accept impossible as an answer"""
    lang: Optional[str] = None
    """Language to use while running OCR. Defaults to english."""
    max_answer_len: Optional[int] = None
    """The maximum length of predicted answers (e.g., only answers with a shorter length are
    considered).
    """
    max_question_len: Optional[int] = None
    """The maximum length of the question after tokenization. It will be truncated if needed."""
    max_seq_len: Optional[int] = None
    """The maximum length of the total sentence (context + question) in tokens of each chunk
    passed to the model. The context will be split in several chunks (using doc_stride as
    overlap) if needed.
    """
    top_k: Optional[int] = None
    """The number of answers to return (will be chosen by order of likelihood). Can return less
    than top_k answers if there are not enough options available within the context.
    """
    word_boxes: Optional[List[Union[List[float], str]]] = None
    """A list of words and bounding boxes (normalized 0->1000). If provided, the inference will
    skip the OCR step and use the provided bounding boxes instead.
    """


@dataclass_with_extra
class DocumentQuestionAnsweringInput(BaseInferenceType):
    """Inputs for Document Question Answering inference"""

    inputs: DocumentQuestionAnsweringInputData
    """One (document, question) pair to answer"""
    parameters: Optional[DocumentQuestionAnsweringParameters] = None
    """Additional inference parameters for Document Question Answering"""


@dataclass_with_extra
class DocumentQuestionAnsweringOutputElement(BaseInferenceType):
    """Outputs of inference for the Document Question Answering task"""

    answer: str
    """The answer to the question."""
    end: int
    """The end word index of the answer (in the OCR’d version of the input or provided word
    boxes).
    """
    score: float
    """The probability associated to the answer."""
    start: int
    """The start word index of the answer (in the OCR’d version of the input or provided word
    boxes).
    """
parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/feature_extraction.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Inference code generated from the JSON schema spec in @huggingface/tasks.
#
# See:
# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
# NOTE: auto-generated module — do not edit by hand.
from typing import List, Literal, Optional, Union

from .base import BaseInferenceType, dataclass_with_extra


FeatureExtractionInputTruncationDirection = Literal["Left", "Right"]


@dataclass_with_extra
class FeatureExtractionInput(BaseInferenceType):
    """Feature Extraction Input.
    Auto-generated from TEI specs.
    For more details, check out
    https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tei-import.ts.
    """

    inputs: Union[List[str], str]
    """The text or list of texts to embed."""
    normalize: Optional[bool] = None
    prompt_name: Optional[str] = None
    """The name of the prompt that should be used for encoding. If not set, no prompt
    will be applied.
    Must be a key in the `sentence-transformers` configuration `prompts` dictionary.
    For example if ``prompt_name`` is "query" and the ``prompts`` is {"query": "query: ",
    ...},
    then the sentence "What is the capital of France?" will be encoded as
    "query: What is the capital of France?" because the prompt text will be prepended before
    any text to encode.
    """
    truncate: Optional[bool] = None
    truncation_direction: Optional["FeatureExtractionInputTruncationDirection"] = None
parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/fill_mask.py ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Inference code generated from the JSON schema spec in @huggingface/tasks.
#
# See:
# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
# NOTE: auto-generated module — do not edit by hand.
from typing import Any, List, Optional

from .base import BaseInferenceType, dataclass_with_extra


@dataclass_with_extra
class FillMaskParameters(BaseInferenceType):
    """Additional inference parameters for Fill Mask"""

    targets: Optional[List[str]] = None
    """When passed, the model will limit the scores to the passed targets instead of looking up
    in the whole vocabulary. If the provided targets are not in the model vocab, they will be
    tokenized and the first resulting token will be used (with a warning, and that might be
    slower).
    """
    top_k: Optional[int] = None
    """When passed, overrides the number of predictions to return."""


@dataclass_with_extra
class FillMaskInput(BaseInferenceType):
    """Inputs for Fill Mask inference"""

    inputs: str
    """The text with masked tokens"""
    parameters: Optional[FillMaskParameters] = None
    """Additional inference parameters for Fill Mask"""


@dataclass_with_extra
class FillMaskOutputElement(BaseInferenceType):
    """Outputs of inference for the Fill Mask task"""

    score: float
    """The corresponding probability"""
    sequence: str
    """The corresponding input with the mask token prediction."""
    token: int
    """The predicted token id (to replace the masked one)."""
    # NOTE(review): the codegen emits both an untyped `token_str` and a renamed
    # typed variant below; kept exactly as generated.
    token_str: Any
    fill_mask_output_token_str: Optional[str] = None
    """The predicted token (to replace the masked one)."""
parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_classification.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Inference code generated from the JSON schema spec in @huggingface/tasks.
#
# See:
# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
# NOTE: auto-generated module — do not edit by hand.
from typing import Literal, Optional

from .base import BaseInferenceType, dataclass_with_extra


ImageClassificationOutputTransform = Literal["sigmoid", "softmax", "none"]


@dataclass_with_extra
class ImageClassificationParameters(BaseInferenceType):
    """Additional inference parameters for Image Classification"""

    function_to_apply: Optional["ImageClassificationOutputTransform"] = None
    """The function to apply to the model outputs in order to retrieve the scores."""
    top_k: Optional[int] = None
    """When specified, limits the output to the top K most probable classes."""


@dataclass_with_extra
class ImageClassificationInput(BaseInferenceType):
    """Inputs for Image Classification inference"""

    inputs: str
    """The input image data as a base64-encoded string. If no `parameters` are provided, you can
    also provide the image data as a raw bytes payload.
    """
    parameters: Optional[ImageClassificationParameters] = None
    """Additional inference parameters for Image Classification"""


@dataclass_with_extra
class ImageClassificationOutputElement(BaseInferenceType):
    """Outputs of inference for the Image Classification task"""

    label: str
    """The predicted class label."""
    score: float
    """The corresponding probability."""
parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_segmentation.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Inference code generated from the JSON schema spec in @huggingface/tasks.
#
# See:
# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
# NOTE: auto-generated module — do not edit by hand.
from typing import Literal, Optional

from .base import BaseInferenceType, dataclass_with_extra


ImageSegmentationSubtask = Literal["instance", "panoptic", "semantic"]


@dataclass_with_extra
class ImageSegmentationParameters(BaseInferenceType):
    """Additional inference parameters for Image Segmentation"""

    mask_threshold: Optional[float] = None
    """Threshold to use when turning the predicted masks into binary values."""
    overlap_mask_area_threshold: Optional[float] = None
    """Mask overlap threshold to eliminate small, disconnected segments."""
    subtask: Optional["ImageSegmentationSubtask"] = None
    """Segmentation task to be performed, depending on model capabilities."""
    threshold: Optional[float] = None
    """Probability threshold to filter out predicted masks."""


@dataclass_with_extra
class ImageSegmentationInput(BaseInferenceType):
    """Inputs for Image Segmentation inference"""

    inputs: str
    """The input image data as a base64-encoded string. If no `parameters` are provided, you can
    also provide the image data as a raw bytes payload.
    """
    parameters: Optional[ImageSegmentationParameters] = None
    """Additional inference parameters for Image Segmentation"""


@dataclass_with_extra
class ImageSegmentationOutputElement(BaseInferenceType):
    """Outputs of inference for the Image Segmentation task
    A predicted mask / segment
    """

    label: str
    """The label of the predicted segment."""
    mask: str
    """The corresponding mask as a black-and-white image (base64-encoded)."""
    score: Optional[float] = None
    """The score or confidence degree the model has."""
parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/image_to_image.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Inference code generated from the JSON schema spec in @huggingface/tasks.
#
# See:
# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
# NOTE: auto-generated module — do not edit by hand.
from typing import Any, Optional

from .base import BaseInferenceType, dataclass_with_extra


@dataclass_with_extra
class ImageToImageTargetSize(BaseInferenceType):
    """The size in pixel of the output image."""

    height: int
    width: int


@dataclass_with_extra
class ImageToImageParameters(BaseInferenceType):
    """Additional inference parameters for Image To Image"""

    guidance_scale: Optional[float] = None
    """For diffusion models. A higher guidance scale value encourages the model to generate
    images closely linked to the text prompt at the expense of lower image quality.
    """
    negative_prompt: Optional[str] = None
    """One prompt to guide what NOT to include in image generation."""
    num_inference_steps: Optional[int] = None
    """For diffusion models. The number of denoising steps. More denoising steps usually lead to
    a higher quality image at the expense of slower inference.
    """
    target_size: Optional[ImageToImageTargetSize] = None
    """The size in pixel of the output image."""


@dataclass_with_extra
class ImageToImageInput(BaseInferenceType):
    """Inputs for Image To Image inference"""

    inputs: str
    """The input image data as a base64-encoded string. If no `parameters` are provided, you can
    also provide the image data as a raw bytes payload.
    """
    parameters: Optional[ImageToImageParameters] = None
    """Additional inference parameters for Image To Image"""


@dataclass_with_extra
class ImageToImageOutput(BaseInferenceType):
    """Outputs of inference for the Image To Image task"""

    image: Any
    """The output image returned as raw bytes in the payload."""
parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/object_detection.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Inference code generated from the JSON schema spec in @huggingface/tasks.
#
# See:
# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
# NOTE: auto-generated module — do not edit by hand.
from typing import Optional

from .base import BaseInferenceType, dataclass_with_extra


@dataclass_with_extra
class ObjectDetectionParameters(BaseInferenceType):
    """Additional inference parameters for Object Detection"""

    threshold: Optional[float] = None
    """The probability necessary to make a prediction."""


@dataclass_with_extra
class ObjectDetectionInput(BaseInferenceType):
    """Inputs for Object Detection inference"""

    inputs: str
    """The input image data as a base64-encoded string. If no `parameters` are provided, you can
    also provide the image data as a raw bytes payload.
    """
    parameters: Optional[ObjectDetectionParameters] = None
    """Additional inference parameters for Object Detection"""


@dataclass_with_extra
class ObjectDetectionBoundingBox(BaseInferenceType):
    """The predicted bounding box. Coordinates are relative to the top left corner of the input
    image.
    """

    xmax: int
    """The x-coordinate of the bottom-right corner of the bounding box."""
    xmin: int
    """The x-coordinate of the top-left corner of the bounding box."""
    ymax: int
    """The y-coordinate of the bottom-right corner of the bounding box."""
    ymin: int
    """The y-coordinate of the top-left corner of the bounding box."""


@dataclass_with_extra
class ObjectDetectionOutputElement(BaseInferenceType):
    """Outputs of inference for the Object Detection task"""

    box: ObjectDetectionBoundingBox
    """The predicted bounding box. Coordinates are relative to the top left corner of the input
    image.
    """
    label: str
    """The predicted label for the bounding box."""
    score: float
    """The associated score / probability."""
parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/question_answering.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Inference code generated from the JSON schema spec in @huggingface/tasks.
#
# See:
# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
# NOTE: auto-generated module — do not edit by hand.
from typing import Optional

from .base import BaseInferenceType, dataclass_with_extra


@dataclass_with_extra
class QuestionAnsweringInputData(BaseInferenceType):
    """One (context, question) pair to answer"""

    context: str
    """The context to be used for answering the question"""
    question: str
    """The question to be answered"""


@dataclass_with_extra
class QuestionAnsweringParameters(BaseInferenceType):
    """Additional inference parameters for Question Answering"""

    align_to_words: Optional[bool] = None
    """Attempts to align the answer to real words. Improves quality on space separated
    languages. Might hurt on non-space-separated languages (like Japanese or Chinese)
    """
    doc_stride: Optional[int] = None
    """If the context is too long to fit with the question for the model, it will be split in
    several chunks with some overlap. This argument controls the size of that overlap.
    """
    handle_impossible_answer: Optional[bool] = None
    """Whether to accept impossible as an answer."""
    max_answer_len: Optional[int] = None
    """The maximum length of predicted answers (e.g., only answers with a shorter length are
    considered).
    """
    max_question_len: Optional[int] = None
    """The maximum length of the question after tokenization. It will be truncated if needed."""
    max_seq_len: Optional[int] = None
    """The maximum length of the total sentence (context + question) in tokens of each chunk
    passed to the model. The context will be split in several chunks (using docStride as
    overlap) if needed.
    """
    top_k: Optional[int] = None
    """The number of answers to return (will be chosen by order of likelihood). Note that we
    return less than topk answers if there are not enough options available within the
    context.
    """


@dataclass_with_extra
class QuestionAnsweringInput(BaseInferenceType):
    """Inputs for Question Answering inference"""

    inputs: QuestionAnsweringInputData
    """One (context, question) pair to answer"""
    parameters: Optional[QuestionAnsweringParameters] = None
    """Additional inference parameters for Question Answering"""


@dataclass_with_extra
class QuestionAnsweringOutputElement(BaseInferenceType):
    """Outputs of inference for the Question Answering task"""

    answer: str
    """The answer to the question."""
    end: int
    """The character position in the input where the answer ends."""
    score: float
    """The probability associated to the answer."""
    start: int
    """The character position in the input where the answer begins."""
parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/summarization.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Inference code generated from the JSON schema spec in @huggingface/tasks.
#
# See:
# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
# NOTE: auto-generated module — do not edit by hand.
from typing import Any, Dict, Literal, Optional

from .base import BaseInferenceType, dataclass_with_extra


SummarizationTruncationStrategy = Literal["do_not_truncate", "longest_first", "only_first", "only_second"]


@dataclass_with_extra
class SummarizationParameters(BaseInferenceType):
    """Additional inference parameters for summarization."""

    clean_up_tokenization_spaces: Optional[bool] = None
    """Whether to clean up the potential extra spaces in the text output."""
    generate_parameters: Optional[Dict[str, Any]] = None
    """Additional parametrization of the text generation algorithm."""
    truncation: Optional["SummarizationTruncationStrategy"] = None
    """The truncation strategy to use."""


@dataclass_with_extra
class SummarizationInput(BaseInferenceType):
    """Inputs for Summarization inference"""

    inputs: str
    """The input text to summarize."""
    parameters: Optional[SummarizationParameters] = None
    """Additional inference parameters for summarization."""


@dataclass_with_extra
class SummarizationOutput(BaseInferenceType):
    """Outputs of inference for the Summarization task"""

    summary_text: str
    """The summarized text."""
parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/table_question_answering.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Inference code generated from the JSON schema spec in @huggingface/tasks.
#
# See:
# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
# NOTE: auto-generated module — do not edit by hand.
from typing import Dict, List, Literal, Optional

from .base import BaseInferenceType, dataclass_with_extra


@dataclass_with_extra
class TableQuestionAnsweringInputData(BaseInferenceType):
    """One (table, question) pair to answer"""

    question: str
    """The question to be answered about the table"""
    table: Dict[str, List[str]]
    """The table to serve as context for the questions"""


Padding = Literal["do_not_pad", "longest", "max_length"]


@dataclass_with_extra
class TableQuestionAnsweringParameters(BaseInferenceType):
    """Additional inference parameters for Table Question Answering"""

    padding: Optional["Padding"] = None
    """Activates and controls padding."""
    sequential: Optional[bool] = None
    """Whether to do inference sequentially or as a batch. Batching is faster, but models like
    SQA require the inference to be done sequentially to extract relations within sequences,
    given their conversational nature.
    """
    truncation: Optional[bool] = None
    """Activates and controls truncation."""


@dataclass_with_extra
class TableQuestionAnsweringInput(BaseInferenceType):
    """Inputs for Table Question Answering inference"""

    inputs: TableQuestionAnsweringInputData
    """One (table, question) pair to answer"""
    parameters: Optional[TableQuestionAnsweringParameters] = None
    """Additional inference parameters for Table Question Answering"""


@dataclass_with_extra
class TableQuestionAnsweringOutputElement(BaseInferenceType):
    """Outputs of inference for the Table Question Answering task"""

    answer: str
    """The answer of the question given the table. If there is an aggregator, the answer will be
    preceded by `AGGREGATOR >`.
    """
    cells: List[str]
    """List of strings made up of the answer cell values."""
    coordinates: List[List[int]]
    """Coordinates of the cells of the answers."""
    aggregator: Optional[str] = None
    """If the model has an aggregator, this returns the aggregator."""
parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text2text_generation.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Inference code generated from the JSON schema spec in @huggingface/tasks.
#
# See:
# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
# NOTE: auto-generated module — do not edit by hand.
from typing import Any, Dict, Literal, Optional

from .base import BaseInferenceType, dataclass_with_extra


Text2TextGenerationTruncationStrategy = Literal["do_not_truncate", "longest_first", "only_first", "only_second"]


@dataclass_with_extra
class Text2TextGenerationParameters(BaseInferenceType):
    """Additional inference parameters for Text2text Generation"""

    clean_up_tokenization_spaces: Optional[bool] = None
    """Whether to clean up the potential extra spaces in the text output."""
    generate_parameters: Optional[Dict[str, Any]] = None
    """Additional parametrization of the text generation algorithm"""
    truncation: Optional["Text2TextGenerationTruncationStrategy"] = None
    """The truncation strategy to use"""


@dataclass_with_extra
class Text2TextGenerationInput(BaseInferenceType):
    """Inputs for Text2text Generation inference"""

    inputs: str
    """The input text data"""
    parameters: Optional[Text2TextGenerationParameters] = None
    """Additional inference parameters for Text2text Generation"""


@dataclass_with_extra
class Text2TextGenerationOutput(BaseInferenceType):
    """Outputs of inference for the Text2text Generation task"""

    # NOTE(review): the codegen emits both an untyped `generated_text` and a
    # renamed typed variant below; kept exactly as generated.
    generated_text: Any
    text2_text_generation_output_generated_text: Optional[str] = None
    """The generated text."""
parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text_classification.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Inference code generated from the JSON schema spec in @huggingface/tasks.
#
# See:
# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
# NOTE: auto-generated module — do not edit by hand.
from typing import Literal, Optional

from .base import BaseInferenceType, dataclass_with_extra


TextClassificationOutputTransform = Literal["sigmoid", "softmax", "none"]


@dataclass_with_extra
class TextClassificationParameters(BaseInferenceType):
    """Additional inference parameters for Text Classification"""

    function_to_apply: Optional["TextClassificationOutputTransform"] = None
    """The function to apply to the model outputs in order to retrieve the scores."""
    top_k: Optional[int] = None
    """When specified, limits the output to the top K most probable classes."""


@dataclass_with_extra
class TextClassificationInput(BaseInferenceType):
    """Inputs for Text Classification inference"""

    inputs: str
    """The text to classify"""
    parameters: Optional[TextClassificationParameters] = None
    """Additional inference parameters for Text Classification"""


@dataclass_with_extra
class TextClassificationOutputElement(BaseInferenceType):
    """Outputs of inference for the Text Classification task"""

    label: str
    """The predicted class label."""
    score: float
    """The corresponding probability."""
parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text_to_audio.py ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Inference code generated from the JSON schema spec in @huggingface/tasks.
#
# See:
# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
# NOTE: auto-generated module — do not edit by hand.
from typing import Any, Literal, Optional, Union

from .base import BaseInferenceType, dataclass_with_extra


TextToAudioEarlyStoppingEnum = Literal["never"]


@dataclass_with_extra
class TextToAudioGenerationParameters(BaseInferenceType):
    """Parametrization of the text generation process"""

    do_sample: Optional[bool] = None
    """Whether to use sampling instead of greedy decoding when generating new tokens."""
    early_stopping: Optional[Union[bool, "TextToAudioEarlyStoppingEnum"]] = None
    """Controls the stopping condition for beam-based methods."""
    epsilon_cutoff: Optional[float] = None
    """If set to float strictly between 0 and 1, only tokens with a conditional probability
    greater than epsilon_cutoff will be sampled. In the paper, suggested values range from
    3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language
    Model Desmoothing](https://hf.co/papers/2210.15191) for more details.
    """
    eta_cutoff: Optional[float] = None
    """Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to
    float strictly between 0 and 1, a token is only considered if it is greater than either
    eta_cutoff or sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits))). The latter
    term is intuitively the expected next token probability, scaled by sqrt(eta_cutoff). In
    the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model.
    See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191)
    for more details.
    """
    max_length: Optional[int] = None
    """The maximum length (in tokens) of the generated text, including the input."""
    max_new_tokens: Optional[int] = None
    """The maximum number of tokens to generate. Takes precedence over max_length."""
    min_length: Optional[int] = None
    """The minimum length (in tokens) of the generated text, including the input."""
    min_new_tokens: Optional[int] = None
    """The minimum number of tokens to generate. Takes precedence over min_length."""
    num_beam_groups: Optional[int] = None
    """Number of groups to divide num_beams into in order to ensure diversity among different
    groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details.
    """
    num_beams: Optional[int] = None
    """Number of beams to use for beam search."""
    penalty_alpha: Optional[float] = None
    """The value balances the model confidence and the degeneration penalty in contrastive
    search decoding.
    """
    temperature: Optional[float] = None
    """The value used to modulate the next token probabilities."""
    top_k: Optional[int] = None
    """The number of highest probability vocabulary tokens to keep for top-k-filtering."""
    top_p: Optional[float] = None
    """If set to float < 1, only the smallest set of most probable tokens with probabilities
    that add up to top_p or higher are kept for generation.
    """
    typical_p: Optional[float] = None
    """Local typicality measures how similar the conditional probability of predicting a target
    token next is to the expected conditional probability of predicting a random token next,
    given the partial text already generated. If set to float < 1, the smallest set of the
    most locally typical tokens with probabilities that add up to typical_p or higher are
    kept for generation. See [this paper](https://hf.co/papers/2202.00666) for more details.
    """
    use_cache: Optional[bool] = None
    """Whether the model should use the past last key/values attentions to speed up decoding"""


@dataclass_with_extra
class TextToAudioParameters(BaseInferenceType):
    """Additional inference parameters for Text To Audio"""

    # Will be deprecated in the future when the renaming to `generation_parameters` is implemented in transformers
    generate_kwargs: Optional[TextToAudioGenerationParameters] = None
    """Parametrization of the text generation process"""


@dataclass_with_extra
class TextToAudioInput(BaseInferenceType):
    """Inputs for Text To Audio inference"""

    inputs: str
    """The input text data"""
    parameters: Optional[TextToAudioParameters] = None
    """Additional inference parameters for Text To Audio"""


@dataclass_with_extra
class TextToAudioOutput(BaseInferenceType):
    """Outputs of inference for the Text To Audio task"""

    audio: Any
    """The generated audio waveform."""
    sampling_rate: float
    """The sampling rate of the generated audio waveform."""
parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text_to_image.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from typing import Any, Optional
7
+
8
+ from .base import BaseInferenceType, dataclass_with_extra
9
+
10
+
11
@dataclass_with_extra
class TextToImageParameters(BaseInferenceType):
    """Additional inference parameters for Text To Image."""

    # Higher values push generations closer to the text prompt; values that are
    # too high may cause saturation and other artifacts.
    guidance_scale: Optional[float] = None
    # Height in pixels of the output image.
    height: Optional[int] = None
    # One prompt describing what NOT to include in the generated image.
    negative_prompt: Optional[str] = None
    # Number of denoising steps; more steps usually yield higher quality at the
    # cost of slower inference.
    num_inference_steps: Optional[int] = None
    # Override the scheduler with a compatible one.
    scheduler: Optional[str] = None
    # Seed for the random number generator.
    seed: Optional[int] = None
    # Width in pixels of the output image.
    width: Optional[int] = None


@dataclass_with_extra
class TextToImageInput(BaseInferenceType):
    """Inputs for Text To Image inference."""

    # The input text data (sometimes called "prompt").
    inputs: str
    # Additional inference parameters for Text To Image.
    parameters: Optional[TextToImageParameters] = None


@dataclass_with_extra
class TextToImageOutput(BaseInferenceType):
    """Outputs of inference for the Text To Image task."""

    # The generated image, returned as raw bytes in the payload.
    image: Any
parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text_to_speech.py ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from typing import Any, Literal, Optional, Union
7
+
8
+ from .base import BaseInferenceType, dataclass_with_extra
9
+
10
+
11
TextToSpeechEarlyStoppingEnum = Literal["never"]


@dataclass_with_extra
class TextToSpeechGenerationParameters(BaseInferenceType):
    """Parametrization of the text generation process."""

    # Use sampling instead of greedy decoding when generating new tokens.
    do_sample: Optional[bool] = None
    # Stopping condition for beam-based methods (bool or "never").
    early_stopping: Optional[Union[bool, "TextToSpeechEarlyStoppingEnum"]] = None
    # Epsilon sampling: only tokens whose conditional probability exceeds
    # epsilon_cutoff are sampled (suggested range 3e-4 to 9e-4, model-size
    # dependent). See "Truncation Sampling as Language Model Desmoothing"
    # (https://hf.co/papers/2210.15191).
    epsilon_cutoff: Optional[float] = None
    # Eta sampling, a hybrid of locally typical and epsilon sampling: a token is
    # considered only if its probability exceeds eta_cutoff or
    # sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits))) — the latter
    # being the expected next-token probability scaled by sqrt(eta_cutoff)
    # (suggested range 3e-4 to 2e-3). See https://hf.co/papers/2210.15191.
    eta_cutoff: Optional[float] = None
    # Maximum length (in tokens) of the generated text, input included.
    max_length: Optional[int] = None
    # Maximum number of tokens to generate; takes precedence over max_length.
    max_new_tokens: Optional[int] = None
    # Minimum length (in tokens) of the generated text, input included.
    min_length: Optional[int] = None
    # Minimum number of tokens to generate; takes precedence over min_length.
    min_new_tokens: Optional[int] = None
    # Number of groups num_beams is divided into to ensure diversity among
    # beam groups. See https://hf.co/papers/1610.02424.
    num_beam_groups: Optional[int] = None
    # Number of beams to use for beam search.
    num_beams: Optional[int] = None
    # Balances model confidence against the degeneration penalty in
    # contrastive search decoding.
    penalty_alpha: Optional[float] = None
    # Value used to modulate the next-token probabilities.
    temperature: Optional[float] = None
    # Number of highest-probability vocabulary tokens kept for top-k filtering.
    top_k: Optional[int] = None
    # If set to a float < 1, only the smallest set of most probable tokens whose
    # probabilities sum to at least top_p is kept for generation.
    top_p: Optional[float] = None
    # Local typicality filtering: if set to a float < 1, only the smallest set
    # of the most locally typical tokens whose probabilities sum to at least
    # typical_p is kept. See https://hf.co/papers/2202.00666.
    typical_p: Optional[float] = None
    # Reuse the past last key/value attentions to speed up decoding.
    use_cache: Optional[bool] = None


@dataclass_with_extra
class TextToSpeechParameters(BaseInferenceType):
    """Additional inference parameters for Text To Speech."""

    # Named `generate_kwargs` for now; will be renamed to `generation_parameters`
    # once transformers implements that rename.
    generate_kwargs: Optional[TextToSpeechGenerationParameters] = None


@dataclass_with_extra
class TextToSpeechInput(BaseInferenceType):
    """Inputs for Text To Speech inference."""

    # The input text data.
    inputs: str
    # Additional inference parameters for Text To Speech.
    parameters: Optional[TextToSpeechParameters] = None


@dataclass_with_extra
class TextToSpeechOutput(BaseInferenceType):
    """Outputs of inference for the Text To Speech task."""

    # The generated audio.
    audio: Any
    # The sampling rate of the generated audio waveform.
    sampling_rate: Optional[float] = None
parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/text_to_video.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from typing import Any, List, Optional
7
+
8
+ from .base import BaseInferenceType, dataclass_with_extra
9
+
10
+
11
@dataclass_with_extra
class TextToVideoParameters(BaseInferenceType):
    """Additional inference parameters for Text To Video."""

    # Higher values push generations closer to the text prompt; values that are
    # too high may cause saturation and other artifacts.
    guidance_scale: Optional[float] = None
    # One or several prompts describing what NOT to include in the video.
    negative_prompt: Optional[List[str]] = None
    # How many video frames are generated.
    num_frames: Optional[float] = None
    # Number of denoising steps; more steps usually yield higher quality at the
    # cost of slower inference.
    num_inference_steps: Optional[int] = None
    # Seed for the random number generator.
    seed: Optional[int] = None


@dataclass_with_extra
class TextToVideoInput(BaseInferenceType):
    """Inputs for Text To Video inference."""

    # The input text data (sometimes called "prompt").
    inputs: str
    # Additional inference parameters for Text To Video.
    parameters: Optional[TextToVideoParameters] = None


@dataclass_with_extra
class TextToVideoOutput(BaseInferenceType):
    """Outputs of inference for the Text To Video task."""

    # The generated video, returned as raw bytes in the payload.
    video: Any
parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/translation.py ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from typing import Any, Dict, Literal, Optional
7
+
8
+ from .base import BaseInferenceType, dataclass_with_extra
9
+
10
+
11
TranslationTruncationStrategy = Literal["do_not_truncate", "longest_first", "only_first", "only_second"]


@dataclass_with_extra
class TranslationParameters(BaseInferenceType):
    """Additional inference parameters for Translation."""

    # Clean up potential extra spaces in the text output.
    clean_up_tokenization_spaces: Optional[bool] = None
    # Additional parametrization of the text generation algorithm.
    generate_parameters: Optional[Dict[str, Any]] = None
    # Source language of the text; required for models that can translate
    # from multiple languages.
    src_lang: Optional[str] = None
    # Target language to translate to; required for models that can translate
    # to multiple languages.
    tgt_lang: Optional[str] = None
    # The truncation strategy to use.
    truncation: Optional["TranslationTruncationStrategy"] = None


@dataclass_with_extra
class TranslationInput(BaseInferenceType):
    """Inputs for Translation inference."""

    # The text to translate.
    inputs: str
    # Additional inference parameters for Translation.
    parameters: Optional[TranslationParameters] = None


@dataclass_with_extra
class TranslationOutput(BaseInferenceType):
    """Outputs of inference for the Translation task."""

    # The translated text.
    translation_text: str
parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/video_classification.py ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from typing import Any, Literal, Optional
7
+
8
+ from .base import BaseInferenceType, dataclass_with_extra
9
+
10
+
11
VideoClassificationOutputTransform = Literal["sigmoid", "softmax", "none"]


@dataclass_with_extra
class VideoClassificationParameters(BaseInferenceType):
    """Additional inference parameters for Video Classification."""

    # Sampling rate used to select frames from the video.
    frame_sampling_rate: Optional[int] = None
    # Function applied to the model outputs to retrieve the scores.
    function_to_apply: Optional["VideoClassificationOutputTransform"] = None
    # Number of sampled frames considered for classification.
    num_frames: Optional[int] = None
    # When specified, limits the output to the top K most probable classes.
    top_k: Optional[int] = None


@dataclass_with_extra
class VideoClassificationInput(BaseInferenceType):
    """Inputs for Video Classification inference."""

    # The input video data.
    inputs: Any
    # Additional inference parameters for Video Classification.
    parameters: Optional[VideoClassificationParameters] = None


@dataclass_with_extra
class VideoClassificationOutputElement(BaseInferenceType):
    """Outputs of inference for the Video Classification task."""

    # The predicted class label.
    label: str
    # The corresponding probability.
    score: float
parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/visual_question_answering.py ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from typing import Any, Optional
7
+
8
+ from .base import BaseInferenceType, dataclass_with_extra
9
+
10
+
11
@dataclass_with_extra
class VisualQuestionAnsweringInputData(BaseInferenceType):
    """One (image, question) pair to answer."""

    # The image.
    image: Any
    # The question to answer based on the image.
    question: str


@dataclass_with_extra
class VisualQuestionAnsweringParameters(BaseInferenceType):
    """Additional inference parameters for Visual Question Answering."""

    # Number of answers to return, chosen by order of likelihood; fewer than
    # top_k answers are returned if not enough options exist in the context.
    top_k: Optional[int] = None


@dataclass_with_extra
class VisualQuestionAnsweringInput(BaseInferenceType):
    """Inputs for Visual Question Answering inference."""

    # One (image, question) pair to answer.
    inputs: VisualQuestionAnsweringInputData
    # Additional inference parameters for Visual Question Answering.
    parameters: Optional[VisualQuestionAnsweringParameters] = None


@dataclass_with_extra
class VisualQuestionAnsweringOutputElement(BaseInferenceType):
    """Outputs of inference for the Visual Question Answering task."""

    # The associated score / probability.
    score: float
    # The answer to the question.
    answer: Optional[str] = None
parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/zero_shot_classification.py ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from typing import List, Optional
7
+
8
+ from .base import BaseInferenceType, dataclass_with_extra
9
+
10
+
11
@dataclass_with_extra
class ZeroShotClassificationParameters(BaseInferenceType):
    """Additional inference parameters for Zero Shot Classification."""

    # The set of possible class labels to classify the text into.
    candidate_labels: List[str]
    # Sentence used in conjunction with `candidate_labels` to attempt the text
    # classification, with the placeholder replaced by each candidate label.
    hypothesis_template: Optional[str] = None
    # Whether multiple candidate labels can be true. If False, scores are
    # normalized so label likelihoods sum to 1 for each sequence; if True,
    # labels are independent and probabilities are normalized per candidate.
    multi_label: Optional[bool] = None


@dataclass_with_extra
class ZeroShotClassificationInput(BaseInferenceType):
    """Inputs for Zero Shot Classification inference."""

    # The text to classify.
    inputs: str
    # Additional inference parameters for Zero Shot Classification.
    parameters: ZeroShotClassificationParameters


@dataclass_with_extra
class ZeroShotClassificationOutputElement(BaseInferenceType):
    """Outputs of inference for the Zero Shot Classification task."""

    # The predicted class label.
    label: str
    # The corresponding probability.
    score: float
parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/zero_shot_image_classification.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from typing import List, Optional
7
+
8
+ from .base import BaseInferenceType, dataclass_with_extra
9
+
10
+
11
@dataclass_with_extra
class ZeroShotImageClassificationParameters(BaseInferenceType):
    """Additional inference parameters for Zero Shot Image Classification."""

    # The candidate labels for this image.
    candidate_labels: List[str]
    # Sentence used in conjunction with `candidate_labels` to attempt the image
    # classification, with the placeholder replaced by each candidate label.
    hypothesis_template: Optional[str] = None


@dataclass_with_extra
class ZeroShotImageClassificationInput(BaseInferenceType):
    """Inputs for Zero Shot Image Classification inference."""

    # The input image data to classify, as a base64-encoded string.
    inputs: str
    # Additional inference parameters for Zero Shot Image Classification.
    parameters: ZeroShotImageClassificationParameters


@dataclass_with_extra
class ZeroShotImageClassificationOutputElement(BaseInferenceType):
    """Outputs of inference for the Zero Shot Image Classification task."""

    # The predicted class label.
    label: str
    # The corresponding probability.
    score: float
parrot/lib/python3.10/site-packages/huggingface_hub/inference/_generated/types/zero_shot_object_detection.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Inference code generated from the JSON schema spec in @huggingface/tasks.
2
+ #
3
+ # See:
4
+ # - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
5
+ # - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
6
+ from typing import List
7
+
8
+ from .base import BaseInferenceType, dataclass_with_extra
9
+
10
+
11
@dataclass_with_extra
class ZeroShotObjectDetectionParameters(BaseInferenceType):
    """Additional inference parameters for Zero Shot Object Detection."""

    # The candidate labels for this image.
    candidate_labels: List[str]


@dataclass_with_extra
class ZeroShotObjectDetectionInput(BaseInferenceType):
    """Inputs for Zero Shot Object Detection inference."""

    # The input image data as a base64-encoded string.
    inputs: str
    # Additional inference parameters for Zero Shot Object Detection.
    parameters: ZeroShotObjectDetectionParameters


@dataclass_with_extra
class ZeroShotObjectDetectionBoundingBox(BaseInferenceType):
    """The predicted bounding box, with coordinates relative to the top-left
    corner of the input image.
    """

    xmax: int
    xmin: int
    ymax: int
    ymin: int


@dataclass_with_extra
class ZeroShotObjectDetectionOutputElement(BaseInferenceType):
    """Outputs of inference for the Zero Shot Object Detection task."""

    # Predicted bounding box, relative to the top-left corner of the image.
    box: ZeroShotObjectDetectionBoundingBox
    # A candidate label.
    label: str
    # The associated score / probability.
    score: float
parrot/lib/python3.10/site-packages/huggingface_hub/utils/__init__.py ADDED
@@ -0,0 +1,110 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License
15
+
16
+ # ruff: noqa: F401
17
+
18
+ from huggingface_hub.errors import (
19
+ BadRequestError,
20
+ CacheNotFound,
21
+ CorruptedCacheException,
22
+ DisabledRepoError,
23
+ EntryNotFoundError,
24
+ FileMetadataError,
25
+ GatedRepoError,
26
+ HfHubHTTPError,
27
+ HFValidationError,
28
+ LocalEntryNotFoundError,
29
+ LocalTokenNotFoundError,
30
+ NotASafetensorsRepoError,
31
+ OfflineModeIsEnabled,
32
+ RepositoryNotFoundError,
33
+ RevisionNotFoundError,
34
+ SafetensorsParsingError,
35
+ )
36
+
37
+ from . import tqdm as _tqdm # _tqdm is the module
38
+ from ._auth import get_stored_tokens, get_token
39
+ from ._cache_assets import cached_assets_path
40
+ from ._cache_manager import (
41
+ CachedFileInfo,
42
+ CachedRepoInfo,
43
+ CachedRevisionInfo,
44
+ DeleteCacheStrategy,
45
+ HFCacheInfo,
46
+ scan_cache_dir,
47
+ )
48
+ from ._chunk_utils import chunk_iterable
49
+ from ._datetime import parse_datetime
50
+ from ._experimental import experimental
51
+ from ._fixes import SoftTemporaryDirectory, WeakFileLock, yaml_dump
52
+ from ._git_credential import list_credential_helpers, set_git_credential, unset_git_credential
53
+ from ._headers import build_hf_headers, get_token_to_send
54
+ from ._hf_folder import HfFolder
55
+ from ._http import (
56
+ configure_http_backend,
57
+ fix_hf_endpoint_in_url,
58
+ get_session,
59
+ hf_raise_for_status,
60
+ http_backoff,
61
+ reset_sessions,
62
+ )
63
+ from ._pagination import paginate
64
+ from ._paths import DEFAULT_IGNORE_PATTERNS, FORBIDDEN_FOLDERS, filter_repo_objects
65
+ from ._runtime import (
66
+ dump_environment_info,
67
+ get_aiohttp_version,
68
+ get_fastai_version,
69
+ get_fastapi_version,
70
+ get_fastcore_version,
71
+ get_gradio_version,
72
+ get_graphviz_version,
73
+ get_hf_hub_version,
74
+ get_hf_transfer_version,
75
+ get_jinja_version,
76
+ get_numpy_version,
77
+ get_pillow_version,
78
+ get_pydantic_version,
79
+ get_pydot_version,
80
+ get_python_version,
81
+ get_tensorboard_version,
82
+ get_tf_version,
83
+ get_torch_version,
84
+ is_aiohttp_available,
85
+ is_colab_enterprise,
86
+ is_fastai_available,
87
+ is_fastapi_available,
88
+ is_fastcore_available,
89
+ is_google_colab,
90
+ is_gradio_available,
91
+ is_graphviz_available,
92
+ is_hf_transfer_available,
93
+ is_jinja_available,
94
+ is_notebook,
95
+ is_numpy_available,
96
+ is_package_available,
97
+ is_pillow_available,
98
+ is_pydantic_available,
99
+ is_pydot_available,
100
+ is_safetensors_available,
101
+ is_tensorboard_available,
102
+ is_tf_available,
103
+ is_torch_available,
104
+ )
105
+ from ._safetensors import SafetensorsFileMetadata, SafetensorsRepoMetadata, TensorInfo
106
+ from ._subprocess import capture_output, run_interactive_subprocess, run_subprocess
107
+ from ._telemetry import send_telemetry
108
+ from ._typing import is_jsonable, is_simple_optional_type, unwrap_simple_optional_type
109
+ from ._validators import smoothly_deprecate_use_auth_token, validate_hf_hub_args, validate_repo_id
110
+ from .tqdm import are_progress_bars_disabled, disable_progress_bars, enable_progress_bars, tqdm, tqdm_stream_file
parrot/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_cache_assets.cpython-310.pyc ADDED
Binary file (5.09 kB). View file
 
parrot/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_git_credential.cpython-310.pyc ADDED
Binary file (4.11 kB). View file
 
parrot/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_headers.cpython-310.pyc ADDED
Binary file (7.49 kB). View file
 
parrot/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_hf_folder.cpython-310.pyc ADDED
Binary file (1.94 kB). View file
 
parrot/lib/python3.10/site-packages/huggingface_hub/utils/__pycache__/_safetensors.cpython-310.pyc ADDED
Binary file (4.98 kB). View file